Merge branch 'master' into reload_dictionary

Guillaume Tassery 2019-12-23 10:46:02 +07:00 committed by GitHub
commit 15fb9ad6de
254 changed files with 2242 additions and 41028 deletions

.gitmodules vendored

@@ -67,10 +67,10 @@
url = https://github.com/ClickHouse-Extras/libgsasl.git
[submodule "contrib/libcxx"]
path = contrib/libcxx
url = https://github.com/llvm-mirror/libcxx.git
url = https://github.com/ClickHouse-Extras/libcxx.git
[submodule "contrib/libcxxabi"]
path = contrib/libcxxabi
url = https://github.com/llvm-mirror/libcxxabi.git
url = https://github.com/ClickHouse-Extras/libcxxabi.git
[submodule "contrib/snappy"]
path = contrib/snappy
url = https://github.com/google/snappy
@@ -128,3 +128,6 @@
[submodule "contrib/icu"]
path = contrib/icu
url = https://github.com/unicode-org/icu.git
[submodule "contrib/libc-headers"]
path = contrib/libc-headers
url = https://github.com/ClickHouse-Extras/libc-headers.git


@@ -222,7 +222,7 @@ else ()
set(NOT_UNBUNDLED 1)
endif ()
# Using system libs can cause a lot of warnings in includes.
# Using system libs can cause a lot of warnings in includes (on macro expansion).
if (UNBUNDLED OR NOT (OS_LINUX OR APPLE) OR ARCH_32)
option (NO_WERROR "Disable -Werror compiler option" ON)
endif ()
@@ -352,7 +352,6 @@ if (ENABLE_TESTS)
endif ()
# Need to process before "contrib" dir:
include (libs/libcommon/cmake/find_gperftools.cmake)
include (libs/libcommon/cmake/find_jemalloc.cmake)
include (libs/libcommon/cmake/find_cctz.cmake)
include (libs/libmysqlxx/cmake/find_mysqlclient.cmake)
@@ -362,18 +361,6 @@ include (libs/libmysqlxx/cmake/find_mysqlclient.cmake)
if (USE_JEMALLOC)
message (STATUS "Link jemalloc: ${JEMALLOC_LIBRARIES}")
set (MALLOC_LIBRARIES ${JEMALLOC_LIBRARIES})
elseif (USE_TCMALLOC)
if (DEBUG_TCMALLOC AND NOT GPERFTOOLS_TCMALLOC_MINIMAL_DEBUG)
message (FATAL_ERROR "Requested DEBUG_TCMALLOC but debug library is not found. You should install Google Perftools. Example: sudo apt-get install libgoogle-perftools-dev")
endif ()
if (DEBUG_TCMALLOC AND GPERFTOOLS_TCMALLOC_MINIMAL_DEBUG)
message (STATUS "Link libtcmalloc_minimal_debug for testing: ${GPERFTOOLS_TCMALLOC_MINIMAL_DEBUG}")
set (MALLOC_LIBRARIES ${GPERFTOOLS_TCMALLOC_MINIMAL_DEBUG})
else ()
message (STATUS "Link libtcmalloc_minimal: ${GPERFTOOLS_TCMALLOC_MINIMAL}")
set (MALLOC_LIBRARIES ${GPERFTOOLS_TCMALLOC_MINIMAL})
endif ()
elseif (SANITIZE)
message (STATUS "Will use ${SANITIZE} sanitizer.")
elseif (OS_LINUX)


@@ -1,61 +0,0 @@
# https://github.com/vast-io/vast/blob/master/cmake/FindGperftools.cmake
# Tries to find Gperftools.
#
# Usage of this module as follows:
#
# find_package(Gperftools)
#
# Variables used by this module, they can change the default behaviour and need
# to be set before calling find_package:
#
# Gperftools_ROOT_DIR Set this variable to the root installation of
# Gperftools if the module has problems finding
# the proper installation path.
#
# Variables defined by this module:
#
# GPERFTOOLS_FOUND System has Gperftools libs/headers
# GPERFTOOLS_LIBRARIES The Gperftools libraries (tcmalloc & profiler)
# GPERFTOOLS_INCLUDE_DIR The location of Gperftools headers
find_library(GPERFTOOLS_TCMALLOC
NAMES tcmalloc
HINTS ${Gperftools_ROOT_DIR}/lib)
find_library(GPERFTOOLS_TCMALLOC_MINIMAL
NAMES tcmalloc_minimal
HINTS ${Gperftools_ROOT_DIR}/lib)
find_library(GPERFTOOLS_TCMALLOC_MINIMAL_DEBUG
NAMES tcmalloc_minimal_debug
HINTS ${Gperftools_ROOT_DIR}/lib)
find_library(GPERFTOOLS_PROFILER
NAMES profiler
HINTS ${Gperftools_ROOT_DIR}/lib)
find_library(GPERFTOOLS_TCMALLOC_AND_PROFILER
NAMES tcmalloc_and_profiler
HINTS ${Gperftools_ROOT_DIR}/lib)
find_path(GPERFTOOLS_INCLUDE_DIR
NAMES gperftools/heap-profiler.h
HINTS ${Gperftools_ROOT_DIR}/include)
set(GPERFTOOLS_LIBRARIES ${GPERFTOOLS_TCMALLOC_AND_PROFILER})
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(
Gperftools
DEFAULT_MSG
GPERFTOOLS_LIBRARIES
GPERFTOOLS_INCLUDE_DIR)
mark_as_advanced(
Gperftools_ROOT_DIR
GPERFTOOLS_TCMALLOC
GPERFTOOLS_PROFILER
GPERFTOOLS_TCMALLOC_AND_PROFILER
GPERFTOOLS_LIBRARIES
GPERFTOOLS_INCLUDE_DIR)


@@ -19,6 +19,6 @@ if (CMAKE_SYSTEM_PROCESSOR MATCHES "^(ppc64le.*|PPC64LE.*)")
set (ARCH_PPC64LE 1)
# FIXME: move this check into tools.cmake
if (COMPILER_CLANG OR (COMPILER_GCC AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 8))
message(FATAL_ERROR "Only gcc-8 is supported for powerpc architecture")
message(FATAL_ERROR "Only gcc-8 or higher is supported for powerpc architecture")
endif ()
endif ()


@@ -1,7 +1,5 @@
if (COMPILER_CLANG)
option (USE_LIBCXX "Use libc++ and libc++abi instead of libstdc++" ON)
option (USE_INTERNAL_LIBCXX_LIBRARY "Set to FALSE to use system libcxx and libcxxabi libraries instead of bundled" ${NOT_UNBUNDLED})
endif()
option (USE_LIBCXX "Use libc++ and libc++abi instead of libstdc++" ${NOT_UNBUNDLED})
option (USE_INTERNAL_LIBCXX_LIBRARY "Set to FALSE to use system libcxx and libcxxabi libraries instead of bundled" ${NOT_UNBUNDLED})
if (USE_LIBCXX)
set (CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -D_LIBCPP_DEBUG=0") # More checks in debug build.


@@ -1,7 +1,7 @@
# Broken in macos. TODO: update clang, re-test, enable
if (NOT APPLE)
option (ENABLE_EMBEDDED_COMPILER "Set to TRUE to enable support for 'compile' option for query execution" ${ENABLE_LIBRARIES})
option (USE_INTERNAL_LLVM_LIBRARY "Use bundled or system LLVM library. Default: system library for quicker developer builds." 0)
option (ENABLE_EMBEDDED_COMPILER "Set to TRUE to enable support for 'compile_expressions' option for query execution" ${ENABLE_LIBRARIES})
option (USE_INTERNAL_LLVM_LIBRARY "Use bundled or system LLVM library." ${NOT_UNBUNDLED})
endif ()
if (ENABLE_EMBEDDED_COMPILER)
@@ -13,27 +13,11 @@ if (ENABLE_EMBEDDED_COMPILER)
if (NOT USE_INTERNAL_LLVM_LIBRARY)
set (LLVM_PATHS "/usr/local/lib/llvm")
if (LLVM_VERSION)
find_package(LLVM ${LLVM_VERSION} CONFIG PATHS ${LLVM_PATHS})
elseif (CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
find_package(LLVM ${CMAKE_CXX_COMPILER_VERSION} CONFIG PATHS ${LLVM_PATHS})
else ()
# TODO: 9 8
foreach(llvm_v 7.1 7 6 5)
if (NOT LLVM_FOUND)
find_package (LLVM ${llvm_v} CONFIG PATHS ${LLVM_PATHS})
endif ()
endforeach ()
endif ()
if (LLVM_FOUND)
find_library (LLD_LIBRARY_TEST lldCore PATHS ${LLVM_LIBRARY_DIRS})
find_path (LLD_INCLUDE_DIR_TEST NAMES lld/Core/AbsoluteAtom.h PATHS ${LLVM_INCLUDE_DIRS})
if (NOT LLD_LIBRARY_TEST OR NOT LLD_INCLUDE_DIR_TEST)
set (LLVM_FOUND 0)
message(WARNING "liblld (${LLD_LIBRARY_TEST}, ${LLD_INCLUDE_DIR_TEST}) not found in ${LLVM_INCLUDE_DIRS} ${LLVM_LIBRARY_DIRS}. Disabling internal compiler.")
foreach(llvm_v 9 8)
if (NOT LLVM_FOUND)
find_package (LLVM ${llvm_v} CONFIG PATHS ${LLVM_PATHS})
endif ()
endif ()
endforeach ()
if (LLVM_FOUND)
# Remove dynamically-linked zlib and libedit from LLVM's dependencies:
@@ -51,30 +35,39 @@ if (ENABLE_EMBEDDED_COMPILER)
set (LLVM_FOUND 0)
set (USE_EMBEDDED_COMPILER 0)
endif ()
# TODO: fix llvm 8+ and remove:
if (LLVM_FOUND AND LLVM_VERSION_MAJOR GREATER 7)
message(WARNING "LLVM 8+ not supported yet, disabling.")
set (USE_EMBEDDED_COMPILER 0)
endif ()
else()
set (LLVM_FOUND 1)
set (USE_EMBEDDED_COMPILER 1)
set (LLVM_VERSION "7.0.0bundled")
set (LLVM_INCLUDE_DIRS
${ClickHouse_SOURCE_DIR}/contrib/llvm/llvm/include
${ClickHouse_BINARY_DIR}/contrib/llvm/llvm/include
${ClickHouse_SOURCE_DIR}/contrib/llvm/clang/include
${ClickHouse_BINARY_DIR}/contrib/llvm/clang/include
${ClickHouse_BINARY_DIR}/contrib/llvm/llvm/tools/clang/include
${ClickHouse_SOURCE_DIR}/contrib/llvm/lld/include
${ClickHouse_BINARY_DIR}/contrib/llvm/lld/include
${ClickHouse_BINARY_DIR}/contrib/llvm/llvm/tools/lld/include)
set (LLVM_LIBRARY_DIRS ${ClickHouse_BINARY_DIR}/contrib/llvm/llvm)
if (CMAKE_CURRENT_SOURCE_DIR STREQUAL CMAKE_CURRENT_BINARY_DIR)
message(WARNING "Option ENABLE_EMBEDDED_COMPILER is set but LLVM library cannot build if build directory is the same as source directory.")
set (LLVM_FOUND 0)
set (USE_EMBEDDED_COMPILER 0)
elseif (SPLIT_SHARED_LIBRARIES)
# llvm-tablegen cannot find shared libraries that we build. Probably can be easily fixed.
message(WARNING "Option ENABLE_EMBEDDED_COMPILER is not compatible with SPLIT_SHARED_LIBRARIES. Build of LLVM will be disabled.")
set (LLVM_FOUND 0)
set (USE_EMBEDDED_COMPILER 0)
elseif (NOT ARCH_AMD64)
# It's not supported yet, but you can help.
message(WARNING "Option ENABLE_EMBEDDED_COMPILER is only available for x86_64. Build of LLVM will be disabled.")
set (LLVM_FOUND 0)
set (USE_EMBEDDED_COMPILER 0)
elseif (SANITIZE STREQUAL "undefined")
# llvm-tblgen, that is used during LLVM build, doesn't work with UBSan.
message(WARNING "Option ENABLE_EMBEDDED_COMPILER does not work with UBSan, because 'llvm-tblgen' tool from LLVM has undefined behaviour. Build of LLVM will be disabled.")
set (LLVM_FOUND 0)
set (USE_EMBEDDED_COMPILER 0)
else ()
set (LLVM_FOUND 1)
set (USE_EMBEDDED_COMPILER 1)
set (LLVM_VERSION "9.0.0bundled")
set (LLVM_INCLUDE_DIRS
${ClickHouse_SOURCE_DIR}/contrib/llvm/llvm/include
${ClickHouse_BINARY_DIR}/contrib/llvm/llvm/include
)
set (LLVM_LIBRARY_DIRS ${ClickHouse_BINARY_DIR}/contrib/llvm/llvm)
endif()
endif()
if (LLVM_FOUND)
message(STATUS "LLVM version: ${LLVM_PACKAGE_VERSION}")
message(STATUS "LLVM include Directory: ${LLVM_INCLUDE_DIRS}")
message(STATUS "LLVM library Directory: ${LLVM_LIBRARY_DIRS}")
message(STATUS "LLVM C++ compiler flags: ${LLVM_CXXFLAGS}")
@@ -82,16 +75,53 @@ if (ENABLE_EMBEDDED_COMPILER)
endif()
function(llvm_libs_all REQUIRED_LLVM_LIBRARIES)
llvm_map_components_to_libnames (result all)
if (USE_STATIC_LIBRARIES OR NOT "LLVM" IN_LIST result)
list (REMOVE_ITEM result "LTO" "LLVM")
else()
set (result "LLVM")
endif ()
if (TERMCAP_LIBRARY)
list (APPEND result ${TERMCAP_LIBRARY})
endif ()
list (APPEND result ${CMAKE_DL_LIBS} ${ZLIB_LIBRARIES})
set (${REQUIRED_LLVM_LIBRARIES} ${result} PARENT_SCOPE)
endfunction()
# This list was generated by listing all LLVM libraries, compiling the binary and removing all libraries while it still compiles.
set (REQUIRED_LLVM_LIBRARIES
LLVMOrcJIT
LLVMExecutionEngine
LLVMRuntimeDyld
LLVMX86CodeGen
LLVMX86Desc
LLVMX86Info
LLVMX86Utils
LLVMAsmPrinter
LLVMDebugInfoDWARF
LLVMGlobalISel
LLVMSelectionDAG
LLVMMCDisassembler
LLVMPasses
LLVMCodeGen
LLVMipo
LLVMBitWriter
LLVMInstrumentation
LLVMScalarOpts
LLVMAggressiveInstCombine
LLVMInstCombine
LLVMVectorize
LLVMTransformUtils
LLVMTarget
LLVMAnalysis
LLVMProfileData
LLVMObject
LLVMBitReader
LLVMCore
LLVMRemarks
LLVMBitstreamReader
LLVMMCParser
LLVMMC
LLVMBinaryFormat
LLVMDebugInfoCodeView
LLVMSupport
LLVMDemangle
)
#function(llvm_libs_all REQUIRED_LLVM_LIBRARIES)
# llvm_map_components_to_libnames (result all)
# if (USE_STATIC_LIBRARIES OR NOT "LLVM" IN_LIST result)
# list (REMOVE_ITEM result "LTO" "LLVM")
# else()
# set (result "LLVM")
# endif ()
# list (APPEND result ${CMAKE_DL_LIBS} ${ZLIB_LIBRARIES})
# set (${REQUIRED_LLVM_LIBRARIES} ${result} PARENT_SCOPE)
#endfunction()


@@ -1,4 +1,4 @@
if(NOT OS_FREEBSD AND NOT APPLE)
if(NOT OS_FREEBSD AND NOT APPLE AND NOT ARCH_ARM)
option(ENABLE_S3 "Enable S3" ${ENABLE_LIBRARIES})
endif()


@@ -18,6 +18,14 @@ message(STATUS "Default libraries: ${DEFAULT_LIBS}")
set(CMAKE_CXX_STANDARD_LIBRARIES ${DEFAULT_LIBS})
set(CMAKE_C_STANDARD_LIBRARIES ${DEFAULT_LIBS})
# glibc-compatibility library relies to fixed version of libc headers
# (because minor changes in function attributes between different glibc versions will introduce incompatibilities)
# This is for x86_64. For other architectures we have separate toolchains.
if (ARCH_AMD64)
set(CMAKE_C_STANDARD_INCLUDE_DIRECTORIES ${ClickHouse_SOURCE_DIR}/contrib/libc-headers/x86_64-linux-gnu ${ClickHouse_SOURCE_DIR}/contrib/libc-headers)
set(CMAKE_CXX_STANDARD_INCLUDE_DIRECTORIES ${ClickHouse_SOURCE_DIR}/contrib/libc-headers/x86_64-linux-gnu ${ClickHouse_SOURCE_DIR}/contrib/libc-headers)
endif ()
# Global libraries
add_library(global-libs INTERFACE)


@@ -76,6 +76,9 @@ if (SANITIZE)
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -static-libubsan")
endif ()
# llvm-tblgen, that is used during LLVM build, doesn't work with UBSan.
set (ENABLE_EMBEDDED_COMPILER 0 CACHE BOOL "")
elseif (SANITIZE STREQUAL "libfuzzer")
# NOTE: Eldar Zaitov decided to name it "libfuzzer" instead of "fuzzer" to keep in mind other possible fuzzer backends.
# NOTE: no-link means that all the targets are built with instrumentation for fuzzer, but only some of them (tests) have entry point for fuzzer and it's not checked.


@@ -66,34 +66,19 @@ if (USE_INTERNAL_ZLIB_LIBRARY)
endif ()
add_subdirectory (${INTERNAL_ZLIB_NAME})
# TODO: make pull to Dead2/zlib-ng and remove:
# We should use the same defines when including zlib.h as were used when zlib was compiled
target_compile_definitions (zlib PUBLIC ZLIB_COMPAT WITH_GZFILEOP)
target_compile_definitions (zlibstatic PUBLIC ZLIB_COMPAT WITH_GZFILEOP)
if(CMAKE_SYSTEM_PROCESSOR MATCHES "x86_64" OR CMAKE_SYSTEM_PROCESSOR MATCHES "AMD64")
if (ARCH_AMD64 OR ARCH_AARCH64)
target_compile_definitions (zlib PUBLIC X86_64 UNALIGNED_OK)
target_compile_definitions (zlibstatic PUBLIC X86_64 UNALIGNED_OK)
endif ()
#set_target_properties(example PROPERTIES EXCLUDE_FROM_ALL 1)
#if (TARGET example64)
# set_target_properties(example64 PROPERTIES EXCLUDE_FROM_ALL 1)
#endif ()
#set_target_properties(minigzip PROPERTIES EXCLUDE_FROM_ALL 1)
#if (TARGET minigzip64)
# set_target_properties(minigzip64 PROPERTIES EXCLUDE_FROM_ALL 1)
#endif ()
endif ()
if (USE_INTERNAL_CCTZ_LIBRARY)
add_subdirectory (cctz-cmake)
endif ()
if (ENABLE_TCMALLOC AND USE_INTERNAL_GPERFTOOLS_LIBRARY)
add_subdirectory (libtcmalloc)
endif ()
if (ENABLE_JEMALLOC AND USE_INTERNAL_JEMALLOC_LIBRARY)
add_subdirectory (jemalloc-cmake)
endif ()
@@ -175,10 +160,7 @@ if (USE_INTERNAL_PARQUET_LIBRARY_NATIVE_CMAKE)
set (ARROW_VERBOSE_THIRDPARTY_BUILD ON CACHE INTERNAL "")
set (ARROW_BUILD_SHARED 1 CACHE INTERNAL "")
set (ARROW_BOOST_HEADER_ONLY ON CACHE INTERNAL "")
#set (BOOST_INCLUDEDIR Boost_INCLUDE_DIRS)
set (Boost_FOUND 1 CACHE INTERNAL "")
#set (ZLIB_HOME ${ZLIB_INCLUDE_DIR})
#set (ZLIB_FOUND 1)
if (MAKE_STATIC_LIBRARIES)
set (PARQUET_ARROW_LINKAGE "static" CACHE INTERNAL "")
set (ARROW_TEST_LINKAGE "static" CACHE INTERNAL "")
@@ -218,6 +200,11 @@ else()
endif()
add_subdirectory(arrow-cmake)
# The library is large - avoid bloat.
target_compile_options (${ARROW_LIBRARY} PRIVATE -g0)
target_compile_options (${THRIFT_LIBRARY} PRIVATE -g0)
target_compile_options (${PARQUET_LIBRARY} PRIVATE -g0)
endif()
endif()
@@ -254,28 +241,14 @@ elseif(GTEST_SRC_DIR)
target_compile_definitions(gtest INTERFACE GTEST_HAS_POSIX_RE=0)
endif()
if (USE_INTERNAL_LLVM_LIBRARY)
file(GENERATE OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/empty.cpp CONTENT " ")
add_library(LLVM0 ${CMAKE_CURRENT_BINARY_DIR}/empty.cpp) # silly cmake bug fix
add_library(LLVMOFF ${CMAKE_CURRENT_BINARY_DIR}/empty.cpp)
if (USE_EMBEDDED_COMPILER AND USE_INTERNAL_LLVM_LIBRARY)
# ld: unknown option: --color-diagnostics
if (APPLE)
set (LINKER_SUPPORTS_COLOR_DIAGNOSTICS 0 CACHE INTERNAL "")
endif ()
set (LLVM_ENABLE_EH 1 CACHE INTERNAL "")
set (LLVM_ENABLE_RTTI 1 CACHE INTERNAL "")
set (LLVM_INCLUDE_TESTS 0 CACHE INTERNAL "")
set (LLVM_INCLUDE_EXAMPLES 0 CACHE INTERNAL "")
set (LLVM_INCLUDE_TOOLS 0 CACHE INTERNAL "")
set (LLVM_INSTALL_TOOLCHAIN_ONLY 0 CACHE INTERNAL "")
set (CLANG_BUILT_STANDALONE 0 CACHE INTERNAL "")
set (LLDB_BUILT_STANDALONE 0 CACHE INTERNAL "")
set (CLANG_ENABLE_STATIC_ANALYZER 0 CACHE INTERNAL "")
set (CLANG_ENABLE_ARCMT 0 CACHE INTERNAL "")
set (CLANG_BUILD_TOOLS 0 CACHE INTERNAL "")
set (BENCHMARK_ENABLE_GTEST_TESTS 0 CACHE INTERNAL "")
set (BENCHMARK_ENABLE_ASSEMBLY_TESTS 0 CACHE INTERNAL "")
set (LLVM_TARGETS_TO_BUILD "X86;AArch64" CACHE INTERNAL "")
set (LLVM_TARGETS_TO_BUILD "X86;AArch64" CACHE STRING "")
add_subdirectory (llvm/llvm)
endif ()
@@ -320,6 +293,11 @@ if (USE_INTERNAL_AWS_S3_LIBRARY)
set (CMAKE_REQUIRED_FLAGS ${save_CMAKE_REQUIRED_FLAGS})
set (CMAKE_CMAKE_MODULE_PATH ${save_CMAKE_MODULE_PATH})
add_subdirectory(aws-s3-cmake)
# The library is large - avoid bloat.
target_compile_options (aws_s3 PRIVATE -g0)
target_compile_options (aws_s3_checksums PRIVATE -g0)
target_compile_options (libcurl PRIVATE -g0)
endif ()
if (USE_BASE64)
@@ -328,6 +306,13 @@ endif()
if (USE_INTERNAL_HYPERSCAN_LIBRARY)
add_subdirectory (hyperscan)
# The library is large - avoid bloat.
if (USE_STATIC_LIBRARIES)
target_compile_options (hs PRIVATE -g0)
else ()
target_compile_options (hs_shared PRIVATE -g0)
endif ()
endif()
if (USE_SIMDJSON)
@@ -341,7 +326,3 @@ endif()
if (USE_FASTOPS)
add_subdirectory (fastops-cmake)
endif()
#if (USE_INTERNAL_ORC_LIBRARY)
# add_subdirectory(orc-cmake)
#endif ()


@@ -54,17 +54,6 @@ set_target_properties(capnp
)
target_link_libraries(capnp PUBLIC kj)
# The library has substandard code
if (COMPILER_GCC)
set (SUPPRESS_WARNINGS -Wno-non-virtual-dtor -Wno-sign-compare -Wno-strict-aliasing -Wno-maybe-uninitialized
-Wno-deprecated-declarations -Wno-class-memaccess)
elseif (COMPILER_CLANG)
set (SUPPRESS_WARNINGS -Wno-non-virtual-dtor -Wno-sign-compare -Wno-strict-aliasing -Wno-deprecated-declarations)
endif ()
target_compile_options(kj PRIVATE ${SUPPRESS_WARNINGS})
target_compile_options(capnp PRIVATE ${SUPPRESS_WARNINGS})
set (CAPNPC_SRCS
${CAPNPROTO_SOURCE_DIR}/capnp/compiler/type-id.c++
${CAPNPROTO_SOURCE_DIR}/capnp/compiler/error-reporter.c++
@@ -80,3 +69,15 @@ set (CAPNPC_SRCS
add_library(capnpc ${CAPNPC_SRCS})
target_link_libraries(capnpc PUBLIC capnp)
# The library has substandard code
if (COMPILER_GCC)
set (SUPPRESS_WARNINGS -Wno-non-virtual-dtor -Wno-sign-compare -Wno-strict-aliasing -Wno-maybe-uninitialized
-Wno-deprecated-declarations -Wno-class-memaccess)
elseif (COMPILER_CLANG)
set (SUPPRESS_WARNINGS -Wno-non-virtual-dtor -Wno-sign-compare -Wno-strict-aliasing -Wno-deprecated-declarations)
endif ()
target_compile_options(kj PRIVATE ${SUPPRESS_WARNINGS})
target_compile_options(capnp PRIVATE ${SUPPRESS_WARNINGS})
target_compile_options(capnpc PRIVATE ${SUPPRESS_WARNINGS})


@@ -1,6 +1,6 @@
add_library(roaring
roaring.c
roaring/roaring.h
roaring/roaring.hh)
roaring.c
roaring/roaring.h
roaring/roaring.hh)
target_include_directories (roaring PUBLIC ${CMAKE_CURRENT_SOURCE_DIR})
target_include_directories (roaring SYSTEM PUBLIC ${CMAKE_CURRENT_SOURCE_DIR})


@@ -3,9 +3,4 @@ add_library(btrie
include/btrie.h
)
target_include_directories (btrie PUBLIC include)
if (ENABLE_TESTS)
add_executable(test_btrie test/test_btrie.c)
target_link_libraries(test_btrie btrie)
endif ()
target_include_directories (btrie SYSTEM PUBLIC include)

contrib/libc-headers vendored Submodule

@@ -0,0 +1 @@
Subproject commit cd82fd9d8eefe50a47a0adf7c617c3ea7d558d11

contrib/libcxx vendored

@@ -1 +1 @@
Subproject commit 9807685d51db467e097ad5eb8d5c2c16922794b2
Subproject commit f7c63235238a71b7e0563fab8c7c5ec1b54831f6


@@ -1,41 +1,45 @@
include(CheckCXXCompilerFlag)
set(LIBCXX_SOURCE_DIR ${ClickHouse_SOURCE_DIR}/contrib/libcxx)
set(SRCS
${LIBCXX_SOURCE_DIR}/src/optional.cpp
${LIBCXX_SOURCE_DIR}/src/variant.cpp
${LIBCXX_SOURCE_DIR}/src/chrono.cpp
${LIBCXX_SOURCE_DIR}/src/thread.cpp
${LIBCXX_SOURCE_DIR}/src/experimental/memory_resource.cpp
${LIBCXX_SOURCE_DIR}/src/iostream.cpp
${LIBCXX_SOURCE_DIR}/src/strstream.cpp
${LIBCXX_SOURCE_DIR}/src/ios.cpp
${LIBCXX_SOURCE_DIR}/src/future.cpp
${LIBCXX_SOURCE_DIR}/src/shared_mutex.cpp
${LIBCXX_SOURCE_DIR}/src/condition_variable.cpp
${LIBCXX_SOURCE_DIR}/src/hash.cpp
${LIBCXX_SOURCE_DIR}/src/string.cpp
${LIBCXX_SOURCE_DIR}/src/debug.cpp
${LIBCXX_SOURCE_DIR}/src/stdexcept.cpp
${LIBCXX_SOURCE_DIR}/src/utility.cpp
${LIBCXX_SOURCE_DIR}/src/any.cpp
${LIBCXX_SOURCE_DIR}/src/exception.cpp
${LIBCXX_SOURCE_DIR}/src/memory.cpp
${LIBCXX_SOURCE_DIR}/src/new.cpp
${LIBCXX_SOURCE_DIR}/src/valarray.cpp
${LIBCXX_SOURCE_DIR}/src/vector.cpp
${LIBCXX_SOURCE_DIR}/src/algorithm.cpp
${LIBCXX_SOURCE_DIR}/src/functional.cpp
${LIBCXX_SOURCE_DIR}/src/regex.cpp
${LIBCXX_SOURCE_DIR}/src/any.cpp
${LIBCXX_SOURCE_DIR}/src/bind.cpp
${LIBCXX_SOURCE_DIR}/src/mutex.cpp
${LIBCXX_SOURCE_DIR}/src/charconv.cpp
${LIBCXX_SOURCE_DIR}/src/typeinfo.cpp
${LIBCXX_SOURCE_DIR}/src/locale.cpp
${LIBCXX_SOURCE_DIR}/src/filesystem/operations.cpp
${LIBCXX_SOURCE_DIR}/src/filesystem/int128_builtins.cpp
${LIBCXX_SOURCE_DIR}/src/chrono.cpp
${LIBCXX_SOURCE_DIR}/src/condition_variable.cpp
${LIBCXX_SOURCE_DIR}/src/condition_variable_destructor.cpp
${LIBCXX_SOURCE_DIR}/src/debug.cpp
${LIBCXX_SOURCE_DIR}/src/exception.cpp
${LIBCXX_SOURCE_DIR}/src/experimental/memory_resource.cpp
${LIBCXX_SOURCE_DIR}/src/filesystem/directory_iterator.cpp
${LIBCXX_SOURCE_DIR}/src/system_error.cpp
${LIBCXX_SOURCE_DIR}/src/filesystem/int128_builtins.cpp
${LIBCXX_SOURCE_DIR}/src/filesystem/operations.cpp
${LIBCXX_SOURCE_DIR}/src/functional.cpp
${LIBCXX_SOURCE_DIR}/src/future.cpp
${LIBCXX_SOURCE_DIR}/src/hash.cpp
${LIBCXX_SOURCE_DIR}/src/ios.cpp
${LIBCXX_SOURCE_DIR}/src/iostream.cpp
${LIBCXX_SOURCE_DIR}/src/locale.cpp
${LIBCXX_SOURCE_DIR}/src/memory.cpp
${LIBCXX_SOURCE_DIR}/src/mutex.cpp
${LIBCXX_SOURCE_DIR}/src/mutex_destructor.cpp
${LIBCXX_SOURCE_DIR}/src/new.cpp
${LIBCXX_SOURCE_DIR}/src/optional.cpp
${LIBCXX_SOURCE_DIR}/src/random.cpp
${LIBCXX_SOURCE_DIR}/src/regex.cpp
${LIBCXX_SOURCE_DIR}/src/shared_mutex.cpp
${LIBCXX_SOURCE_DIR}/src/stdexcept.cpp
${LIBCXX_SOURCE_DIR}/src/string.cpp
${LIBCXX_SOURCE_DIR}/src/strstream.cpp
${LIBCXX_SOURCE_DIR}/src/system_error.cpp
${LIBCXX_SOURCE_DIR}/src/thread.cpp
${LIBCXX_SOURCE_DIR}/src/typeinfo.cpp
${LIBCXX_SOURCE_DIR}/src/utility.cpp
${LIBCXX_SOURCE_DIR}/src/valarray.cpp
${LIBCXX_SOURCE_DIR}/src/variant.cpp
${LIBCXX_SOURCE_DIR}/src/vector.cpp
)
add_library(cxx ${SRCS})
@@ -43,8 +47,15 @@ add_library(cxx ${SRCS})
target_include_directories(cxx SYSTEM BEFORE PUBLIC $<BUILD_INTERFACE:${LIBCXX_SOURCE_DIR}/include>)
target_compile_definitions(cxx PRIVATE -D_LIBCPP_BUILDING_LIBRARY -DLIBCXX_BUILDING_LIBCXXABI)
target_compile_options(cxx PUBLIC -nostdinc++ -Wno-reserved-id-macro)
if (OS_DARWIN AND (NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 9) AND (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 11))
target_compile_options(cxx PUBLIC $<$<COMPILE_LANGUAGE:CXX>:-nostdinc++>)
check_cxx_compiler_flag(-Wreserved-id-macro HAVE_WARNING_RESERVED_ID_MACRO)
if (HAVE_WARNING_RESERVED_ID_MACRO)
target_compile_options(cxx PUBLIC -Wno-reserved-id-macro)
endif ()
check_cxx_compiler_flag(-Wctad-maybe-unsupported HAVE_WARNING_CTAD_MAYBE_UNSUPPORTED)
if (HAVE_WARNING_CTAD_MAYBE_UNSUPPORTED)
target_compile_options(cxx PUBLIC -Wno-ctad-maybe-unsupported)
endif ()

contrib/libcxxabi vendored

@@ -1 +1 @@
Subproject commit d56efcc7a52739518dbe7df9e743073e00951fa1
Subproject commit c26cf36f8387c5edf2cabb4a630f0975c35aa9fb


@@ -1,2 +0,0 @@
google-perftools@googlegroups.com


@@ -1,80 +0,0 @@
message (STATUS "Building: tcmalloc_minimal_internal")
add_library (tcmalloc_minimal_internal
./src/malloc_hook.cc
./src/base/spinlock_internal.cc
./src/base/spinlock.cc
./src/base/dynamic_annotations.c
./src/base/linuxthreads.cc
./src/base/elf_mem_image.cc
./src/base/vdso_support.cc
./src/base/sysinfo.cc
./src/base/low_level_alloc.cc
./src/base/thread_lister.c
./src/base/logging.cc
./src/base/atomicops-internals-x86.cc
./src/memfs_malloc.cc
./src/tcmalloc.cc
./src/malloc_extension.cc
./src/thread_cache.cc
./src/symbolize.cc
./src/page_heap.cc
./src/maybe_threads.cc
./src/central_freelist.cc
./src/static_vars.cc
./src/sampler.cc
./src/internal_logging.cc
./src/system-alloc.cc
./src/span.cc
./src/common.cc
./src/stacktrace.cc
./src/stack_trace_table.cc
./src/heap-checker.cc
./src/heap-checker-bcad.cc
./src/heap-profile-table.cc
./src/raw_printer.cc
./src/memory_region_map.cc
)
target_compile_options (tcmalloc_minimal_internal
PRIVATE
-DNO_TCMALLOC_SAMPLES
-DNDEBUG
-DNO_FRAME_POINTER
-Wwrite-strings
-Wno-sign-compare
-Wno-unused-result
-Wno-deprecated-declarations
-Wno-unused-function
-Wno-unused-private-field
PUBLIC
-fno-builtin-malloc
-fno-builtin-free
-fno-builtin-realloc
-fno-builtin-calloc
-fno-builtin-cfree
-fno-builtin-memalign
-fno-builtin-posix_memalign
-fno-builtin-valloc
-fno-builtin-pvalloc
)
if (CMAKE_CXX_COMPILER_ID STREQUAL "Clang" AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 3.9.1)
target_compile_options(tcmalloc_minimal_internal PUBLIC -Wno-dynamic-exception-spec )
endif ()
if (CMAKE_SYSTEM MATCHES "FreeBSD" AND CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
target_compile_options(tcmalloc_minimal_internal PUBLIC -Wno-unused-but-set-variable)
endif ()
if (CMAKE_SYSTEM MATCHES "FreeBSD")
target_compile_definitions(tcmalloc_minimal_internal PUBLIC _GNU_SOURCE)
endif ()
target_include_directories (tcmalloc_minimal_internal PUBLIC include)
target_include_directories (tcmalloc_minimal_internal PRIVATE src)
find_package (Threads)
target_link_libraries (tcmalloc_minimal_internal ${CMAKE_THREAD_LIBS_INIT})


@@ -1,28 +0,0 @@
Copyright (c) 2005, Google Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


@@ -1,8 +0,0 @@
https://github.com/gperftools/gperftools/commit/dde32f8bbc95312379f9f5a651799815bb6327c5
Several modifications:
1. Disabled TCMALLOC_AGGRESSIVE_DECOMMIT by default. It is important.
2. Using only the files for the tcmalloc_minimal build (./configure --enable-minimal).
3. Using some compiler flags from the project.
4. Removed a warning about an unused variable when building with NDEBUG (by default).
5. Including config.h with a relative path.


@@ -1,422 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2005, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
// Author: Maxim Lifantsev (with design ideas by Sanjay Ghemawat)
//
//
// Module for detecting heap (memory) leaks.
//
// For full(er) information, see doc/heap_checker.html
//
// This module can be linked into programs with
// no slowdown caused by this unless you activate the leak-checker:
//
// 1. Set the environment variable HEAPCHECK to _type_ before
// running the program.
//
// _type_ is usually "normal" but can also be "minimal", "strict", or
// "draconian". (See the html file for other options, like 'local'.)
//
// After that, just run your binary. If the heap-checker detects
// a memory leak at program-exit, it will print instructions on how
// to track down the leak.
#ifndef BASE_HEAP_CHECKER_H_
#define BASE_HEAP_CHECKER_H_
#include <sys/types.h> // for size_t
// I can't #include config.h in this public API file, but I should
// really use configure (and make malloc_extension.h a .in file) to
// figure out if the system has stdint.h or not. But I'm lazy, so
// for now I'm assuming it's a problem only with MSVC.
#ifndef _MSC_VER
#include <stdint.h> // for uintptr_t
#endif
#include <stdarg.h> // for va_list
#include <vector>
// Annoying stuff for windows -- makes sure clients can import these functions
#ifndef PERFTOOLS_DLL_DECL
# ifdef _WIN32
# define PERFTOOLS_DLL_DECL __declspec(dllimport)
# else
# define PERFTOOLS_DLL_DECL
# endif
#endif
// The class is thread-safe with respect to all the provided static methods,
// as well as HeapLeakChecker objects: they can be accessed by multiple threads.
class PERFTOOLS_DLL_DECL HeapLeakChecker {
public:
// ----------------------------------------------------------------------- //
// Static functions for working with (whole-program) leak checking.
// If heap leak checking is currently active in some mode
// e.g. if leak checking was started (and is still active now)
// due to HEAPCHECK=... defined in the environment.
// The return value reflects whether HeapLeakChecker objects manually
// constructed right now will be doing leak checking or nothing.
// Note that we can go from active to inactive state during InitGoogle()
// if FLAGS_heap_check gets set to "" by some code before/during InitGoogle().
static bool IsActive();
// Return pointer to the whole-program checker if it has been created
// and NULL otherwise.
// Once GlobalChecker() returns non-NULL that object will not disappear and
// will be returned by all later GlobalChecker calls.
// This is mainly to access BytesLeaked() and ObjectsLeaked() (see below)
// for the whole-program checker after one calls NoGlobalLeaks()
// or similar and gets false.
static HeapLeakChecker* GlobalChecker();
// Do whole-program leak check now (if it was activated for this binary);
// return false only if it was activated and has failed.
// The mode of the check is controlled by the command-line flags.
// This method can be called repeatedly.
// Things like GlobalChecker()->SameHeap() can also be called explicitly
// to do the desired flavor of the check.
static bool NoGlobalLeaks();
// If the whole-program checker is active,
// cancel its automatic execution after main() exits.
// This requires that some leak check (e.g. NoGlobalLeaks())
// has been called at least once on the whole-program checker.
static void CancelGlobalCheck();
// ----------------------------------------------------------------------- //
// Non-static functions for starting and doing leak checking.
// Start checking and name the leak check performed.
// The name is used in naming dumped profiles
// and needs to be unique only within your binary.
// It must also be a string that can be a part of a file name,
// in particular not contain path expressions.
explicit HeapLeakChecker(const char *name);
// Destructor (verifies that some *NoLeaks or *SameHeap method
// has been called at least once).
~HeapLeakChecker();
// These used to be different but are all the same now: they return
// true iff all memory allocated since this HeapLeakChecker object
// was constructed is still reachable from global state.
//
// Because we fork to convert addresses to symbol-names, and forking
// is not thread-safe, and we may be called in a threaded context,
// we do not try to symbolize addresses when called manually.
bool NoLeaks() { return DoNoLeaks(DO_NOT_SYMBOLIZE); }
// These forms are obsolete; use NoLeaks() instead.
// TODO(csilvers): mark as DEPRECATED.
bool QuickNoLeaks() { return NoLeaks(); }
bool BriefNoLeaks() { return NoLeaks(); }
bool SameHeap() { return NoLeaks(); }
bool QuickSameHeap() { return NoLeaks(); }
bool BriefSameHeap() { return NoLeaks(); }
// Detailed information about the number of leaked bytes and objects
// (both of these can be negative as well).
// These are available only after a *SameHeap or *NoLeaks
// method has been called.
// Note that it's possible for both of these to be zero
// while SameHeap() or NoLeaks() returned false in case
// of a heap state change that is significant
// but preserves the byte and object counts.
ssize_t BytesLeaked() const;
ssize_t ObjectsLeaked() const;
// ----------------------------------------------------------------------- //
// Static helpers to make us ignore certain leaks.
// Scoped helper class. Should be allocated on the stack inside a
// block of code. Any heap allocations done in the code block
// covered by the scoped object (including in nested function calls
// done by the code block) will not be reported as leaks. This is
// the recommended replacement for the GetDisableChecksStart() and
// DisableChecksToHereFrom() routines below.
//
// Example:
// void Foo() {
// HeapLeakChecker::Disabler disabler;
// ... code that allocates objects whose leaks should be ignored ...
// }
//
// REQUIRES: Destructor runs in same thread as constructor
class Disabler {
public:
Disabler();
~Disabler();
private:
Disabler(const Disabler&); // disallow copy
void operator=(const Disabler&); // and assign
};
// Ignore an object located at 'ptr' (can go at the start or into the object)
// as well as all heap objects (transitively) referenced from it for the
// purposes of heap leak checking. Returns 'ptr' so that one can write
// static T* obj = IgnoreObject(new T(...));
//
// If 'ptr' does not point to an active allocated object at the time of this
// call, it is ignored; but if it does, the object must not get deleted from
// the heap later on.
//
// See also HiddenPointer, below, if you need to prevent a pointer from
// being traversed by the heap checker but do not wish to transitively
// whitelist objects referenced through it.
template <typename T>
static T* IgnoreObject(T* ptr) {
DoIgnoreObject(static_cast<const void*>(const_cast<const T*>(ptr)));
return ptr;
}
// Undo what an earlier IgnoreObject() call promised and asked to do.
// At the time of this call 'ptr' must point at or inside of an active
// allocated object which was previously registered with IgnoreObject().
static void UnIgnoreObject(const void* ptr);
// ----------------------------------------------------------------------- //
// Internal types defined in .cc
class Allocator;
struct RangeValue;
private:
// ----------------------------------------------------------------------- //
// Various helpers
// Create the name of the heap profile file.
// Should be deleted via Allocator::Free().
char* MakeProfileNameLocked();
// Helper for constructors
void Create(const char *name, bool make_start_snapshot);
enum ShouldSymbolize { SYMBOLIZE, DO_NOT_SYMBOLIZE };
// Helper for *NoLeaks and *SameHeap
bool DoNoLeaks(ShouldSymbolize should_symbolize);
// Helper for NoGlobalLeaks, also called by the global destructor.
static bool NoGlobalLeaksMaybeSymbolize(ShouldSymbolize should_symbolize);
// These used to be public, but they are now deprecated.
// Will remove entirely when all internal uses are fixed.
// In the meantime, use friendship so the unittest can still test them.
static void* GetDisableChecksStart();
static void DisableChecksToHereFrom(const void* start_address);
static void DisableChecksIn(const char* pattern);
friend void RangeDisabledLeaks();
friend void NamedTwoDisabledLeaks();
friend void* RunNamedDisabledLeaks(void*);
friend void TestHeapLeakCheckerNamedDisabling();
// Actually implements IgnoreObject().
static void DoIgnoreObject(const void* ptr);
// Disable checks based on stack trace entry at a depth <=
// max_depth. Used to hide allocations done inside some special
// libraries.
static void DisableChecksFromToLocked(const void* start_address,
const void* end_address,
int max_depth);
// Helper for DoNoLeaks to ignore all objects reachable from all live data
static void IgnoreAllLiveObjectsLocked(const void* self_stack_top);
// Callback we pass to TCMalloc_ListAllProcessThreads (see thread_lister.h)
// that is invoked when all threads of our process are found and stopped.
// The call back does the things needed to ignore live data reachable from
// thread stacks and registers for all our threads
// as well as do other global-live-data ignoring
// (via IgnoreNonThreadLiveObjectsLocked)
// during the quiet state of all threads being stopped.
// For the argument meaning see the comment by TCMalloc_ListAllProcessThreads.
// Here we only use num_threads and thread_pids, that TCMalloc_ListAllProcessThreads
// fills for us with the number and pids of all the threads of our process
// it found and attached to.
static int IgnoreLiveThreadsLocked(void* parameter,
int num_threads,
pid_t* thread_pids,
va_list ap);
// Helper for IgnoreAllLiveObjectsLocked and IgnoreLiveThreadsLocked
// that we prefer to execute from IgnoreLiveThreadsLocked
// while all threads are stopped.
// This helper does live object discovery and ignoring
// for all objects that are reachable from everything
// not related to thread stacks and registers.
static void IgnoreNonThreadLiveObjectsLocked();
// Helper for IgnoreNonThreadLiveObjectsLocked and IgnoreLiveThreadsLocked
// to discover and ignore all heap objects
// reachable from currently considered live objects
// (live_objects static global variable in our .cc file).
// "name", "name2" are two strings that we print one after another
// in a debug message to describe what kind of live object sources
// are being used.
static void IgnoreLiveObjectsLocked(const char* name, const char* name2);
// Do the overall whole-program heap leak check if needed;
// returns true when did the leak check.
static bool DoMainHeapCheck();
// Type of task for UseProcMapsLocked
enum ProcMapsTask {
RECORD_GLOBAL_DATA,
DISABLE_LIBRARY_ALLOCS
};
// Success/Error Return codes for UseProcMapsLocked.
enum ProcMapsResult {
PROC_MAPS_USED,
CANT_OPEN_PROC_MAPS,
NO_SHARED_LIBS_IN_PROC_MAPS
};
// Read /proc/self/maps, parse it, and do the 'proc_maps_task' for each line.
static ProcMapsResult UseProcMapsLocked(ProcMapsTask proc_maps_task);
// A ProcMapsTask to disable allocations from 'library'
// that is mapped to [start_address..end_address)
// (only if library is a certain system library).
static void DisableLibraryAllocsLocked(const char* library,
uintptr_t start_address,
uintptr_t end_address);
// Return true iff "*ptr" points to a heap object
// ("*ptr" can point at the start or inside of a heap object
// so that this works e.g. for pointers to C++ arrays, C++ strings,
// multiple-inherited objects, or pointers to members).
// We also fill *object_size for this object then
// and we move "*ptr" to point to the very start of the heap object.
static inline bool HaveOnHeapLocked(const void** ptr, size_t* object_size);
// Helper to shutdown heap leak checker when it's not needed
// or can't function properly.
static void TurnItselfOffLocked();
// Internally-used c-tor to start whole-executable checking.
HeapLeakChecker();
// ----------------------------------------------------------------------- //
// Friends and externally accessed helpers.
// Helper for VerifyHeapProfileTableStackGet in the unittest
// to get the recorded allocation caller for ptr,
// which must be a heap object.
static const void* GetAllocCaller(void* ptr);
friend void VerifyHeapProfileTableStackGet();
// This gets to execute before constructors for all global objects
static void BeforeConstructorsLocked();
friend void HeapLeakChecker_BeforeConstructors();
// This gets to execute after destructors for all global objects
friend void HeapLeakChecker_AfterDestructors();
// Full starting of recommended whole-program checking.
friend void HeapLeakChecker_InternalInitStart();
// Runs REGISTER_HEAPCHECK_CLEANUP cleanups and potentially
// calls DoMainHeapCheck
friend void HeapLeakChecker_RunHeapCleanups();
// ----------------------------------------------------------------------- //
// Member data.
class SpinLock* lock_; // to make HeapLeakChecker objects thread-safe
const char* name_; // our remembered name (we own it)
// NULL means this leak checker is a noop
// Snapshot taken when the checker was created. May be NULL
// for the global heap checker object. We use void* instead of
// HeapProfileTable::Snapshot* to avoid including heap-profile-table.h.
void* start_snapshot_;
bool has_checked_; // if we have done the leak check, so these are ready:
ssize_t inuse_bytes_increase_; // bytes-in-use increase for this checker
ssize_t inuse_allocs_increase_; // allocations-in-use increase
// for this checker
bool keep_profiles_; // iff we should keep the heap profiles we've made
// ----------------------------------------------------------------------- //
// Disallow "evil" constructors.
HeapLeakChecker(const HeapLeakChecker&);
void operator=(const HeapLeakChecker&);
};
// Holds a pointer that will not be traversed by the heap checker.
// Contrast with HeapLeakChecker::IgnoreObject(o), in which o and
// all objects reachable from o are ignored by the heap checker.
template <class T>
class HiddenPointer {
public:
explicit HiddenPointer(T* t)
: masked_t_(reinterpret_cast<uintptr_t>(t) ^ kHideMask) {
}
// Returns unhidden pointer. Be careful where you save the result.
T* get() const { return reinterpret_cast<T*>(masked_t_ ^ kHideMask); }
private:
// Arbitrary value, but not such that xor'ing with it is likely
// to map one valid pointer to another valid pointer:
static const uintptr_t kHideMask =
static_cast<uintptr_t>(0xF03A5F7BF03A5F7Bll);
uintptr_t masked_t_;
};
// A class that exists solely to run its destructor. This class should not be
// used directly, but instead by the REGISTER_HEAPCHECK_CLEANUP macro below.
class PERFTOOLS_DLL_DECL HeapCleaner {
public:
typedef void (*void_function)(void);
HeapCleaner(void_function f);
static void RunHeapCleanups();
private:
static std::vector<void_function>* heap_cleanups_;
};
// A macro to declare module heap check cleanup tasks
// (they run only if we are doing heap leak checking.)
// 'body' should be the cleanup code to run. 'name' doesn't matter,
// but must be unique amongst all REGISTER_HEAPCHECK_CLEANUP calls.
#define REGISTER_HEAPCHECK_CLEANUP(name, body) \
namespace { \
void heapcheck_cleanup_##name() { body; } \
static HeapCleaner heapcheck_cleaner_##name(&heapcheck_cleanup_##name); \
}
#endif // BASE_HEAP_CHECKER_H_
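
For reference, a minimal usage sketch of the HeapLeakChecker API documented in the deleted header above. It is illustrative only and not part of this commit; it assumes a binary linked against gperftools with the usual public header path, run with HEAPCHECK=normal in the environment, and the function name CheckedPhase is hypothetical.

#include <gperftools/heap-checker.h>  // assumed header path, not from this diff

void CheckedPhase() {  // hypothetical function, for illustration only
  HeapLeakChecker checker("phase1");  // the name is used in dumped profile file names
  {
    HeapLeakChecker::Disabler disabler;  // leaks from allocations in this scope are ignored
    // ... allocations whose leaks should be ignored ...
  }
  // ... code under test ...
  if (!checker.NoLeaks()) {
    // BytesLeaked()/ObjectsLeaked() are valid only after a *NoLeaks or *SameHeap call.
    ssize_t leaked = checker.BytesLeaked();
    (void)leaked;  // report as appropriate
  }
  // The destructor verifies that some *NoLeaks/*SameHeap method was called.
}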


@@ -1,105 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2005, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ---
* Author: Sanjay Ghemawat
*
* Module for heap-profiling.
*
* For full(er) information, see doc/heapprofile.html
*
* This module can be linked into your program with
* no slowdown caused by this unless you activate the profiler
* using one of the following methods:
*
* 1. Before starting the program, set the environment variable
* "HEAPPROFILE" to be the name of the file to which the profile
* data should be written.
*
* 2. Programmatically, start and stop the profiler using the
* routines "HeapProfilerStart(filename)" and "HeapProfilerStop()".
*
*/
#ifndef BASE_HEAP_PROFILER_H_
#define BASE_HEAP_PROFILER_H_
#include <stddef.h>
/* Annoying stuff for windows; makes sure clients can import these functions */
#ifndef PERFTOOLS_DLL_DECL
# ifdef _WIN32
# define PERFTOOLS_DLL_DECL __declspec(dllimport)
# else
# define PERFTOOLS_DLL_DECL
# endif
#endif
/* All this code should be usable from within C apps. */
#ifdef __cplusplus
extern "C" {
#endif
/* Start profiling and arrange to write profile data to file names
* of the form: "prefix.0000", "prefix.0001", ...
*/
PERFTOOLS_DLL_DECL void HeapProfilerStart(const char* prefix);
/* Returns non-zero if we are currently profiling the heap. (Returns
* an int rather than a bool so it's usable from C.) This is true
* between calls to HeapProfilerStart() and HeapProfilerStop(), and
* also if the program has been run with HEAPPROFILE, or some other
* way to turn on whole-program profiling.
*/
int IsHeapProfilerRunning();
/* Stop heap profiling. Can be restarted again with HeapProfilerStart(),
* but the currently accumulated profiling information will be cleared.
*/
PERFTOOLS_DLL_DECL void HeapProfilerStop();
/* Dump a profile now - can be used for dumping at a hopefully
* quiescent state in your program, in order to more easily track down
* memory leaks. Will include the reason in the logged message
*/
PERFTOOLS_DLL_DECL void HeapProfilerDump(const char *reason);
/* Generate current heap profiling information.
* Returns an empty string when heap profiling is not active.
* The returned pointer is a '\0'-terminated string allocated using malloc()
* and should be free()-ed as soon as the caller does not need it anymore.
*/
PERFTOOLS_DLL_DECL char* GetHeapProfile();
#ifdef __cplusplus
} // extern "C"
#endif
#endif /* BASE_HEAP_PROFILER_H_ */
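
For reference, a minimal sketch of the programmatic interface (method 2) described in the deleted header above. It is illustrative only and not part of this commit; it assumes the usual gperftools header path and a binary linked against a tcmalloc build that provides the heap profiler, and ProfileWorkload is a hypothetical name.

#include <gperftools/heap-profiler.h>  // assumed header path, not from this diff

void ProfileWorkload() {  // hypothetical function, for illustration only
  HeapProfilerStart("/tmp/myprog");    // data goes to /tmp/myprog.0000, /tmp/myprog.0001, ...
  // ... workload to profile ...
  HeapProfilerDump("after workload");  // the reason string appears in the logged message
  if (IsHeapProfilerRunning())         // non-zero between Start() and Stop()
    HeapProfilerStop();                // clears the accumulated profiling state
}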


@@ -1,434 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2005, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
// Author: Sanjay Ghemawat <opensource@google.com>
//
// Extra extensions exported by some malloc implementations. These
// extensions are accessed through a virtual base class so an
// application can link against a malloc that does not implement these
// extensions, and it will get default versions that do nothing.
//
// NOTE FOR C USERS: If you wish to use this functionality from within
// a C program, see malloc_extension_c.h.
#ifndef BASE_MALLOC_EXTENSION_H_
#define BASE_MALLOC_EXTENSION_H_
#include <stddef.h>
// I can't #include config.h in this public API file, but I should
// really use configure (and make malloc_extension.h a .in file) to
// figure out if the system has stdint.h or not. But I'm lazy, so
// for now I'm assuming it's a problem only with MSVC.
#ifndef _MSC_VER
#include <stdint.h>
#endif
#include <string>
#include <vector>
// Annoying stuff for windows -- makes sure clients can import these functions
#ifndef PERFTOOLS_DLL_DECL
# ifdef _WIN32
# define PERFTOOLS_DLL_DECL __declspec(dllimport)
# else
# define PERFTOOLS_DLL_DECL
# endif
#endif
static const int kMallocHistogramSize = 64;
// One day, we could support other types of writers (perhaps for C?)
typedef std::string MallocExtensionWriter;
namespace base {
struct MallocRange;
}
// Interface to a pluggable system allocator.
class PERFTOOLS_DLL_DECL SysAllocator {
public:
SysAllocator() {
}
virtual ~SysAllocator();
// Allocates "size"-byte of memory from system aligned with "alignment".
// Returns NULL if failed. Otherwise, the returned pointer p up to and
// including (p + actual_size -1) have been allocated.
virtual void* Alloc(size_t size, size_t *actual_size, size_t alignment) = 0;
};
// The default implementations of the following routines do nothing.
// All implementations should be thread-safe; the current one
// (TCMallocImplementation) is.
class PERFTOOLS_DLL_DECL MallocExtension {
public:
virtual ~MallocExtension();
// Call this very early in the program execution -- say, in a global
// constructor -- to set up parameters and state needed by all
// instrumented malloc implementations. One example: this routine
// sets environment variables to tell STL to use libc's malloc()
// instead of doing its own memory management. This is safe to call
// multiple times, as long as each time is before threads start up.
static void Initialize();
// See "verify_memory.h" to see what these routines do
virtual bool VerifyAllMemory();
virtual bool VerifyNewMemory(const void* p);
virtual bool VerifyArrayNewMemory(const void* p);
virtual bool VerifyMallocMemory(const void* p);
virtual bool MallocMemoryStats(int* blocks, size_t* total,
int histogram[kMallocHistogramSize]);
// Get a human readable description of the following malloc data structures.
// - Total inuse memory by application.
// - Free memory(thread, central and page heap),
// - Freelist of central cache, each class.
// - Page heap freelist.
// The state is stored as a null-terminated string
// in a prefix of "buffer[0,buffer_length-1]".
// REQUIRES: buffer_length > 0.
virtual void GetStats(char* buffer, int buffer_length);
// Outputs to "writer" a sample of live objects and the stack traces
// that allocated these objects. The format of the returned output
// is equivalent to the output of the heap profiler and can
// therefore be passed to "pprof". This function is equivalent to
// ReadStackTraces. The main difference is that this function returns
// serialized data appropriately formatted for use by the pprof tool.
// NOTE: by default, tcmalloc does not do any heap sampling, and this
// function will always return an empty sample. To get useful
// data from GetHeapSample, you must also set the environment
// variable TCMALLOC_SAMPLE_PARAMETER to a value such as 524288.
virtual void GetHeapSample(MallocExtensionWriter* writer);
// Outputs to "writer" the stack traces that caused growth in the
// address space size. The format of the returned output is
// equivalent to the output of the heap profiler and can therefore
// be passed to "pprof". This function is equivalent to
// ReadHeapGrowthStackTraces. The main difference is that this function
// returns serialized data appropriately formatted for use by the
// pprof tool. (This does not depend on, or require,
// TCMALLOC_SAMPLE_PARAMETER.)
virtual void GetHeapGrowthStacks(MallocExtensionWriter* writer);
// Invokes func(arg, range) for every controlled memory
// range. *range is filled in with information about the range.
//
// This is a best-effort interface useful only for performance
// analysis. The implementation may not call func at all.
typedef void (RangeFunction)(void*, const base::MallocRange*);
virtual void Ranges(void* arg, RangeFunction func);
// -------------------------------------------------------------------
// Control operations for getting and setting malloc implementation
// specific parameters. Some currently useful properties:
//
// generic
// -------
// "generic.current_allocated_bytes"
// Number of bytes currently allocated by application
// This property is not writable.
//
// "generic.heap_size"
// Number of bytes in the heap ==
// current_allocated_bytes +
// fragmentation +
// freed memory regions
// This property is not writable.
//
// tcmalloc
// --------
// "tcmalloc.max_total_thread_cache_bytes"
// Upper limit on total number of bytes stored across all
// per-thread caches. Default: 16MB.
//
// "tcmalloc.current_total_thread_cache_bytes"
// Number of bytes used across all thread caches.
// This property is not writable.
//
// "tcmalloc.central_cache_free_bytes"
// Number of free bytes in the central cache that have been
// assigned to size classes. They always count towards virtual
// memory usage, and unless the underlying memory is swapped out
// by the OS, they also count towards physical memory usage.
// This property is not writable.
//
// "tcmalloc.transfer_cache_free_bytes"
// Number of free bytes that are waiting to be transferred between
// the central cache and a thread cache. They always count
// towards virtual memory usage, and unless the underlying memory
// is swapped out by the OS, they also count towards physical
// memory usage. This property is not writable.
//
// "tcmalloc.thread_cache_free_bytes"
// Number of free bytes in thread caches. They always count
// towards virtual memory usage, and unless the underlying memory
// is swapped out by the OS, they also count towards physical
// memory usage. This property is not writable.
//
// "tcmalloc.pageheap_free_bytes"
// Number of bytes in free, mapped pages in page heap. These
// bytes can be used to fulfill allocation requests. They
// always count towards virtual memory usage, and unless the
// underlying memory is swapped out by the OS, they also count
// towards physical memory usage. This property is not writable.
//
// "tcmalloc.pageheap_unmapped_bytes"
// Number of bytes in free, unmapped pages in page heap.
// These are bytes that have been released back to the OS,
// possibly by one of the MallocExtension "Release" calls.
// They can be used to fulfill allocation requests, but
// typically incur a page fault. They always count towards
// virtual memory usage, and depending on the OS, typically
// do not count towards physical memory usage. This property
// is not writable.
// -------------------------------------------------------------------
// Get the named "property"'s value. Returns true if the property
// is known. Returns false if the property is not a valid property
// name for the current malloc implementation.
// REQUIRES: property != NULL; value != NULL
virtual bool GetNumericProperty(const char* property, size_t* value);
// Set the named "property"'s value. Returns true if the property
// is known and writable. Returns false if the property is not a
// valid property name for the current malloc implementation, or
// is not writable.
// REQUIRES: property != NULL
virtual bool SetNumericProperty(const char* property, size_t value);
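// For example, a minimal sketch of reading and tuning properties from
// application code (property names taken from the list above; the set
// call succeeds only where the property is writable):
//
//   size_t allocated = 0;
//   if (MallocExtension::instance()->GetNumericProperty(
//           "generic.current_allocated_bytes", &allocated)) {
//     // "allocated" now holds the application's live byte count.
//   }
//   // Raise the thread-cache cap to 32MB; returns false if unsupported.
//   MallocExtension::instance()->SetNumericProperty(
//       "tcmalloc.max_total_thread_cache_bytes", 32 << 20);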
// Mark the current thread as "idle". This routine may optionally
// be called by threads as a hint to the malloc implementation that
// any thread-specific resources should be released. Note: this may
// be an expensive routine, so it should not be called too often.
//
// Also, if the code that calls this routine will go to sleep for
// a while, it should take care to not allocate anything between
// the call to this routine and the beginning of the sleep.
//
// Most malloc implementations ignore this routine.
virtual void MarkThreadIdle();
// Mark the current thread as "busy". This routine should be
// called after MarkThreadIdle() if the thread will now do more
// work. If this method is not called, performance may suffer.
//
// Most malloc implementations ignore this routine.
virtual void MarkThreadBusy();
// Gets the system allocator used by the malloc extension instance. Returns
// NULL for malloc implementations that do not support pluggable system
// allocators.
virtual SysAllocator* GetSystemAllocator();
// Sets the system allocator to the specified one.
//
// Users can register their own system allocators with malloc
// implementations that support pluggable system allocators, such as
// TCMalloc, by doing:
//   alloc = new MyOwnSysAllocator();
//   MallocExtension::instance()->SetSystemAllocator(alloc);
// It's up to users whether to fall back (recommended) to the default
// system allocator (use GetSystemAllocator() above) or not. The caller is
// responsible for any necessary locking.
// See tcmalloc/system-alloc.h for the interface and
// tcmalloc/memfs_malloc.cc for examples.
//
// It's a no-op for malloc implementations that do not support pluggable
// system allocators.
virtual void SetSystemAllocator(SysAllocator *a);
// Try to release num_bytes of free memory back to the operating
// system for reuse. Use this extension with caution -- to get this
// memory back may require faulting pages back in by the OS, and
// that may be slow. (Currently only implemented in tcmalloc.)
virtual void ReleaseToSystem(size_t num_bytes);
// Same as ReleaseToSystem() but release as much memory as possible.
virtual void ReleaseFreeMemory();
// Sets the rate at which we release unused memory to the system.
// Zero means we never release memory back to the system. Increase
// this flag to return memory faster; decrease it to return memory
// slower. Reasonable rates are in the range [0,10]. (Currently
// only implemented in tcmalloc).
virtual void SetMemoryReleaseRate(double rate);
// Gets the release rate. Returns a value < 0 if unknown.
virtual double GetMemoryReleaseRate();
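// As a sketch, returning memory to the OS more aggressively (rate values
// per the documented [0,10] range; both calls are no-ops outside tcmalloc):
//
//   MallocExtension::instance()->SetMemoryReleaseRate(5.0);
//   ...
//   // Or release explicitly after dropping a large working set:
//   MallocExtension::instance()->ReleaseFreeMemory();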
// Returns the estimated number of bytes that will be allocated for
// a request of "size" bytes. This is an estimate: an allocation of
// SIZE bytes may reserve more bytes, but will never reserve less.
// (Currently only implemented in tcmalloc, other implementations
// always return SIZE.)
// This is equivalent to malloc_good_size() in OS X.
virtual size_t GetEstimatedAllocatedSize(size_t size);
// Returns the actual number N of bytes reserved by tcmalloc for the
// pointer p. The client is allowed to use the range of bytes
// [p, p+N) in any way it wishes (i.e. N is the "usable size" of this
// allocation). This number may be equal to or greater than the number
// of bytes requested when p was allocated.
// p must have been allocated by this malloc implementation,
// must not be an interior pointer -- that is, must be exactly
// the pointer returned by malloc() et al., not some offset
// from that -- and should not have been freed yet. p may be NULL.
// (Currently only implemented in tcmalloc; other implementations
// will return 0.)
// This is equivalent to malloc_size() in OS X, malloc_usable_size()
// in glibc, and _msize() on Windows.
virtual size_t GetAllocatedSize(const void* p);
// Returns kOwned if this malloc implementation allocated the memory
// pointed to by p, or kNotOwned if some other malloc implementation
// allocated it or p is NULL. May also return kUnknownOwnership if
// the malloc implementation does not keep track of ownership.
// REQUIRES: p must be a value returned from a previous call to
// malloc(), calloc(), realloc(), memalign(), posix_memalign(),
// valloc(), pvalloc(), new, or new[], and must refer to memory that
// is currently allocated (so, for instance, you should not pass in
// a pointer after having called free() on it).
enum Ownership {
// NOTE: Enum values MUST be kept in sync with the version in
// malloc_extension_c.h
kUnknownOwnership = 0,
kOwned,
kNotOwned
};
virtual Ownership GetOwnership(const void* p);
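// For illustration, a minimal ownership check before querying the usable
// size of an allocation:
//
//   if (MallocExtension::instance()->GetOwnership(p) ==
//       MallocExtension::kOwned) {
//     size_t usable = MallocExtension::instance()->GetAllocatedSize(p);
//   }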
// The current malloc implementation. Always non-NULL.
static MallocExtension* instance();
// Change the malloc implementation. Typically called by the
// malloc implementation during initialization.
static void Register(MallocExtension* implementation);
// Returns detailed information about malloc's freelists. For each list,
// return a FreeListInfo:
struct FreeListInfo {
size_t min_object_size;
size_t max_object_size;
size_t total_bytes_free;
const char* type;
};
// Each item in the vector refers to a different freelist. The lists
// are identified by the range of allocations that objects in the
// list can satisfy ([min_object_size, max_object_size]) and the
// type of freelist (see below). The current size of the list is
// returned in total_bytes_free (which counts against a process's
// resident and virtual size).
//
// Currently supported types are:
//
// "tcmalloc.page{_unmapped}" - tcmalloc's page heap. An entry for each size
// class in the page heap is returned. Bytes in "page_unmapped"
// are no longer backed by physical memory and do not count against
// the resident size of a process.
//
// "tcmalloc.large{_unmapped}" - tcmalloc's list of objects larger
// than the largest page heap size class. Only one "large"
// entry is returned. There is no upper bound on the size
// of objects in the large free list; this call returns
// kint64max for max_object_size. Bytes in
// "large_unmapped" are no longer backed by physical memory
// and do not count against the resident size of a process.
//
// "tcmalloc.central" - tcmalloc's central free-list. One entry per
// size-class is returned. Never unmapped.
//
// "debug.free_queue" - free objects queued by the debug allocator
// and not returned to tcmalloc.
//
// "tcmalloc.thread" - tcmalloc's per-thread caches. Never unmapped.
virtual void GetFreeListSizes(std::vector<FreeListInfo>* v);
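// For example, a minimal sketch summing the free bytes reported across
// all freelists:
//
//   std::vector<MallocExtension::FreeListInfo> lists;
//   MallocExtension::instance()->GetFreeListSizes(&lists);
//   size_t total_free = 0;
//   for (size_t i = 0; i < lists.size(); ++i)
//     total_free += lists[i].total_bytes_free;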
// Get a list of stack traces of sampled allocation points. Returns
// a pointer to a "new[]-ed" result array, and stores the sample
// period in "sample_period".
//
// The state is stored as a sequence of adjacent entries
// in the returned array. Each entry has the following form:
// uintptr_t count; // Number of objects with following trace
// uintptr_t size; // Total size of objects with following trace
// uintptr_t depth; // Number of PC values in stack trace
// void* stack[depth]; // PC values that form the stack trace
//
// The list of entries is terminated by a "count" of 0.
//
// It is the responsibility of the caller to "delete[]" the returned array.
//
// May return NULL to indicate no results.
//
// This is an internal extension. Callers should use the more
// convenient "GetHeapSample(string*)" method defined above.
virtual void** ReadStackTraces(int* sample_period);
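// As a sketch, decoding the documented entry layout (count/size/depth
// followed by "depth" PC values, terminated by a zero count):
//
//   int sample_period = 0;
//   void** entries =
//       MallocExtension::instance()->ReadStackTraces(&sample_period);
//   if (entries != NULL) {
//     for (void** e = entries;
//          reinterpret_cast<uintptr_t>(e[0]) != 0; ) {
//       uintptr_t depth = reinterpret_cast<uintptr_t>(e[2]);
//       // e[0] is the object count, e[1] the total size for this trace.
//       e += 3 + depth;  // skip the header plus "depth" stack PCs
//     }
//     delete[] entries;
//   }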
// Like ReadStackTraces(), but returns stack traces that caused growth
// in the address space size.
virtual void** ReadHeapGrowthStackTraces();
// Returns the size in bytes of the calling thread's cache.
virtual size_t GetThreadCacheSize();
// Like MarkThreadIdle, but does not destroy the internal data
// structures of the thread cache. When the thread resumes, it will
// have an empty cache but will not need to pay to reconstruct the
// cache data structures.
virtual void MarkThreadTemporarilyIdle();
};
namespace base {
// Information passed per range. More fields may be added later.
struct MallocRange {
enum Type {
INUSE, // Application is using this range
FREE, // Range is currently free
UNMAPPED, // Backing physical memory has been returned to the OS
UNKNOWN
// More enum values may be added in the future
};
uintptr_t address; // Address of range
size_t length; // Byte length of range
Type type; // Type of this range
double fraction; // Fraction of range that is being used (0 if !INUSE)
// Perhaps add the following:
// - stack trace if this range was sampled
// - heap growth stack trace if applicable to this range
// - age when allocated (for inuse) or freed (if not in use)
};
} // namespace base
#endif // BASE_MALLOC_EXTENSION_H_

View File

@ -1,101 +0,0 @@
/* Copyright (c) 2008, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* --
* Author: Craig Silverstein
*
* C shims for the C++ malloc_extension.h. See malloc_extension.h for
* details. Note these C shims always work on
* MallocExtension::instance(); it is not possible to have more than
* one MallocExtension object in C applications.
*/
#ifndef _MALLOC_EXTENSION_C_H_
#define _MALLOC_EXTENSION_C_H_
#include <stddef.h>
#include <sys/types.h>
/* Annoying stuff for windows -- makes sure clients can import these fns */
#ifndef PERFTOOLS_DLL_DECL
# ifdef _WIN32
# define PERFTOOLS_DLL_DECL __declspec(dllimport)
# else
# define PERFTOOLS_DLL_DECL
# endif
#endif
#ifdef __cplusplus
extern "C" {
#endif
#define kMallocExtensionHistogramSize 64
PERFTOOLS_DLL_DECL int MallocExtension_VerifyAllMemory(void);
PERFTOOLS_DLL_DECL int MallocExtension_VerifyNewMemory(const void* p);
PERFTOOLS_DLL_DECL int MallocExtension_VerifyArrayNewMemory(const void* p);
PERFTOOLS_DLL_DECL int MallocExtension_VerifyMallocMemory(const void* p);
PERFTOOLS_DLL_DECL int MallocExtension_MallocMemoryStats(int* blocks, size_t* total,
int histogram[kMallocExtensionHistogramSize]);
PERFTOOLS_DLL_DECL void MallocExtension_GetStats(char* buffer, int buffer_length);
/* TODO(csilvers): write a C version of these routines, that perhaps
* takes a function ptr and a void *.
*/
/* void MallocExtension_GetHeapSample(string* result); */
/* void MallocExtension_GetHeapGrowthStacks(string* result); */
PERFTOOLS_DLL_DECL int MallocExtension_GetNumericProperty(const char* property, size_t* value);
PERFTOOLS_DLL_DECL int MallocExtension_SetNumericProperty(const char* property, size_t value);
PERFTOOLS_DLL_DECL void MallocExtension_MarkThreadIdle(void);
PERFTOOLS_DLL_DECL void MallocExtension_MarkThreadBusy(void);
PERFTOOLS_DLL_DECL void MallocExtension_ReleaseToSystem(size_t num_bytes);
PERFTOOLS_DLL_DECL void MallocExtension_ReleaseFreeMemory(void);
PERFTOOLS_DLL_DECL size_t MallocExtension_GetEstimatedAllocatedSize(size_t size);
PERFTOOLS_DLL_DECL size_t MallocExtension_GetAllocatedSize(const void* p);
PERFTOOLS_DLL_DECL size_t MallocExtension_GetThreadCacheSize(void);
PERFTOOLS_DLL_DECL void MallocExtension_MarkThreadTemporarilyIdle(void);
/*
* NOTE: These enum values MUST be kept in sync with the version in
* malloc_extension.h
*/
typedef enum {
MallocExtension_kUnknownOwnership = 0,
MallocExtension_kOwned,
MallocExtension_kNotOwned
} MallocExtension_Ownership;
PERFTOOLS_DLL_DECL MallocExtension_Ownership MallocExtension_GetOwnership(const void* p);
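/* For illustration, a minimal sketch of these shims in use (assumes
 * <stdio.h> and <stdlib.h>; not part of the installed API surface):
 *
 *   size_t heap_size = 0;
 *   if (MallocExtension_GetNumericProperty("generic.heap_size", &heap_size))
 *     printf("heap: %zu bytes\n", heap_size);
 *   void* p = malloc(100);
 *   if (MallocExtension_GetOwnership(p) == MallocExtension_kOwned)
 *     printf("usable: %zu bytes\n", MallocExtension_GetAllocatedSize(p));
 *   free(p);
 */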
#ifdef __cplusplus
} /* extern "C" */
#endif
#endif /* _MALLOC_EXTENSION_C_H_ */

View File

@ -1,359 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2005, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
// Author: Sanjay Ghemawat
//
// Some of our malloc implementations can invoke the following hooks whenever
// memory is allocated or deallocated. MallocHook is thread-safe, and things
// you do before calling AddFooHook(MyHook) are visible to any resulting calls
// to MyHook. Hooks must be thread-safe. If you write:
//
// CHECK(MallocHook::AddNewHook(&MyNewHook));
//
// MyNewHook will be invoked in subsequent calls in the current thread, but
// there are no guarantees on when it might be invoked in other threads.
//
// There are a limited number of slots available for each hook type. Add*Hook
// will return false if there are no slots available. Remove*Hook will return
// false if the given hook was not already installed.
//
// The order in which individual hooks are called in Invoke*Hook is undefined.
//
// It is safe for a hook to remove itself within Invoke*Hook and add other
// hooks. Any hooks added inside a hook invocation (for the same hook type)
// will not be invoked for the current invocation.
//
// One important user of these hooks is the heap profiler.
//
// CAVEAT: If you add new MallocHook::Invoke* calls then those calls must be
// directly in the code of the (de)allocation function that is provided to the
// user and that function must have an ATTRIBUTE_SECTION(malloc_hook) attribute.
//
// Note: the Invoke*Hook() functions are defined in malloc_hook-inl.h. If you
// need to invoke a hook (which you shouldn't unless you're part of tcmalloc),
// be sure to #include malloc_hook-inl.h in addition to malloc_hook.h.
//
// NOTE FOR C USERS: If you want to use malloc_hook functionality from
// a C program, #include malloc_hook_c.h instead of this file.
#ifndef _MALLOC_HOOK_H_
#define _MALLOC_HOOK_H_
#include <stddef.h>
#include <sys/types.h>
extern "C" {
#include "malloc_hook_c.h" // a C version of the malloc_hook interface
}
// Annoying stuff for windows -- makes sure clients can import these functions
#ifndef PERFTOOLS_DLL_DECL
# ifdef _WIN32
# define PERFTOOLS_DLL_DECL __declspec(dllimport)
# else
# define PERFTOOLS_DLL_DECL
# endif
#endif
// The C++ methods below call the C version (MallocHook_*), and thus
// convert between an int and a bool. Windows complains about this
// (a "performance warning") which we don't care about, so we suppress.
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable:4800)
#endif
// Note: malloc_hook_c.h defines MallocHook_*Hook and
// MallocHook_{Add,Remove}*Hook. The version of these inside the MallocHook
// class are defined in terms of the malloc_hook_c version. See malloc_hook_c.h
// for details of these types/functions.
class PERFTOOLS_DLL_DECL MallocHook {
public:
// The NewHook is invoked whenever an object is allocated.
// It may be passed NULL if the allocator returned NULL.
typedef MallocHook_NewHook NewHook;
inline static bool AddNewHook(NewHook hook) {
return MallocHook_AddNewHook(hook);
}
inline static bool RemoveNewHook(NewHook hook) {
return MallocHook_RemoveNewHook(hook);
}
inline static void InvokeNewHook(const void* p, size_t s);
// The DeleteHook is invoked whenever an object is deallocated.
// It may be passed NULL if the caller is trying to delete NULL.
typedef MallocHook_DeleteHook DeleteHook;
inline static bool AddDeleteHook(DeleteHook hook) {
return MallocHook_AddDeleteHook(hook);
}
inline static bool RemoveDeleteHook(DeleteHook hook) {
return MallocHook_RemoveDeleteHook(hook);
}
inline static void InvokeDeleteHook(const void* p);
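// For illustration, a minimal sketch of a hook pair (hook bodies must be
// thread-safe, per the file comment above; the names are hypothetical):
//
//   static void MyNewHook(const void* p, size_t s) {
//     // record the allocation; p may be NULL if the allocator failed
//   }
//   static void MyDeleteHook(const void* p) {
//     // record the deallocation
//   }
//   ...
//   CHECK(MallocHook::AddNewHook(&MyNewHook));
//   CHECK(MallocHook::AddDeleteHook(&MyDeleteHook));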
// The PreMmapHook is invoked with mmap or mmap64 arguments just
// before the call is actually made. Such a hook may be useful
// in memory limited contexts, to catch allocations that will exceed
// a memory limit, and take outside actions to increase that limit.
typedef MallocHook_PreMmapHook PreMmapHook;
inline static bool AddPreMmapHook(PreMmapHook hook) {
return MallocHook_AddPreMmapHook(hook);
}
inline static bool RemovePreMmapHook(PreMmapHook hook) {
return MallocHook_RemovePreMmapHook(hook);
}
inline static void InvokePreMmapHook(const void* start,
size_t size,
int protection,
int flags,
int fd,
off_t offset);
// The MmapReplacement is invoked after the PreMmapHook but before
// the call is actually made. The MmapReplacement should return true
// if it handled the call, or false if it is still necessary to
// call mmap/mmap64.
// This should be used only by experts, and users must be
// extremely careful to avoid recursive calls to mmap. The replacement
// should be async signal safe.
// Only one MmapReplacement is supported. After setting an MmapReplacement
// you must call RemoveMmapReplacement before calling SetMmapReplacement
// again.
typedef MallocHook_MmapReplacement MmapReplacement;
inline static bool SetMmapReplacement(MmapReplacement hook) {
return MallocHook_SetMmapReplacement(hook);
}
inline static bool RemoveMmapReplacement(MmapReplacement hook) {
return MallocHook_RemoveMmapReplacement(hook);
}
inline static bool InvokeMmapReplacement(const void* start,
size_t size,
int protection,
int flags,
int fd,
off_t offset,
void** result);
// The MmapHook is invoked whenever a region of memory is mapped.
// It may be passed MAP_FAILED if the mmap failed.
typedef MallocHook_MmapHook MmapHook;
inline static bool AddMmapHook(MmapHook hook) {
return MallocHook_AddMmapHook(hook);
}
inline static bool RemoveMmapHook(MmapHook hook) {
return MallocHook_RemoveMmapHook(hook);
}
inline static void InvokeMmapHook(const void* result,
const void* start,
size_t size,
int protection,
int flags,
int fd,
off_t offset);
// The MunmapReplacement is invoked with munmap arguments just before
// the call is actually made. The MunmapReplacement should return true
// if it handled the call, or false if it is still necessary to
// call munmap.
// This should be used only by experts. The replacement should be
// async signal safe.
// Only one MunmapReplacement is supported. After setting an
// MunmapReplacement you must call RemoveMunmapReplacement before
// calling SetMunmapReplacement again.
typedef MallocHook_MunmapReplacement MunmapReplacement;
inline static bool SetMunmapReplacement(MunmapReplacement hook) {
return MallocHook_SetMunmapReplacement(hook);
}
inline static bool RemoveMunmapReplacement(MunmapReplacement hook) {
return MallocHook_RemoveMunmapReplacement(hook);
}
inline static bool InvokeMunmapReplacement(const void* p,
size_t size,
int* result);
// The MunmapHook is invoked whenever a region of memory is unmapped.
typedef MallocHook_MunmapHook MunmapHook;
inline static bool AddMunmapHook(MunmapHook hook) {
return MallocHook_AddMunmapHook(hook);
}
inline static bool RemoveMunmapHook(MunmapHook hook) {
return MallocHook_RemoveMunmapHook(hook);
}
inline static void InvokeMunmapHook(const void* p, size_t size);
// The MremapHook is invoked whenever a region of memory is remapped.
typedef MallocHook_MremapHook MremapHook;
inline static bool AddMremapHook(MremapHook hook) {
return MallocHook_AddMremapHook(hook);
}
inline static bool RemoveMremapHook(MremapHook hook) {
return MallocHook_RemoveMremapHook(hook);
}
inline static void InvokeMremapHook(const void* result,
const void* old_addr,
size_t old_size,
size_t new_size,
int flags,
const void* new_addr);
// The PreSbrkHook is invoked just before sbrk is called -- except when
// the increment is 0. This is because sbrk(0) is often called
// to query the current program break, and is not actually a
// memory-allocation call. It may be useful in memory-limited contexts,
// to catch allocations that will exceed the limit and take outside
// actions to increase such a limit.
typedef MallocHook_PreSbrkHook PreSbrkHook;
inline static bool AddPreSbrkHook(PreSbrkHook hook) {
return MallocHook_AddPreSbrkHook(hook);
}
inline static bool RemovePreSbrkHook(PreSbrkHook hook) {
return MallocHook_RemovePreSbrkHook(hook);
}
inline static void InvokePreSbrkHook(ptrdiff_t increment);
// The SbrkHook is invoked whenever sbrk is called -- except when
// the increment is 0. This is because sbrk(0) is often called
// to query the current program break, and is not actually a
// memory-allocation call.
typedef MallocHook_SbrkHook SbrkHook;
inline static bool AddSbrkHook(SbrkHook hook) {
return MallocHook_AddSbrkHook(hook);
}
inline static bool RemoveSbrkHook(SbrkHook hook) {
return MallocHook_RemoveSbrkHook(hook);
}
inline static void InvokeSbrkHook(const void* result, ptrdiff_t increment);
// Get the current stack trace. Try to skip all routines up to
// and including the caller of MallocHook::Invoke*.
// Use "skip_count" (similarly to GetStackTrace from stacktrace.h)
// as a hint about how many routines to skip if better information
// is not available.
inline static int GetCallerStackTrace(void** result, int max_depth,
int skip_count) {
return MallocHook_GetCallerStackTrace(result, max_depth, skip_count);
}
// Unhooked versions of mmap() and munmap(). These should be used
// only by experts, since they bypass heapchecking, etc.
// Note: These do not run hooks, but they still use the MmapReplacement
// and MunmapReplacement.
static void* UnhookedMMap(void *start, size_t length, int prot, int flags,
int fd, off_t offset);
static int UnhookedMUnmap(void *start, size_t length);
// The following are DEPRECATED.
inline static NewHook GetNewHook();
inline static NewHook SetNewHook(NewHook hook) {
return MallocHook_SetNewHook(hook);
}
inline static DeleteHook GetDeleteHook();
inline static DeleteHook SetDeleteHook(DeleteHook hook) {
return MallocHook_SetDeleteHook(hook);
}
inline static PreMmapHook GetPreMmapHook();
inline static PreMmapHook SetPreMmapHook(PreMmapHook hook) {
return MallocHook_SetPreMmapHook(hook);
}
inline static MmapHook GetMmapHook();
inline static MmapHook SetMmapHook(MmapHook hook) {
return MallocHook_SetMmapHook(hook);
}
inline static MunmapHook GetMunmapHook();
inline static MunmapHook SetMunmapHook(MunmapHook hook) {
return MallocHook_SetMunmapHook(hook);
}
inline static MremapHook GetMremapHook();
inline static MremapHook SetMremapHook(MremapHook hook) {
return MallocHook_SetMremapHook(hook);
}
inline static PreSbrkHook GetPreSbrkHook();
inline static PreSbrkHook SetPreSbrkHook(PreSbrkHook hook) {
return MallocHook_SetPreSbrkHook(hook);
}
inline static SbrkHook GetSbrkHook();
inline static SbrkHook SetSbrkHook(SbrkHook hook) {
return MallocHook_SetSbrkHook(hook);
}
// End of DEPRECATED methods.
private:
// Slow path versions of Invoke*Hook.
static void InvokeNewHookSlow(const void* p, size_t s);
static void InvokeDeleteHookSlow(const void* p);
static void InvokePreMmapHookSlow(const void* start,
size_t size,
int protection,
int flags,
int fd,
off_t offset);
static void InvokeMmapHookSlow(const void* result,
const void* start,
size_t size,
int protection,
int flags,
int fd,
off_t offset);
static bool InvokeMmapReplacementSlow(const void* start,
size_t size,
int protection,
int flags,
int fd,
off_t offset,
void** result);
static void InvokeMunmapHookSlow(const void* p, size_t size);
static bool InvokeMunmapReplacementSlow(const void* p,
size_t size,
int* result);
static void InvokeMremapHookSlow(const void* result,
const void* old_addr,
size_t old_size,
size_t new_size,
int flags,
const void* new_addr);
static void InvokePreSbrkHookSlow(ptrdiff_t increment);
static void InvokeSbrkHookSlow(const void* result, ptrdiff_t increment);
};
#ifdef _MSC_VER
#pragma warning(pop)
#endif
#endif /* _MALLOC_HOOK_H_ */

View File

@ -1,173 +0,0 @@
/* Copyright (c) 2008, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* --
* Author: Craig Silverstein
*
* C shims for the C++ malloc_hook.h. See malloc_hook.h for details
* on how to use these.
*/
#ifndef _MALLOC_HOOK_C_H_
#define _MALLOC_HOOK_C_H_
#include <stddef.h>
#include <sys/types.h>
/* Annoying stuff for windows; makes sure clients can import these functions */
#ifndef PERFTOOLS_DLL_DECL
# ifdef _WIN32
# define PERFTOOLS_DLL_DECL __declspec(dllimport)
# else
# define PERFTOOLS_DLL_DECL
# endif
#endif
#ifdef __cplusplus
extern "C" {
#endif
/* Get the current stack trace. Try to skip all routines up to
* and including the caller of MallocHook::Invoke*.
* Use "skip_count" (similarly to GetStackTrace from stacktrace.h)
* as a hint about how many routines to skip if better information
* is not available.
*/
PERFTOOLS_DLL_DECL
int MallocHook_GetCallerStackTrace(void** result, int max_depth,
int skip_count);
/* The MallocHook_{Add,Remove}*Hook functions return 1 on success and 0 on
* failure.
*/
typedef void (*MallocHook_NewHook)(const void* ptr, size_t size);
PERFTOOLS_DLL_DECL
int MallocHook_AddNewHook(MallocHook_NewHook hook);
PERFTOOLS_DLL_DECL
int MallocHook_RemoveNewHook(MallocHook_NewHook hook);
typedef void (*MallocHook_DeleteHook)(const void* ptr);
PERFTOOLS_DLL_DECL
int MallocHook_AddDeleteHook(MallocHook_DeleteHook hook);
PERFTOOLS_DLL_DECL
int MallocHook_RemoveDeleteHook(MallocHook_DeleteHook hook);
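/* For example, a sketch of registering a hook from C and checking the
 * documented 1/0 return code (the hook name is hypothetical):
 *
 *   static void my_new_hook(const void* ptr, size_t size) {
 *     // record the allocation
 *   }
 *   ...
 *   if (!MallocHook_AddNewHook(&my_new_hook)) {
 *     // no free hook slots were available
 *   }
 */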
typedef void (*MallocHook_PreMmapHook)(const void *start,
size_t size,
int protection,
int flags,
int fd,
off_t offset);
PERFTOOLS_DLL_DECL
int MallocHook_AddPreMmapHook(MallocHook_PreMmapHook hook);
PERFTOOLS_DLL_DECL
int MallocHook_RemovePreMmapHook(MallocHook_PreMmapHook hook);
typedef void (*MallocHook_MmapHook)(const void* result,
const void* start,
size_t size,
int protection,
int flags,
int fd,
off_t offset);
PERFTOOLS_DLL_DECL
int MallocHook_AddMmapHook(MallocHook_MmapHook hook);
PERFTOOLS_DLL_DECL
int MallocHook_RemoveMmapHook(MallocHook_MmapHook hook);
typedef int (*MallocHook_MmapReplacement)(const void* start,
size_t size,
int protection,
int flags,
int fd,
off_t offset,
void** result);
int MallocHook_SetMmapReplacement(MallocHook_MmapReplacement hook);
int MallocHook_RemoveMmapReplacement(MallocHook_MmapReplacement hook);
typedef void (*MallocHook_MunmapHook)(const void* ptr, size_t size);
PERFTOOLS_DLL_DECL
int MallocHook_AddMunmapHook(MallocHook_MunmapHook hook);
PERFTOOLS_DLL_DECL
int MallocHook_RemoveMunmapHook(MallocHook_MunmapHook hook);
typedef int (*MallocHook_MunmapReplacement)(const void* ptr,
size_t size,
int* result);
int MallocHook_SetMunmapReplacement(MallocHook_MunmapReplacement hook);
int MallocHook_RemoveMunmapReplacement(MallocHook_MunmapReplacement hook);
typedef void (*MallocHook_MremapHook)(const void* result,
const void* old_addr,
size_t old_size,
size_t new_size,
int flags,
const void* new_addr);
PERFTOOLS_DLL_DECL
int MallocHook_AddMremapHook(MallocHook_MremapHook hook);
PERFTOOLS_DLL_DECL
int MallocHook_RemoveMremapHook(MallocHook_MremapHook hook);
typedef void (*MallocHook_PreSbrkHook)(ptrdiff_t increment);
PERFTOOLS_DLL_DECL
int MallocHook_AddPreSbrkHook(MallocHook_PreSbrkHook hook);
PERFTOOLS_DLL_DECL
int MallocHook_RemovePreSbrkHook(MallocHook_PreSbrkHook hook);
typedef void (*MallocHook_SbrkHook)(const void* result, ptrdiff_t increment);
PERFTOOLS_DLL_DECL
int MallocHook_AddSbrkHook(MallocHook_SbrkHook hook);
PERFTOOLS_DLL_DECL
int MallocHook_RemoveSbrkHook(MallocHook_SbrkHook hook);
/* The following are DEPRECATED. */
PERFTOOLS_DLL_DECL
MallocHook_NewHook MallocHook_SetNewHook(MallocHook_NewHook hook);
PERFTOOLS_DLL_DECL
MallocHook_DeleteHook MallocHook_SetDeleteHook(MallocHook_DeleteHook hook);
PERFTOOLS_DLL_DECL
MallocHook_PreMmapHook MallocHook_SetPreMmapHook(MallocHook_PreMmapHook hook);
PERFTOOLS_DLL_DECL
MallocHook_MmapHook MallocHook_SetMmapHook(MallocHook_MmapHook hook);
PERFTOOLS_DLL_DECL
MallocHook_MunmapHook MallocHook_SetMunmapHook(MallocHook_MunmapHook hook);
PERFTOOLS_DLL_DECL
MallocHook_MremapHook MallocHook_SetMremapHook(MallocHook_MremapHook hook);
PERFTOOLS_DLL_DECL
MallocHook_PreSbrkHook MallocHook_SetPreSbrkHook(MallocHook_PreSbrkHook hook);
PERFTOOLS_DLL_DECL
MallocHook_SbrkHook MallocHook_SetSbrkHook(MallocHook_SbrkHook hook);
/* End of DEPRECATED functions. */
#ifdef __cplusplus
} // extern "C"
#endif
#endif /* _MALLOC_HOOK_C_H_ */

View File

@ -1,169 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2005, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ---
* Author: Sanjay Ghemawat
*
* Module for CPU profiling based on periodic pc-sampling.
*
* For full(er) information, see doc/cpuprofile.html
*
* This module is linked into your program and causes
* no slowdown unless you activate the profiler
* using one of the following methods:
*
* 1. Before starting the program, set the environment variable
* "CPUPROFILE" to be the name of the file to which the profile
* data should be written.
*
* 2. Programmatically, start and stop the profiler using the
* routines "ProfilerStart(filename)" and "ProfilerStop()".
*
*
* (Note: if using linux 2.4 or earlier, only the main thread may be
* profiled.)
*
* Use pprof to view the resulting profile output.
* % pprof <path_to_executable> <profile_file_name>
* % pprof --gv <path_to_executable> <profile_file_name>
*
* These functions are thread-safe.
*/
#ifndef BASE_PROFILER_H_
#define BASE_PROFILER_H_
#include <time.h> /* For time_t */
/* Annoying stuff for windows; makes sure clients can import these functions */
#ifndef PERFTOOLS_DLL_DECL
# ifdef _WIN32
# define PERFTOOLS_DLL_DECL __declspec(dllimport)
# else
# define PERFTOOLS_DLL_DECL
# endif
#endif
/* All this code should be usable from within C apps. */
#ifdef __cplusplus
extern "C" {
#endif
/* Profiler options, for use with ProfilerStartWithOptions. To use:
*
* struct ProfilerOptions options;
* memset(&options, 0, sizeof options);
*
* then fill in fields as needed.
*
* This structure is intended to be usable from C code, so no constructor
* is provided to initialize it. (Use memset as described above).
*/
struct ProfilerOptions {
/* Filter function and argument.
*
* If filter_in_thread is not NULL, when a profiling tick is delivered
* the profiler will call:
*
* (*filter_in_thread)(filter_in_thread_arg)
*
* If it returns nonzero, the sample will be included in the profile.
* Note that filter_in_thread runs in a signal handler, so must be
* async-signal-safe.
*
* A typical use would be to set up filter results for each thread
* in the system before starting the profiler, then to make
* filter_in_thread be a very simple function which retrieves those
* results in an async-signal-safe way. Retrieval could be done
* using thread-specific data, or using a shared data structure that
* supports async-signal-safe lookups.
*/
int (*filter_in_thread)(void *arg);
void *filter_in_thread_arg;
};
/* Start profiling and write profile info into fname, discarding any
* existing profiling data in that file.
*
* This is equivalent to calling ProfilerStartWithOptions(fname, NULL).
*/
PERFTOOLS_DLL_DECL int ProfilerStart(const char* fname);
/* Start profiling and write profile into fname, discarding any
* existing profiling data in that file.
*
* The profiler is configured using the options given by 'options'.
* Options which are not specified are given default values.
*
* 'options' may be NULL, in which case all are given default values.
*
* Returns nonzero if profiling was started successfully, or zero otherwise.
*/
PERFTOOLS_DLL_DECL int ProfilerStartWithOptions(
const char *fname, const struct ProfilerOptions *options);
/* Stop profiling. Can be started again with ProfilerStart(), but
* the currently accumulated profiling data will be cleared.
*/
PERFTOOLS_DLL_DECL void ProfilerStop(void);
/* Flush any currently buffered profiling state to the profile file.
* Has no effect if the profiler has not been started.
*/
PERFTOOLS_DLL_DECL void ProfilerFlush(void);
/* DEPRECATED: these functions were used to enable/disable profiling
* in the current thread, but no longer do anything.
*/
PERFTOOLS_DLL_DECL void ProfilerEnable(void);
PERFTOOLS_DLL_DECL void ProfilerDisable(void);
/* Returns nonzero if profiling is currently enabled, zero if it's not. */
PERFTOOLS_DLL_DECL int ProfilingIsEnabledForAllThreads(void);
/* Routine for registering new threads with the profiler.
*/
PERFTOOLS_DLL_DECL void ProfilerRegisterThread(void);
/* Stores state about profiler's current status into "*state". */
struct ProfilerState {
int enabled; /* Is profiling currently enabled? */
time_t start_time; /* If enabled, when was profiling started? */
char profile_name[1024]; /* Name of profile file being written, or '\0' */
int samples_gathered; /* Number of samples gathered so far (or 0) */
};
PERFTOOLS_DLL_DECL void ProfilerGetCurrentState(struct ProfilerState* state);
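/* For illustration, a minimal sketch tying these routines together (the
 * output path and do_work() are placeholders; the filter shown accepts
 * every sample and, like any filter, must be async-signal-safe):
 *
 *   static int accept_all(void* arg) { return 1; }
 *   ...
 *   struct ProfilerOptions options;
 *   memset(&options, 0, sizeof options);
 *   options.filter_in_thread = &accept_all;
 *   if (ProfilerStartWithOptions("/tmp/myprog.prof", &options)) {
 *     do_work();
 *     struct ProfilerState state;
 *     ProfilerGetCurrentState(&state);
 *     // state.samples_gathered now reflects progress so far
 *     ProfilerStop();
 *   }
 */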
#ifdef __cplusplus
} // extern "C"
#endif
#endif /* BASE_PROFILER_H_ */

View File

@ -1,117 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2005, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
// Author: Sanjay Ghemawat
//
// Routines to extract the current stack trace. These functions are
// thread-safe.
#ifndef GOOGLE_STACKTRACE_H_
#define GOOGLE_STACKTRACE_H_
// Annoying stuff for windows -- makes sure clients can import these functions
#ifndef PERFTOOLS_DLL_DECL
# ifdef _WIN32
# define PERFTOOLS_DLL_DECL __declspec(dllimport)
# else
# define PERFTOOLS_DLL_DECL
# endif
#endif
// Skips the most recent "skip_count" stack frames (also skips the
// frame generated for the "GetStackFrames" routine itself), and then
// records the pc values for up to the next "max_depth" frames in
// "result", and the corresponding stack frame sizes in "sizes".
// Returns the number of values recorded in "result"/"sizes".
//
// Example:
// main() { foo(); }
// foo() { bar(); }
// bar() {
// void* result[10];
// int sizes[10];
// int depth = GetStackFrames(result, sizes, 10, 1);
// }
//
// The GetStackFrames call will skip the frame for "bar". It will
// return 2 and will produce pc values that map to the following
// procedures:
// result[0] foo
// result[1] main
// (Actually, there may be a few more entries after "main" to account for
// startup procedures.)
// And corresponding stack frame sizes will also be recorded:
// sizes[0] 16
// sizes[1] 16
// (Stack frame sizes of 16 above are just for illustration purposes.)
// Stack frame sizes of 0 or less indicate that those frame sizes couldn't
// be identified.
//
// This routine may return fewer stack frame entries than are
// available. Also note that "result" and "sizes" must both be non-NULL.
extern PERFTOOLS_DLL_DECL int GetStackFrames(void** result, int* sizes, int max_depth,
int skip_count);
// Same as above, but to be used from a signal handler. The "uc" parameter
// should be the pointer to ucontext_t which was passed as the 3rd parameter
// to sa_sigaction signal handler. It may help the unwinder to get a
// better stack trace under certain conditions. The "uc" may safely be NULL.
extern PERFTOOLS_DLL_DECL int GetStackFramesWithContext(void** result, int* sizes, int max_depth,
int skip_count, const void *uc);
// This is similar to the GetStackFrames routine, except that it returns
// the stack trace only, and not the stack frame sizes as well.
// Example:
// main() { foo(); }
// foo() { bar(); }
// bar() {
// void* result[10];
// int depth = GetStackTrace(result, 10, 1);
// }
//
// This produces:
// result[0] foo
// result[1] main
// .... ...
//
// "result" must not be NULL.
extern PERFTOOLS_DLL_DECL int GetStackTrace(void** result, int max_depth,
int skip_count);
// Same as above, but to be used from a signal handler. The "uc" parameter
// should be the pointer to ucontext_t which was passed as the 3rd parameter
// to sa_sigaction signal handler. It may help the unwinder to get a
// better stack trace under certain conditions. The "uc" may safely be NULL.
extern PERFTOOLS_DLL_DECL int GetStackTraceWithContext(void** result, int max_depth,
int skip_count, const void *uc);
#endif /* GOOGLE_STACKTRACE_H_ */

View File

@ -1,160 +0,0 @@
// -*- Mode: C; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2003, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ---
* Author: Sanjay Ghemawat <opensource@google.com>
* .h file by Craig Silverstein <opensource@google.com>
*/
#ifndef TCMALLOC_TCMALLOC_H_
#define TCMALLOC_TCMALLOC_H_
#include <stddef.h> /* for size_t */
/* Define the version number so folks can check against it */
#define TC_VERSION_MAJOR 2
#define TC_VERSION_MINOR 5
#define TC_VERSION_PATCH ""
#define TC_VERSION_STRING "gperftools 2.5"
/* For struct mallinfo, if it's defined. */
#if !defined(__APPLE__) && !defined(__FreeBSD__)
# include <malloc.h>
#else
struct mallinfo {
size_t arena; /* non-mmapped space allocated from system */
size_t ordblks; /* number of free chunks */
size_t smblks; /* always 0 */
size_t hblks; /* always 0 */
size_t hblkhd; /* space in mmapped regions */
size_t usmblks; /* maximum total allocated space */
size_t fsmblks; /* always 0 */
size_t uordblks; /* total allocated space */
size_t fordblks; /* total free space */
size_t keepcost; /* releasable (via malloc_trim) space */
};
#endif
#ifdef __cplusplus
#define PERFTOOLS_THROW throw()
#else
# ifdef __GNUC__
# define PERFTOOLS_THROW __attribute__((__nothrow__))
# else
# define PERFTOOLS_THROW
# endif
#endif
#ifndef PERFTOOLS_DLL_DECL
#define PERFTOOLS_DLL_DECL_DEFINED
# ifdef _WIN32
# define PERFTOOLS_DLL_DECL __declspec(dllimport)
# else
# define PERFTOOLS_DLL_DECL
# endif
#endif
#ifdef __cplusplus
namespace std {
struct nothrow_t;
}
extern "C" {
#endif
/*
* Returns a human-readable version string. If major, minor,
* and/or patch are not NULL, they are set to the major version,
* minor version, and patch-code (a string, usually "").
*/
PERFTOOLS_DLL_DECL const char* tc_version(int* major, int* minor,
const char** patch) PERFTOOLS_THROW;
PERFTOOLS_DLL_DECL void* tc_malloc(size_t size) PERFTOOLS_THROW;
PERFTOOLS_DLL_DECL void* tc_malloc_skip_new_handler(size_t size) PERFTOOLS_THROW;
PERFTOOLS_DLL_DECL void tc_free(void* ptr) PERFTOOLS_THROW;
PERFTOOLS_DLL_DECL void tc_free_sized(void *ptr, size_t size) PERFTOOLS_THROW;
PERFTOOLS_DLL_DECL void* tc_realloc(void* ptr, size_t size) PERFTOOLS_THROW;
PERFTOOLS_DLL_DECL void* tc_calloc(size_t nmemb, size_t size) PERFTOOLS_THROW;
PERFTOOLS_DLL_DECL void tc_cfree(void* ptr) PERFTOOLS_THROW;
PERFTOOLS_DLL_DECL void* tc_memalign(size_t __alignment,
size_t __size) PERFTOOLS_THROW;
PERFTOOLS_DLL_DECL int tc_posix_memalign(void** ptr,
size_t align, size_t size) PERFTOOLS_THROW;
PERFTOOLS_DLL_DECL void* tc_valloc(size_t __size) PERFTOOLS_THROW;
PERFTOOLS_DLL_DECL void* tc_pvalloc(size_t __size) PERFTOOLS_THROW;
PERFTOOLS_DLL_DECL void tc_malloc_stats(void) PERFTOOLS_THROW;
PERFTOOLS_DLL_DECL int tc_mallopt(int cmd, int value) PERFTOOLS_THROW;
#if 1
PERFTOOLS_DLL_DECL struct mallinfo tc_mallinfo(void) PERFTOOLS_THROW;
#endif
/*
* This is an alias for MallocExtension::instance()->GetAllocatedSize().
* It is equivalent to
* OS X: malloc_size()
* glibc: malloc_usable_size()
* Windows: _msize()
*/
PERFTOOLS_DLL_DECL size_t tc_malloc_size(void* ptr) PERFTOOLS_THROW;
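/* For example, a short sketch of the C entry points above (assumes
 * <string.h> for memset; the usable size may exceed the 100 requested):
 *
 *   void* p = tc_malloc(100);
 *   if (p != NULL) {
 *     memset(p, 0, tc_malloc_size(p));
 *     tc_free(p);
 *   }
 */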
#ifdef __cplusplus
PERFTOOLS_DLL_DECL int tc_set_new_mode(int flag) PERFTOOLS_THROW;
PERFTOOLS_DLL_DECL void* tc_new(size_t size);
PERFTOOLS_DLL_DECL void* tc_new_nothrow(size_t size,
const std::nothrow_t&) PERFTOOLS_THROW;
PERFTOOLS_DLL_DECL void tc_delete(void* p) PERFTOOLS_THROW;
PERFTOOLS_DLL_DECL void tc_delete_sized(void* p, size_t size) throw();
PERFTOOLS_DLL_DECL void tc_delete_nothrow(void* p,
const std::nothrow_t&) PERFTOOLS_THROW;
PERFTOOLS_DLL_DECL void* tc_newarray(size_t size);
PERFTOOLS_DLL_DECL void* tc_newarray_nothrow(size_t size,
const std::nothrow_t&) PERFTOOLS_THROW;
PERFTOOLS_DLL_DECL void tc_deletearray(void* p) PERFTOOLS_THROW;
PERFTOOLS_DLL_DECL void tc_deletearray_sized(void* p, size_t size) throw();
PERFTOOLS_DLL_DECL void tc_deletearray_nothrow(void* p,
const std::nothrow_t&) PERFTOOLS_THROW;
}
#endif
/* We only un-define these macros for public (non-internal) use */
#if !defined(GPERFTOOLS_CONFIG_H_)
#undef PERFTOOLS_THROW
#ifdef PERFTOOLS_DLL_DECL_DEFINED
#undef PERFTOOLS_DLL_DECL
#undef PERFTOOLS_DLL_DECL_DEFINED
#endif
#endif /* GPERFTOOLS_CONFIG_H_ */
#endif /* #ifndef TCMALLOC_TCMALLOC_H_ */

View File

@ -1,422 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2005, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
// Author: Sanjay Ghemawat
//
// A fast map from addresses to values. Assumes that addresses are
// clustered. The main use is intended to be for heap-profiling.
// May be too memory-hungry for other uses.
//
// We use a user-defined allocator/de-allocator so that we can use
// this data structure during heap-profiling.
//
// IMPLEMENTATION DETAIL:
//
// Some default definitions/parameters:
// * Block -- aligned 128-byte region of the address space
// * Cluster -- aligned 1-MB region of the address space
// * Block-ID -- block-number within a cluster
// * Cluster-ID -- Starting address of cluster divided by cluster size
//
// We use a three-level map to represent the state:
// 1. A hash-table maps from a cluster-ID to the data for that cluster.
// 2. For each non-empty cluster we keep an array indexed by
// block-ID that points to the first entry in the linked-list
// for the block.
// 3. At the bottom, we keep a singly-linked list of all
// entries in a block (for non-empty blocks).
//
// hash table
// +-------------+
// | id->cluster |---> ...
// | ... |
// | id->cluster |---> Cluster
// +-------------+ +-------+ Data for one block
// | nil | +------------------------------------+
// | ----+---|->[addr/value]-->[addr/value]-->... |
// | nil | +------------------------------------+
// | ----+--> ...
// | nil |
// | ... |
// +-------+
//
// Note that we require zero-bytes of overhead for completely empty
// clusters. The minimum space requirement for a cluster is the size
// of the hash-table entry plus a pointer value for each block in
// the cluster. Empty blocks impose no extra space requirement.
//
// The cost of a lookup is:
// a. A hash-table lookup to find the cluster
// b. An array access in the cluster structure
// c. A traversal over the linked-list for a block
#ifndef BASE_ADDRESSMAP_INL_H_
#define BASE_ADDRESSMAP_INL_H_
#include "config.h"
#include <stddef.h>
#include <string.h>
#if defined HAVE_STDINT_H
#include <stdint.h> // to get uint16_t (ISO naming madness)
#elif defined HAVE_INTTYPES_H
#include <inttypes.h> // another place uint16_t might be defined
#else
#include <sys/types.h> // our last best hope
#endif
// This class is thread-unsafe -- that is, instances of this class can
// not be accessed concurrently by multiple threads -- because the
// callback function for Iterate() may mutate contained values. If the
// callback functions you pass do not mutate their Value* argument,
// AddressMap can be treated as thread-compatible -- that is, it's
// safe for multiple threads to call "const" methods on this class,
// but not safe for one thread to call const methods on this class
// while another thread is calling non-const methods on the class.
template <class Value>
class AddressMap {
public:
typedef void* (*Allocator)(size_t size);
typedef void (*DeAllocator)(void* ptr);
typedef const void* Key;
// Create an AddressMap that uses the specified allocator/deallocator.
// The allocator/deallocator should behave like malloc/free.
// For instance, the allocator does not need to return initialized memory.
AddressMap(Allocator alloc, DeAllocator dealloc);
~AddressMap();
// If the map contains an entry for "key", return it. Else return NULL.
inline const Value* Find(Key key) const;
inline Value* FindMutable(Key key);
// Insert <key,value> into the map. Any old value associated
// with key is forgotten.
void Insert(Key key, Value value);
// Remove any entry for key in the map. If an entry was found
// and removed, stores the associated value in "*removed_value"
// and returns true. Else returns false.
bool FindAndRemove(Key key, Value* removed_value);
// Similar to Find but we assume that keys are addresses of non-overlapping
// memory ranges whose sizes are given by size_func.
// If the map contains a range into which "key" points
// (at its start or inside of it, but not at the end),
// return the address of the associated value
// and store its key in "*res_key".
// Else return NULL.
// max_size specifies the largest range size that may currently exist.
typedef size_t (*ValueSizeFunc)(const Value& v);
const Value* FindInside(ValueSizeFunc size_func, size_t max_size,
Key key, Key* res_key);
// Iterate over the address map calling 'callback'
// for all stored key-value pairs and passing 'arg' to it.
// We don't use the full Closure/Callback machinery, to avoid adding
// unnecessary dependencies to this class, which has low-level uses.
template<class Type>
inline void Iterate(void (*callback)(Key, Value*, Type), Type arg) const;
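// For illustration, a minimal usage sketch (malloc/free satisfy the
// Allocator/DeAllocator signatures; the callback name is hypothetical):
//
//   static void Visit(const void* key, int* value, int /*arg*/) {
//     // inspect or mutate *value here
//   }
//   ...
//   AddressMap<int> map(&malloc, &free);
//   int x = 5;
//   map.Insert(&x, 42);
//   const int* v = map.Find(&x);      // -> points at 42
//   int removed = 0;
//   map.FindAndRemove(&x, &removed);  // removed == 42, entry gone
//   map.Iterate(&Visit, 0);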
private:
typedef uintptr_t Number;
// The implementation assumes that addresses inserted into the map
// will be clustered. We take advantage of this fact by splitting
// up the address-space into blocks and using a linked-list entry
// for each block.
// Size of each block. There is one linked-list for each block, so
// do not make the block-size too big. Otherwise, a lot of time
// will be spent traversing linked lists.
static const int kBlockBits = 7;
static const int kBlockSize = 1 << kBlockBits;
// Entry kept in per-block linked-list
struct Entry {
Entry* next;
Key key;
Value value;
};
// We further group a sequence of consecutive blocks into a cluster.
// The data for a cluster is represented as a dense array of
// linked-lists, one list per contained block.
static const int kClusterBits = 13;
static const Number kClusterSize = 1 << (kBlockBits + kClusterBits);
static const int kClusterBlocks = 1 << kClusterBits;
// We use a simple chaining hash-table to represent the clusters.
struct Cluster {
Cluster* next; // Next cluster in hash table chain
Number id; // Cluster ID
Entry* blocks[kClusterBlocks]; // Per-block linked-lists
};
// Number of hash-table entries. With the block-size/cluster-size
// defined above, each cluster covers 1 MB, so a 4K-entry
// hash-table will give an average hash-chain length of 1 for 4GB of
// in-use memory.
static const int kHashBits = 12;
static const int kHashSize = 1 << kHashBits;
// Number of entry objects allocated at a time
static const int ALLOC_COUNT = 64;
Cluster** hashtable_; // The hash-table
Entry* free_; // Free list of unused Entry objects
// Multiplicative hash function:
// The value "kHashMultiplier" is the bottom 32 bits of
// int((sqrt(5)-1)/2 * 2^32)
// This is a good multiplier as suggested in CLR, Knuth. The hash
// value is taken to be the top "k" bits of the bottom 32 bits
// of the multiplied value.
static const uint32_t kHashMultiplier = 2654435769u;
static int HashInt(Number x) {
// Multiply by a constant and take the top bits of the result.
const uint32_t m = static_cast<uint32_t>(x) * kHashMultiplier;
return static_cast<int>(m >> (32 - kHashBits));
}
// Find cluster object for specified address. If not found
// and "create" is true, create the object. If not found
// and "create" is false, return NULL.
//
// This method is bitwise-const if create is false.
Cluster* FindCluster(Number address, bool create) {
// Look in hashtable
const Number cluster_id = address >> (kBlockBits + kClusterBits);
const int h = HashInt(cluster_id);
for (Cluster* c = hashtable_[h]; c != NULL; c = c->next) {
if (c->id == cluster_id) {
return c;
}
}
// Create cluster if necessary
if (create) {
Cluster* c = New<Cluster>(1);
c->id = cluster_id;
c->next = hashtable_[h];
hashtable_[h] = c;
return c;
}
return NULL;
}
// Return the block ID for an address within its cluster
static int BlockID(Number address) {
return (address >> kBlockBits) & (kClusterBlocks - 1);
}
//--------------------------------------------------------------
// Memory management -- we keep all objects we allocate linked
// together in a singly linked list so we can get rid of them
// when we are all done. Furthermore, we allow the client to
// pass in custom memory allocator/deallocator routines.
//--------------------------------------------------------------
struct Object {
Object* next;
// The real data starts here
};
Allocator alloc_; // The allocator
DeAllocator dealloc_; // The deallocator
Object* allocated_; // List of allocated objects
// Allocates a zeroed array of T with length "num". Also inserts
// the allocated block into a linked list so it can be deallocated
// when we are all done.
template <class T> T* New(int num) {
void* ptr = (*alloc_)(sizeof(Object) + num*sizeof(T));
memset(ptr, 0, sizeof(Object) + num*sizeof(T));
Object* obj = reinterpret_cast<Object*>(ptr);
obj->next = allocated_;
allocated_ = obj;
return reinterpret_cast<T*>(reinterpret_cast<Object*>(ptr) + 1);
}
};
// More implementation details follow:
template <class Value>
AddressMap<Value>::AddressMap(Allocator alloc, DeAllocator dealloc)
: free_(NULL),
alloc_(alloc),
dealloc_(dealloc),
allocated_(NULL) {
hashtable_ = New<Cluster*>(kHashSize);
}
template <class Value>
AddressMap<Value>::~AddressMap() {
// De-allocate all of the objects we allocated
for (Object* obj = allocated_; obj != NULL; /**/) {
Object* next = obj->next;
(*dealloc_)(obj);
obj = next;
}
}
template <class Value>
inline const Value* AddressMap<Value>::Find(Key key) const {
return const_cast<AddressMap*>(this)->FindMutable(key);
}
template <class Value>
inline Value* AddressMap<Value>::FindMutable(Key key) {
const Number num = reinterpret_cast<Number>(key);
const Cluster* const c = FindCluster(num, false/*do not create*/);
if (c != NULL) {
for (Entry* e = c->blocks[BlockID(num)]; e != NULL; e = e->next) {
if (e->key == key) {
return &e->value;
}
}
}
return NULL;
}
template <class Value>
void AddressMap<Value>::Insert(Key key, Value value) {
const Number num = reinterpret_cast<Number>(key);
Cluster* const c = FindCluster(num, true/*create*/);
// Look in linked-list for this block
const int block = BlockID(num);
for (Entry* e = c->blocks[block]; e != NULL; e = e->next) {
if (e->key == key) {
e->value = value;
return;
}
}
// Create entry
if (free_ == NULL) {
// Allocate a new batch of entries and add to free-list
Entry* array = New<Entry>(ALLOC_COUNT);
for (int i = 0; i < ALLOC_COUNT-1; i++) {
array[i].next = &array[i+1];
}
array[ALLOC_COUNT-1].next = free_;
free_ = &array[0];
}
Entry* e = free_;
free_ = e->next;
e->key = key;
e->value = value;
e->next = c->blocks[block];
c->blocks[block] = e;
}
template <class Value>
bool AddressMap<Value>::FindAndRemove(Key key, Value* removed_value) {
const Number num = reinterpret_cast<Number>(key);
Cluster* const c = FindCluster(num, false/*do not create*/);
if (c != NULL) {
for (Entry** p = &c->blocks[BlockID(num)]; *p != NULL; p = &(*p)->next) {
Entry* e = *p;
if (e->key == key) {
*removed_value = e->value;
*p = e->next; // Remove e from linked-list
e->next = free_; // Add e to free-list
free_ = e;
return true;
}
}
}
return false;
}
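// Usage sketch (illustrative, not part of the original file): the map is
// keyed by raw addresses and takes a malloc-style allocator pair, so plain
// malloc/free work when no custom allocation is needed.
#include <stdlib.h> // assumed available for malloc/free in this sketch
inline void AddressMapUsageSketch() {
AddressMap<int> map(&malloc, &free);
int object; // any address can serve as a key
map.Insert(&object, 42);
const int* found = map.Find(&object); // non-NULL and *found == 42
int removed = 0;
if (map.FindAndRemove(&object, &removed)) {
// removed == 42; the Entry is recycled onto the internal free list
}
(void)found;
}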
template <class Value>
const Value* AddressMap<Value>::FindInside(ValueSizeFunc size_func,
size_t max_size,
Key key,
Key* res_key) {
const Number key_num = reinterpret_cast<Number>(key);
Number num = key_num; // we'll move this back through the clusters
while (1) {
const Cluster* c = FindCluster(num, false/*do not create*/);
if (c != NULL) {
while (1) {
const int block = BlockID(num);
bool had_smaller_key = false;
for (const Entry* e = c->blocks[block]; e != NULL; e = e->next) {
const Number e_num = reinterpret_cast<Number>(e->key);
if (e_num <= key_num) {
if (e_num == key_num || // to handle 0-sized ranges
key_num < e_num + (*size_func)(e->value)) {
*res_key = e->key;
return &e->value;
}
had_smaller_key = true;
}
}
if (had_smaller_key) return NULL; // got a range before 'key'
// and it did not contain 'key'
if (block == 0) break;
// try address-wise previous block
num |= kBlockSize - 1; // start at the last addr of prev block
num -= kBlockSize;
if (key_num - num > max_size) return NULL;
}
}
if (num < kClusterSize) return NULL; // first cluster
// go to address-wise previous cluster to try
num |= kClusterSize - 1; // start at the last block of previous cluster
num -= kClusterSize;
if (key_num - num > max_size) return NULL;
// Having max_size to limit the search is crucial: else
// we have to traverse a lot of empty clusters (or blocks).
// We can avoid needing max_size if we put clusters into
// a search tree, but performance suffers considerably
// when that approach is implemented with std::set.
}
}
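// Sketch of the FindInside contract (illustrative, not part of the
// original file, and assuming Key is a raw void* pointer as the
// reinterpret_casts above suggest): each value records the size of its
// range, size_func recovers it, and any probe address in
// [start, start + size) maps back to the entry inserted at start.
struct RangeInfoSketch { size_t size; };
inline size_t RangeSizeSketch(const RangeInfoSketch& v) { return v.size; }
inline void FindInsideUsageSketch(AddressMap<RangeInfoSketch>* map,
char* start) {
RangeInfoSketch info = { 128 };
map->Insert(start, info);
void* res_key = NULL;
const RangeInfoSketch* hit = map->FindInside(&RangeSizeSketch,
1 << 20 /* largest range size in existence */, start + 100, &res_key);
// hit != NULL and res_key == start; probing start + 128 (one past the
// end of the range) would return NULL instead.
(void)hit;
}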
template <class Value>
template <class Type>
inline void AddressMap<Value>::Iterate(void (*callback)(Key, Value*, Type),
Type arg) const {
// We could optimize this by traversing only non-empty clusters and/or blocks
// but it does not speed up heap-checker noticeably.
for (int h = 0; h < kHashSize; ++h) {
for (const Cluster* c = hashtable_[h]; c != NULL; c = c->next) {
for (int b = 0; b < kClusterBlocks; ++b) {
for (Entry* e = c->blocks[b]; e != NULL; e = e->next) {
callback(e->key, &e->value, arg);
}
}
}
}
}
#endif // BASE_ADDRESSMAP_INL_H_
@ -1,84 +0,0 @@
// Copyright (c) 2011, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Author: Alexander Levitskiy
//
// Generalizes the plethora of ARM flavors available to an easier-to-manage set.
// Defs reference is at https://wiki.edubuntu.org/ARM/Thumb2PortingHowto
#ifndef ARM_INSTRUCTION_SET_SELECT_H_
#define ARM_INSTRUCTION_SET_SELECT_H_
#if defined(__ARM_ARCH_8A__)
# define ARMV8 1
#endif
#if defined(ARMV8) || \
defined(__ARM_ARCH_7__) || \
defined(__ARM_ARCH_7R__) || \
defined(__ARM_ARCH_7A__)
# define ARMV7 1
#endif
#if defined(ARMV7) || \
defined(__ARM_ARCH_6__) || \
defined(__ARM_ARCH_6J__) || \
defined(__ARM_ARCH_6K__) || \
defined(__ARM_ARCH_6Z__) || \
defined(__ARM_ARCH_6T2__) || \
defined(__ARM_ARCH_6ZK__)
# define ARMV6 1
#endif
#if defined(ARMV6) || \
defined(__ARM_ARCH_5T__) || \
defined(__ARM_ARCH_5E__) || \
defined(__ARM_ARCH_5TE__) || \
defined(__ARM_ARCH_5TEJ__)
# define ARMV5 1
#endif
#if defined(ARMV5) || \
defined(__ARM_ARCH_4__) || \
defined(__ARM_ARCH_4T__)
# define ARMV4 1
#endif
#if defined(ARMV4) || \
defined(__ARM_ARCH_3__) || \
defined(__ARM_ARCH_3M__)
# define ARMV3 1
#endif
#if defined(ARMV3) || \
defined(__ARM_ARCH_2__)
# define ARMV2 1
#endif
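// Usage sketch (illustrative, not part of the original header): once the
// __ARM_ARCH_* flavors are collapsed into a single ARMV<n> level, feature
// tests become one-liners; LDREX/STREX first appeared in ARMv6:
#if defined(ARMV6)
# define SKETCH_HAS_LDREX_STREX 1
#else
# define SKETCH_HAS_LDREX_STREX 0 // pre-v6: kernel-helper atomics instead
#endif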
#endif // ARM_INSTRUCTION_SET_SELECT_H_
@ -1,228 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2003, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
//
// Author: Lei Zhang, Sasha Levitskiy
//
// This file is an internal atomic implementation, use base/atomicops.h instead.
//
// LinuxKernelCmpxchg is from Google Gears.
#ifndef BASE_ATOMICOPS_INTERNALS_ARM_GENERIC_H_
#define BASE_ATOMICOPS_INTERNALS_ARM_GENERIC_H_
#include <stdio.h>
#include <stdlib.h>
#include "base/basictypes.h"
typedef int32_t Atomic32;
namespace base {
namespace subtle {
typedef int64_t Atomic64;
// 0xffff0fc0 is the hard coded address of a function provided by
// the kernel which implements an atomic compare-exchange. On older
// ARM architecture revisions (pre-v6) this may be implemented using
// a syscall. This address is stable, and in active use (hard coded)
// by at least glibc-2.7 and the Android C library.
// pLinuxKernelCmpxchg has both acquire and release barrier semantics.
typedef Atomic32 (*LinuxKernelCmpxchgFunc)(Atomic32 old_value,
Atomic32 new_value,
volatile Atomic32* ptr);
LinuxKernelCmpxchgFunc pLinuxKernelCmpxchg ATTRIBUTE_WEAK =
(LinuxKernelCmpxchgFunc) 0xffff0fc0;
typedef void (*LinuxKernelMemoryBarrierFunc)(void);
LinuxKernelMemoryBarrierFunc pLinuxKernelMemoryBarrier ATTRIBUTE_WEAK =
(LinuxKernelMemoryBarrierFunc) 0xffff0fa0;
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 prev_value = *ptr;
do {
if (!pLinuxKernelCmpxchg(old_value, new_value,
const_cast<Atomic32*>(ptr))) {
return old_value;
}
prev_value = *ptr;
} while (prev_value == old_value);
return prev_value;
}
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
Atomic32 old_value;
do {
old_value = *ptr;
} while (pLinuxKernelCmpxchg(old_value, new_value,
const_cast<Atomic32*>(ptr)));
return old_value;
}
inline Atomic32 Acquire_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
// pLinuxKernelCmpxchg already has acquire and release barrier semantics.
return NoBarrier_AtomicExchange(ptr, new_value);
}
inline Atomic32 Release_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
// pLinuxKernelCmpxchg already has acquire and release barrier semantics.
return NoBarrier_AtomicExchange(ptr, new_value);
}
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
}
inline void MemoryBarrier() {
pLinuxKernelMemoryBarrier();
}
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
MemoryBarrier();
}
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
MemoryBarrier();
*ptr = value;
}
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
return *ptr;
}
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
Atomic32 value = *ptr;
MemoryBarrier();
return value;
}
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
MemoryBarrier();
return *ptr;
}
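// Sketch (not part of the original file): the primitives above already
// suffice for a tiny test-and-set spinlock. Acquire_AtomicExchange keeps
// the critical section from floating above the lock acquisition, and
// Release_Store keeps it from floating below the unlock.
inline void SpinLockSketch(volatile Atomic32* lock) {
while (Acquire_AtomicExchange(lock, 1) != 0) {
// spin until the previous owner stores 0
}
}
inline void SpinUnlockSketch(volatile Atomic32* lock) {
Release_Store(lock, 0);
}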
// 64-bit versions are not implemented yet.
inline void NotImplementedFatalError(const char *function_name) {
fprintf(stderr, "64-bit %s() not implemented on this platform\n",
function_name);
abort();
}
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
NotImplementedFatalError("NoBarrier_CompareAndSwap");
return 0;
}
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
NotImplementedFatalError("NoBarrier_AtomicExchange");
return 0;
}
inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
// pLinuxKernelCmpxchg already has acquire and release barrier semantics.
return NoBarrier_AtomicExchange(ptr, new_value);
}
inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
// pLinuxKernelCmpxchg already has acquire and release barrier semantics.
return NoBarrier_AtomicExchange(ptr, new_value);
}
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
NotImplementedFatalError("NoBarrier_Store");
}
inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
NotImplementedFatalError("Acquire_Store64");
}
inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
NotImplementedFatalError("Release_Store");
}
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
NotImplementedFatalError("NoBarrier_Load");
return 0;
}
inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
NotImplementedFatalError("Atomic64 Acquire_Load");
return 0;
}
inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
NotImplementedFatalError("Atomic64 Release_Load");
return 0;
}
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
NotImplementedFatalError("Atomic64 Acquire_CompareAndSwap");
return 0;
}
inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
NotImplementedFatalError("Atomic64 Release_CompareAndSwap");
return 0;
}
} // namespace base::subtle
} // namespace base
#endif // BASE_ATOMICOPS_INTERNALS_ARM_GENERIC_H_
@ -1,330 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2011, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
//
// Author: Sasha Levitskiy
// based on atomicops-internals by Sanjay Ghemawat
//
// This file is an internal atomic implementation, use base/atomicops.h instead.
//
// This code implements ARM atomics for architectures V6 and newer.
#ifndef BASE_ATOMICOPS_INTERNALS_ARM_V6PLUS_H_
#define BASE_ATOMICOPS_INTERNALS_ARM_V6PLUS_H_
#include <stdio.h>
#include <stdlib.h>
#include "base/basictypes.h" // For COMPILE_ASSERT
// The LDREXD and STREXD instructions are available in all ARM v7 variants
// and above. In v6, only some variants support them. For simplicity, we only
// use exclusive 64-bit load/store in V7 or above.
#if defined(ARMV7)
# define BASE_ATOMICOPS_HAS_LDREXD_AND_STREXD
#endif
typedef int32_t Atomic32;
namespace base {
namespace subtle {
typedef int64_t Atomic64;
// 32-bit low-level ops
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 oldval, res;
do {
__asm__ __volatile__(
"ldrex %1, [%3]\n"
"mov %0, #0\n"
"teq %1, %4\n"
// The following IT (if-then) instruction is needed for the subsequent
// conditional instruction STREXEQ when compiling in THUMB mode.
// In ARM mode, the compiler/assembler will not generate any code for it.
"it eq\n"
"strexeq %0, %5, [%3]\n"
: "=&r" (res), "=&r" (oldval), "+Qo" (*ptr)
: "r" (ptr), "Ir" (old_value), "r" (new_value)
: "cc");
} while (res);
return oldval;
}
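// Sketch (not part of the original file): strex may fail spuriously (an
// interrupt or another access clearing the exclusive monitor), but the
// retry loop above absorbs that, so the CAS composes into other
// read-modify-write operations in the usual way:
inline Atomic32 NoBarrier_FetchOrSketch(volatile Atomic32* ptr,
Atomic32 bits) {
Atomic32 old_value;
do {
old_value = *ptr;
} while (NoBarrier_CompareAndSwap(ptr, old_value, old_value | bits)
!= old_value);
return old_value;
}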
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
Atomic32 tmp, old;
__asm__ __volatile__(
"1:\n"
"ldrex %1, [%2]\n"
"strex %0, %3, [%2]\n"
"teq %0, #0\n"
"bne 1b"
: "=&r" (tmp), "=&r" (old)
: "r" (ptr), "r" (new_value)
: "cc", "memory");
return old;
}
inline void MemoryBarrier() {
#if !defined(ARMV7)
uint32_t dest = 0;
__asm__ __volatile__("mcr p15,0,%0,c7,c10,5" :"=&r"(dest) : : "memory");
#else
__asm__ __volatile__("dmb" : : : "memory");
#endif
}
inline Atomic32 Acquire_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
Atomic32 old_value = NoBarrier_AtomicExchange(ptr, new_value);
MemoryBarrier();
return old_value;
}
inline Atomic32 Release_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
MemoryBarrier();
return NoBarrier_AtomicExchange(ptr, new_value);
}
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 value = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
MemoryBarrier();
return value;
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
MemoryBarrier();
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
}
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
MemoryBarrier();
}
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
MemoryBarrier();
*ptr = value;
}
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
return *ptr;
}
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
Atomic32 value = *ptr;
MemoryBarrier();
return value;
}
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
MemoryBarrier();
return *ptr;
}
// 64-bit versions are only available if LDREXD and STREXD instructions
// are available.
#ifdef BASE_ATOMICOPS_HAS_LDREXD_AND_STREXD
#define BASE_HAS_ATOMIC64 1
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 oldval, res;
do {
__asm__ __volatile__(
"ldrexd %1, [%3]\n"
"mov %0, #0\n"
"teq %Q1, %Q4\n"
// The following IT (if-then) instructions are needed for the subsequent
// conditional instructions when compiling in THUMB mode.
// In ARM mode, the compiler/assembler will not generate any code for them.
"it eq\n"
"teqeq %R1, %R4\n"
"it eq\n"
"strexdeq %0, %5, [%3]\n"
: "=&r" (res), "=&r" (oldval), "+Q" (*ptr)
: "r" (ptr), "Ir" (old_value), "r" (new_value)
: "cc");
} while (res);
return oldval;
}
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
int store_failed;
Atomic64 old;
__asm__ __volatile__(
"1:\n"
"ldrexd %1, [%2]\n"
"strexd %0, %3, [%2]\n"
"teq %0, #0\n"
"bne 1b"
: "=&r" (store_failed), "=&r" (old)
: "r" (ptr), "r" (new_value)
: "cc", "memory");
return old;
}
inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
Atomic64 old_value = NoBarrier_AtomicExchange(ptr, new_value);
MemoryBarrier();
return old_value;
}
inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
MemoryBarrier();
return NoBarrier_AtomicExchange(ptr, new_value);
}
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
int store_failed;
Atomic64 dummy;
__asm__ __volatile__(
"1:\n"
// Dummy load to lock cache line.
"ldrexd %1, [%3]\n"
"strexd %0, %2, [%3]\n"
"teq %0, #0\n"
"bne 1b"
: "=&r" (store_failed), "=&r"(dummy)
: "r"(value), "r" (ptr)
: "cc", "memory");
}
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
Atomic64 res;
__asm__ __volatile__(
"ldrexd %0, [%1]\n"
"clrex\n"
: "=r" (res)
: "r"(ptr), "Q"(*ptr));
return res;
}
#else // BASE_ATOMICOPS_HAS_LDREXD_AND_STREXD
inline void NotImplementedFatalError(const char *function_name) {
fprintf(stderr, "64-bit %s() not implemented on this platform\n",
function_name);
abort();
}
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
NotImplementedFatalError("NoBarrier_CompareAndSwap");
return 0;
}
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
NotImplementedFatalError("NoBarrier_AtomicExchange");
return 0;
}
inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
NotImplementedFatalError("Acquire_AtomicExchange");
return 0;
}
inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
NotImplementedFatalError("Release_AtomicExchange");
return 0;
}
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
NotImplementedFatalError("NoBarrier_Store");
}
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
NotImplementedFatalError("NoBarrier_Load");
return 0;
}
#endif // BASE_ATOMICOPS_HAS_LDREXD_AND_STREXD
inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
NoBarrier_Store(ptr, value);
MemoryBarrier();
}
inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
MemoryBarrier();
NoBarrier_Store(ptr, value);
}
inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
Atomic64 value = NoBarrier_Load(ptr);
MemoryBarrier();
return value;
}
inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
MemoryBarrier();
return NoBarrier_Load(ptr);
}
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 value = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
MemoryBarrier();
return value;
}
inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
MemoryBarrier();
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
} // namespace subtle ends
} // namespace base ends
#endif // BASE_ATOMICOPS_INTERNALS_ARM_V6PLUS_H_
@ -1,203 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2014, Linaro
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
//
// Author: Riku Voipio, riku.voipio@linaro.org
//
// atomic primitives implemented with gcc atomic intrinsics:
// http://gcc.gnu.org/onlinedocs/gcc/_005f_005fatomic-Builtins.html
//
#ifndef BASE_ATOMICOPS_INTERNALS_GCC_GENERIC_H_
#define BASE_ATOMICOPS_INTERNALS_GCC_GENERIC_H_
#include <stdio.h>
#include <stdlib.h>
#include "base/basictypes.h"
typedef int32_t Atomic32;
namespace base {
namespace subtle {
typedef int64_t Atomic64;
inline void MemoryBarrier() {
__sync_synchronize();
}
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 prev_value = old_value;
__atomic_compare_exchange_n(ptr, &prev_value, new_value,
0, __ATOMIC_RELAXED, __ATOMIC_RELAXED);
return prev_value;
}
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
return __atomic_exchange_n(const_cast<Atomic32*>(ptr), new_value, __ATOMIC_RELAXED);
}
inline Atomic32 Acquire_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
return __atomic_exchange_n(const_cast<Atomic32*>(ptr), new_value, __ATOMIC_ACQUIRE);
}
inline Atomic32 Release_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
return __atomic_exchange_n(const_cast<Atomic32*>(ptr), new_value, __ATOMIC_RELEASE);
}
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 prev_value = old_value;
__atomic_compare_exchange_n(ptr, &prev_value, new_value,
0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
return prev_value;
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 prev_value = old_value;
__atomic_compare_exchange_n(ptr, &prev_value, new_value,
0, __ATOMIC_RELEASE, __ATOMIC_RELAXED);
return prev_value;
}
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
}
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
MemoryBarrier();
}
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
MemoryBarrier();
*ptr = value;
}
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
return *ptr;
}
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
Atomic32 value = *ptr;
MemoryBarrier();
return value;
}
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
MemoryBarrier();
return *ptr;
}
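// Sketch (not part of the original file): with the __atomic builtins,
// relaxed read-modify-write operations compose directly; an increment
// that returns the new value is a single builtin call:
inline Atomic32 AtomicIncrementSketch(volatile Atomic32* ptr,
Atomic32 increment) {
return __atomic_add_fetch(const_cast<Atomic32*>(ptr), increment,
__ATOMIC_RELAXED);
}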
// 64-bit versions
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 prev_value = old_value;
__atomic_compare_exchange_n(ptr, &prev_value, new_value,
0, __ATOMIC_RELAXED, __ATOMIC_RELAXED);
return prev_value;
}
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
return __atomic_exchange_n(const_cast<Atomic64*>(ptr), new_value, __ATOMIC_RELAXED);
}
inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
return __atomic_exchange_n(const_cast<Atomic64*>(ptr), new_value, __ATOMIC_ACQUIRE);
}
inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
return __atomic_exchange_n(const_cast<Atomic64*>(ptr), new_value, __ATOMIC_RELEASE);
}
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 prev_value = old_value;
__atomic_compare_exchange_n(ptr, &prev_value, new_value,
0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
return prev_value;
}
inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 prev_value = old_value;
__atomic_compare_exchange_n(ptr, &prev_value, new_value,
0, __ATOMIC_RELEASE, __ATOMIC_RELAXED);
return prev_value;
}
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value;
}
inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value;
MemoryBarrier();
}
inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
MemoryBarrier();
*ptr = value;
}
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
return *ptr;
}
inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
Atomic64 value = *ptr;
MemoryBarrier();
return value;
}
inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
MemoryBarrier();
return *ptr;
}
} // namespace base::subtle
} // namespace base
#endif // BASE_ATOMICOPS_INTERNALS_GCC_GENERIC_H_
@ -1,437 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2008, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ---
*/
// Implementation of atomic operations for ppc-linux. This file should not
// be included directly. Clients should instead include
// "base/atomicops.h".
#ifndef BASE_ATOMICOPS_INTERNALS_LINUXPPC_H_
#define BASE_ATOMICOPS_INTERNALS_LINUXPPC_H_
typedef int32_t Atomic32;
#ifdef __PPC64__
#define BASE_HAS_ATOMIC64 1
#endif
namespace base {
namespace subtle {
static inline void _sync(void) {
__asm__ __volatile__("sync": : : "memory");
}
static inline void _lwsync(void) {
// gcc defines __NO_LWSYNC__ when appropriate; see
// http://gcc.gnu.org/ml/gcc-patches/2006-11/msg01238.html
#ifdef __NO_LWSYNC__
__asm__ __volatile__("msync": : : "memory");
#else
__asm__ __volatile__("lwsync": : : "memory");
#endif
}
static inline void _isync(void) {
__asm__ __volatile__("isync": : : "memory");
}
static inline Atomic32 OSAtomicAdd32(Atomic32 amount, Atomic32 *value) {
Atomic32 t;
__asm__ __volatile__(
"1: lwarx %0,0,%3\n\
add %0,%2,%0\n\
stwcx. %0,0,%3 \n\
bne- 1b"
: "=&r" (t), "+m" (*value)
: "r" (amount), "r" (value)
: "cc");
return t;
}
static inline Atomic32 OSAtomicAdd32Barrier(Atomic32 amount, Atomic32 *value) {
Atomic32 t;
_lwsync();
t = OSAtomicAdd32(amount, value);
// This is based on the code snippet in the architecture manual (Vol
// 2, Appendix B). It's a little tricky: correctness depends on the
// fact that the code right before this (in OSAtomicAdd32) has a
// conditional branch with a data dependency on the update.
// Otherwise, we'd have to use sync.
_isync();
return t;
}
static inline bool OSAtomicCompareAndSwap32(Atomic32 old_value,
Atomic32 new_value,
Atomic32 *value) {
Atomic32 prev;
__asm__ __volatile__(
"1: lwarx %0,0,%2\n\
cmpw 0,%0,%3\n\
bne- 2f\n\
stwcx. %4,0,%2\n\
bne- 1b\n\
2:"
: "=&r" (prev), "+m" (*value)
: "r" (value), "r" (old_value), "r" (new_value)
: "cc");
return prev == old_value;
}
static inline Atomic32 OSAtomicCompareAndSwap32Acquire(Atomic32 old_value,
Atomic32 new_value,
Atomic32 *value) {
Atomic32 t;
t = OSAtomicCompareAndSwap32(old_value, new_value, value);
// This is based on the code snippet in the architecture manual (Vol
// 2, Appendix B). It's a little tricky: correctness depends on the
// fact that the code right before this (in
// OSAtomicCompareAndSwap32) has a conditional branch with a data
// dependency on the update. Otherwise, we'd have to use sync.
_isync();
return t;
}
static inline Atomic32 OSAtomicCompareAndSwap32Release(Atomic32 old_value,
Atomic32 new_value,
Atomic32 *value) {
_lwsync();
return OSAtomicCompareAndSwap32(old_value, new_value, value);
}
typedef int64_t Atomic64;
inline void MemoryBarrier() {
// This can't be _lwsync(); we need to order the immediately
// preceding stores against any load that may follow, but lwsync
// doesn't guarantee that.
_sync();
}
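// Sketch (not part of the original file) of why the full sync matters:
// in the store->load ("Dekker") pattern below, lwsync would not order
// each thread's store against its later load, so both threads could read
// 0; the sync-based MemoryBarrier() above rules that outcome out.
static volatile Atomic32 dekker_flag1_sketch = 0;
static volatile Atomic32 dekker_flag2_sketch = 0;
inline Atomic32 DekkerThread1Sketch() {
dekker_flag1_sketch = 1;
MemoryBarrier(); // orders the store above before the load below
return dekker_flag2_sketch;
}
inline Atomic32 DekkerThread2Sketch() {
dekker_flag2_sketch = 1;
MemoryBarrier();
return dekker_flag1_sketch;
}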
// 32-bit Versions.
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32 *ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 prev_value;
do {
if (OSAtomicCompareAndSwap32(old_value, new_value,
const_cast<Atomic32*>(ptr))) {
return old_value;
}
prev_value = *ptr;
} while (prev_value == old_value);
return prev_value;
}
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32 *ptr,
Atomic32 new_value) {
Atomic32 old_value;
do {
old_value = *ptr;
} while (!OSAtomicCompareAndSwap32(old_value, new_value,
const_cast<Atomic32*>(ptr)));
return old_value;
}
inline Atomic32 Acquire_AtomicExchange(volatile Atomic32 *ptr,
Atomic32 new_value) {
Atomic32 old_value;
do {
old_value = *ptr;
} while (!OSAtomicCompareAndSwap32Acquire(old_value, new_value,
const_cast<Atomic32*>(ptr)));
return old_value;
}
inline Atomic32 Release_AtomicExchange(volatile Atomic32 *ptr,
Atomic32 new_value) {
Atomic32 old_value;
do {
old_value = *ptr;
} while (!OSAtomicCompareAndSwap32Release(old_value, new_value,
const_cast<Atomic32*>(ptr)));
return old_value;
}
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32 *ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 prev_value;
do {
if (OSAtomicCompareAndSwap32Acquire(old_value, new_value,
const_cast<Atomic32*>(ptr))) {
return old_value;
}
prev_value = *ptr;
} while (prev_value == old_value);
return prev_value;
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32 *ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 prev_value;
do {
if (OSAtomicCompareAndSwap32Release(old_value, new_value,
const_cast<Atomic32*>(ptr))) {
return old_value;
}
prev_value = *ptr;
} while (prev_value == old_value);
return prev_value;
}
#ifdef __PPC64__
// 64-bit Versions.
static inline Atomic64 OSAtomicAdd64(Atomic64 amount, Atomic64 *value) {
Atomic64 t;
__asm__ __volatile__(
"1: ldarx %0,0,%3\n\
add %0,%2,%0\n\
stdcx. %0,0,%3 \n\
bne- 1b"
: "=&r" (t), "+m" (*value)
: "r" (amount), "r" (value)
: "cc");
return t;
}
static inline Atomic64 OSAtomicAdd64Barrier(Atomic64 amount, Atomic64 *value) {
Atomic64 t;
_lwsync();
t = OSAtomicAdd64(amount, value);
// This is based on the code snippet in the architecture manual (Vol
// 2, Appendix B). It's a little tricky: correctness depends on the
// fact that the code right before this (in OSAtomicAdd64) has a
// conditional branch with a data dependency on the update.
// Otherwise, we'd have to use sync.
_isync();
return t;
}
static inline bool OSAtomicCompareAndSwap64(Atomic64 old_value,
Atomic64 new_value,
Atomic64 *value) {
Atomic64 prev;
__asm__ __volatile__(
"1: ldarx %0,0,%2\n\
cmpd 0,%0,%3\n\
bne- 2f\n\
stdcx. %4,0,%2\n\
bne- 1b\n\
2:"
: "=&r" (prev), "+m" (*value)
: "r" (value), "r" (old_value), "r" (new_value)
: "cc");
return prev == old_value;
}
static inline Atomic64 OSAtomicCompareAndSwap64Acquire(Atomic64 old_value,
Atomic64 new_value,
Atomic64 *value) {
Atomic64 t;
t = OSAtomicCompareAndSwap64(old_value, new_value, value);
// This is based on the code snippet in the architecture manual (Vol
// 2, Appendix B). It's a little tricky: correctness depends on the
// fact that the code right before this (in
// OSAtomicCompareAndSwap64) has a conditional branch with a data
// dependency on the update. Otherwise, we'd have to use sync.
_isync();
return t;
}
static inline Atomic64 OSAtomicCompareAndSwap64Release(Atomic64 old_value,
Atomic64 new_value,
Atomic64 *value) {
_lwsync();
return OSAtomicCompareAndSwap64(old_value, new_value, value);
}
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64 *ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 prev_value;
do {
if (OSAtomicCompareAndSwap64(old_value, new_value,
const_cast<Atomic64*>(ptr))) {
return old_value;
}
prev_value = *ptr;
} while (prev_value == old_value);
return prev_value;
}
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64 *ptr,
Atomic64 new_value) {
Atomic64 old_value;
do {
old_value = *ptr;
} while (!OSAtomicCompareAndSwap64(old_value, new_value,
const_cast<Atomic64*>(ptr)));
return old_value;
}
inline Atomic64 Acquire_AtomicExchange(volatile Atomic64 *ptr,
Atomic64 new_value) {
Atomic64 old_value;
do {
old_value = *ptr;
} while (!OSAtomicCompareAndSwap64Acquire(old_value, new_value,
const_cast<Atomic64*>(ptr)));
return old_value;
}
inline Atomic64 Release_AtomicExchange(volatile Atomic64 *ptr,
Atomic64 new_value) {
Atomic64 old_value;
do {
old_value = *ptr;
} while (!OSAtomicCompareAndSwap64Release(old_value, new_value,
const_cast<Atomic64*>(ptr)));
return old_value;
}
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64 *ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 prev_value;
do {
if (OSAtomicCompareAndSwap64Acquire(old_value, new_value,
const_cast<Atomic64*>(ptr))) {
return old_value;
}
prev_value = *ptr;
} while (prev_value == old_value);
return prev_value;
}
inline Atomic64 Release_CompareAndSwap(volatile Atomic64 *ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 prev_value;
do {
if (OSAtomicCompareAndSwap64Release(old_value, new_value,
const_cast<Atomic64*>(ptr))) {
return old_value;
}
prev_value = *ptr;
} while (prev_value == old_value);
return prev_value;
}
#endif
inline void NoBarrier_Store(volatile Atomic32 *ptr, Atomic32 value) {
*ptr = value;
}
inline void Acquire_Store(volatile Atomic32 *ptr, Atomic32 value) {
*ptr = value;
// This can't be _lwsync(); we need to order the immediately
// preceding stores against any load that may follow, but lwsync
// doesn't guarantee that.
_sync();
}
inline void Release_Store(volatile Atomic32 *ptr, Atomic32 value) {
_lwsync();
*ptr = value;
}
inline Atomic32 NoBarrier_Load(volatile const Atomic32 *ptr) {
return *ptr;
}
inline Atomic32 Acquire_Load(volatile const Atomic32 *ptr) {
Atomic32 value = *ptr;
_lwsync();
return value;
}
inline Atomic32 Release_Load(volatile const Atomic32 *ptr) {
// This can't be _lwsync(); we need to order the immediately
// preceding stores against any load that may follow, but lwsync
// doesn't guarantee that.
_sync();
return *ptr;
}
#ifdef __PPC64__
// 64-bit Versions.
inline void NoBarrier_Store(volatile Atomic64 *ptr, Atomic64 value) {
*ptr = value;
}
inline void Acquire_Store(volatile Atomic64 *ptr, Atomic64 value) {
*ptr = value;
// This can't be _lwsync(); we need to order the immediately
// preceding stores against any load that may follow, but lwsync
// doesn't guarantee that.
_sync();
}
inline void Release_Store(volatile Atomic64 *ptr, Atomic64 value) {
_lwsync();
*ptr = value;
}
inline Atomic64 NoBarrier_Load(volatile const Atomic64 *ptr) {
return *ptr;
}
inline Atomic64 Acquire_Load(volatile const Atomic64 *ptr) {
Atomic64 value = *ptr;
_lwsync();
return value;
}
inline Atomic64 Release_Load(volatile const Atomic64 *ptr) {
// This can't be _lwsync(); we need to order the immediately
// preceding stores against any load that may follow, but lwsync
// doesn't guarantee that.
_sync();
return *ptr;
}
#endif
} // namespace base::subtle
} // namespace base
#endif // BASE_ATOMICOPS_INTERNALS_LINUXPPC_H_
@ -1,370 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2006, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// Implementation of atomic operations for Mac OS X. This file should not
// be included directly. Clients should instead include
// "base/atomicops.h".
#ifndef BASE_ATOMICOPS_INTERNALS_MACOSX_H_
#define BASE_ATOMICOPS_INTERNALS_MACOSX_H_
typedef int32_t Atomic32;
// MacOS uses long for intptr_t, so AtomicWord and Atomic32 are always
// different types on the Mac, even when they are the same size. Similarly,
// on __ppc64__, AtomicWord and Atomic64 are always different. Thus, we need
// explicit casting.
#ifdef __LP64__
#define AtomicWordCastType base::subtle::Atomic64
#else
#define AtomicWordCastType Atomic32
#endif
#if defined(__LP64__) || defined(__i386__)
#define BASE_HAS_ATOMIC64 1 // Use only in tests and base/atomic*
#endif
#include <libkern/OSAtomic.h>
namespace base {
namespace subtle {
#if !defined(__LP64__) && defined(__ppc__)
// The Mac 64-bit OSAtomic implementations are not available for 32-bit PowerPC,
// while the underlying assembly instructions are available on only some
// implementations of PowerPC.
// The following inline functions will fail with the error message at compile
// time ONLY IF they are called. So it is safe to use this header if user
// code only calls AtomicWord and Atomic32 operations.
//
// NOTE(vchen): Implementation notes to implement the atomic ops below may
// be found in "PowerPC Virtual Environment Architecture, Book II,
// Version 2.02", January 28, 2005, Appendix B, page 46. Unfortunately,
// extra care must be taken to ensure data are properly 8-byte aligned, and
// that data are returned correctly according to Mac OS X ABI specs.
inline int64_t OSAtomicCompareAndSwap64(
int64_t oldValue, int64_t newValue, int64_t *theValue) {
__asm__ __volatile__(
"_OSAtomicCompareAndSwap64_not_supported_for_32_bit_ppc\n\t");
return 0;
}
inline int64_t OSAtomicAdd64(int64_t theAmount, int64_t *theValue) {
__asm__ __volatile__(
"_OSAtomicAdd64_not_supported_for_32_bit_ppc\n\t");
return 0;
}
inline int64_t OSAtomicCompareAndSwap64Barrier(
int64_t oldValue, int64_t newValue, int64_t *theValue) {
int64_t prev = OSAtomicCompareAndSwap64(oldValue, newValue, theValue);
OSMemoryBarrier();
return prev;
}
inline int64_t OSAtomicAdd64Barrier(
int64_t theAmount, int64_t *theValue) {
int64_t new_val = OSAtomicAdd64(theAmount, theValue);
OSMemoryBarrier();
return new_val;
}
#endif
typedef int64_t Atomic64;
inline void MemoryBarrier() {
OSMemoryBarrier();
}
// 32-bit Versions.
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32 *ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 prev_value;
do {
if (OSAtomicCompareAndSwap32(old_value, new_value,
const_cast<Atomic32*>(ptr))) {
return old_value;
}
prev_value = *ptr;
} while (prev_value == old_value);
return prev_value;
}
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32 *ptr,
Atomic32 new_value) {
Atomic32 old_value;
do {
old_value = *ptr;
} while (!OSAtomicCompareAndSwap32(old_value, new_value,
const_cast<Atomic32*>(ptr)));
return old_value;
}
inline Atomic32 Acquire_AtomicExchange(volatile Atomic32 *ptr,
Atomic32 new_value) {
Atomic32 old_value;
do {
old_value = *ptr;
} while (!OSAtomicCompareAndSwap32Barrier(old_value, new_value,
const_cast<Atomic32*>(ptr)));
return old_value;
}
inline Atomic32 Release_AtomicExchange(volatile Atomic32 *ptr,
Atomic32 new_value) {
return Acquire_AtomicExchange(ptr, new_value);
}
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32 *ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 prev_value;
do {
if (OSAtomicCompareAndSwap32Barrier(old_value, new_value,
const_cast<Atomic32*>(ptr))) {
return old_value;
}
prev_value = *ptr;
} while (prev_value == old_value);
return prev_value;
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32 *ptr,
Atomic32 old_value,
Atomic32 new_value) {
return Acquire_CompareAndSwap(ptr, old_value, new_value);
}
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
}
inline void Acquire_Store(volatile Atomic32 *ptr, Atomic32 value) {
*ptr = value;
MemoryBarrier();
}
inline void Release_Store(volatile Atomic32 *ptr, Atomic32 value) {
MemoryBarrier();
*ptr = value;
}
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
return *ptr;
}
inline Atomic32 Acquire_Load(volatile const Atomic32 *ptr) {
Atomic32 value = *ptr;
MemoryBarrier();
return value;
}
inline Atomic32 Release_Load(volatile const Atomic32 *ptr) {
MemoryBarrier();
return *ptr;
}
// 64-bit version
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64 *ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 prev_value;
do {
if (OSAtomicCompareAndSwap64(old_value, new_value,
const_cast<Atomic64*>(ptr))) {
return old_value;
}
prev_value = *ptr;
} while (prev_value == old_value);
return prev_value;
}
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64 *ptr,
Atomic64 new_value) {
Atomic64 old_value;
do {
old_value = *ptr;
} while (!OSAtomicCompareAndSwap64(old_value, new_value,
const_cast<Atomic64*>(ptr)));
return old_value;
}
inline Atomic64 Acquire_AtomicExchange(volatile Atomic64 *ptr,
Atomic64 new_value) {
Atomic64 old_value;
do {
old_value = *ptr;
} while (!OSAtomicCompareAndSwap64Barrier(old_value, new_value,
const_cast<Atomic64*>(ptr)));
return old_value;
}
inline Atomic64 Release_AtomicExchange(volatile Atomic64 *ptr,
Atomic64 new_value) {
return Acquire_AtomicExchange(ptr, new_value);
}
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64 *ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 prev_value;
do {
if (OSAtomicCompareAndSwap64Barrier(old_value, new_value,
const_cast<Atomic64*>(ptr))) {
return old_value;
}
prev_value = *ptr;
} while (prev_value == old_value);
return prev_value;
}
inline Atomic64 Release_CompareAndSwap(volatile Atomic64 *ptr,
Atomic64 old_value,
Atomic64 new_value) {
// The lib kern interface does not distinguish between
// Acquire and Release memory barriers; they are equivalent.
return Acquire_CompareAndSwap(ptr, old_value, new_value);
}
#ifdef __LP64__
// 64-bit implementation on 64-bit platform
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value;
}
inline void Acquire_Store(volatile Atomic64 *ptr, Atomic64 value) {
*ptr = value;
MemoryBarrier();
}
inline void Release_Store(volatile Atomic64 *ptr, Atomic64 value) {
MemoryBarrier();
*ptr = value;
}
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
return *ptr;
}
inline Atomic64 Acquire_Load(volatile const Atomic64 *ptr) {
Atomic64 value = *ptr;
MemoryBarrier();
return value;
}
inline Atomic64 Release_Load(volatile const Atomic64 *ptr) {
MemoryBarrier();
return *ptr;
}
#else
// 64-bit implementation on 32-bit platform
#if defined(__ppc__)
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
__asm__ __volatile__(
"_NoBarrier_Store_not_supported_for_32_bit_ppc\n\t");
}
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
__asm__ __volatile__(
"_NoBarrier_Load_not_supported_for_32_bit_ppc\n\t");
return 0;
}
#elif defined(__i386__)
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
__asm__ __volatile__("movq %1, %%mm0\n\t" // Use mmx reg for 64-bit atomic
"movq %%mm0, %0\n\t" // moves (ptr could be read-only)
"emms\n\t" // Reset FP registers
: "=m" (*ptr)
: "m" (value)
: // mark the FP stack and mmx registers as clobbered
"st", "st(1)", "st(2)", "st(3)", "st(4)",
"st(5)", "st(6)", "st(7)", "mm0", "mm1",
"mm2", "mm3", "mm4", "mm5", "mm6", "mm7");
}
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
Atomic64 value;
__asm__ __volatile__("movq %1, %%mm0\n\t" // Use mmx reg for 64-bit atomic
"movq %%mm0, %0\n\t" // moves (ptr could be read-only)
"emms\n\t" // Reset FP registers
: "=m" (value)
: "m" (*ptr)
: // mark the FP stack and mmx registers as clobbered
"st", "st(1)", "st(2)", "st(3)", "st(4)",
"st(5)", "st(6)", "st(7)", "mm0", "mm1",
"mm2", "mm3", "mm4", "mm5", "mm6", "mm7");
return value;
}
#endif
inline void Acquire_Store(volatile Atomic64 *ptr, Atomic64 value) {
NoBarrier_Store(ptr, value);
MemoryBarrier();
}
inline void Release_Store(volatile Atomic64 *ptr, Atomic64 value) {
MemoryBarrier();
NoBarrier_Store(ptr, value);
}
inline Atomic64 Acquire_Load(volatile const Atomic64 *ptr) {
Atomic64 value = NoBarrier_Load(ptr);
MemoryBarrier();
return value;
}
inline Atomic64 Release_Load(volatile const Atomic64 *ptr) {
MemoryBarrier();
return NoBarrier_Load(ptr);
}
#endif // __LP64__
} // namespace base::subtle
} // namespace base
#endif // BASE_ATOMICOPS_INTERNALS_MACOSX_H_
@ -1,323 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2013, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// Author: Jovan Zelincevic <jovan.zelincevic@imgtec.com>
// based on atomicops-internals by Sanjay Ghemawat
// This file is an internal atomic implementation, use base/atomicops.h instead.
//
// This code implements MIPS atomics.
#ifndef BASE_ATOMICOPS_INTERNALS_MIPS_H_
#define BASE_ATOMICOPS_INTERNALS_MIPS_H_
#if (_MIPS_ISA == _MIPS_ISA_MIPS64)
#define BASE_HAS_ATOMIC64 1
#endif
typedef int32_t Atomic32;
namespace base {
namespace subtle {
// Atomically execute:
// result = *ptr;
// if (*ptr == old_value)
// *ptr = new_value;
// return result;
//
// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
// Always return the old value of "*ptr"
//
// This routine implies no memory barriers.
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value)
{
Atomic32 prev, tmp;
__asm__ volatile(
".set push \n"
".set noreorder \n"
"1: \n"
"ll %0, %5 \n" // prev = *ptr
"bne %0, %3, 2f \n" // if (prev != old_value) goto 2
" move %2, %4 \n" // tmp = new_value
"sc %2, %1 \n" // *ptr = tmp (with atomic check)
"beqz %2, 1b \n" // start again on atomic error
" nop \n" // delay slot nop
"2: \n"
".set pop \n"
: "=&r" (prev), "=m" (*ptr),
"=&r" (tmp)
: "Ir" (old_value), "r" (new_value),
"m" (*ptr)
: "memory"
);
return prev;
}
// Atomically store new_value into *ptr, returning the previous value held in
// *ptr. This routine implies no memory barriers.
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value)
{
Atomic32 temp, old;
__asm__ volatile(
".set push \n"
".set noreorder \n"
"1: \n"
"ll %1, %2 \n" // old = *ptr
"move %0, %3 \n" // temp = new_value
"sc %0, %2 \n" // *ptr = temp (with atomic check)
"beqz %0, 1b \n" // start again on atomic error
" nop \n" // delay slot nop
".set pop \n"
: "=&r" (temp), "=&r" (old),
"=m" (*ptr)
: "r" (new_value), "m" (*ptr)
: "memory"
);
return old;
}
inline void MemoryBarrier()
{
__asm__ volatile("sync" : : : "memory");
}
// "Acquire" operations
// ensure that no later memory access can be reordered ahead of the operation.
// "Release" operations ensure that no previous memory access can be reordered
// after the operation. "Barrier" operations have both "Acquire" and "Release"
// semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory
// access.
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value)
{
Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
MemoryBarrier();
return res;
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value)
{
MemoryBarrier();
Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
return res;
}
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value)
{
*ptr = value;
}
inline Atomic32 Acquire_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value)
{
Atomic32 old_value = NoBarrier_AtomicExchange(ptr, new_value);
MemoryBarrier();
return old_value;
}
inline Atomic32 Release_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value)
{
MemoryBarrier();
return NoBarrier_AtomicExchange(ptr, new_value);
}
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value)
{
*ptr = value;
MemoryBarrier();
}
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value)
{
MemoryBarrier();
*ptr = value;
}
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr)
{
return *ptr;
}
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr)
{
Atomic32 value = *ptr;
MemoryBarrier();
return value;
}
inline Atomic32 Release_Load(volatile const Atomic32* ptr)
{
MemoryBarrier();
return *ptr;
}
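// Sketch (not part of the original file): Release_Store / Acquire_Load
// above form the message-passing idiom. A consumer that observes the
// flag set by the producer is guaranteed to also observe the payload.
static Atomic32 mp_payload_sketch = 0;
static volatile Atomic32 mp_ready_sketch = 0;
inline void ProducerSketch()
{
mp_payload_sketch = 42; // plain store, published below
Release_Store(&mp_ready_sketch, 1); // barrier, then flag store
}
inline Atomic32 ConsumerSketch()
{
if (Acquire_Load(&mp_ready_sketch) == 1) // flag load, then barrier
return mp_payload_sketch; // guaranteed to be 42
return 0; // not published yet
}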
#if (_MIPS_ISA == _MIPS_ISA_MIPS64) || (_MIPS_SIM == _MIPS_SIM_ABI64)
typedef int64_t Atomic64;
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value)
{
Atomic64 prev, tmp;
__asm__ volatile(
".set push \n"
".set noreorder \n"
"1: \n"
"lld %0, %5 \n" // prev = *ptr
"bne %0, %3, 2f \n" // if (prev != old_value) goto 2
" move %2, %4 \n" // tmp = new_value
"scd %2, %1 \n" // *ptr = tmp (with atomic check)
"beqz %2, 1b \n" // start again on atomic error
" nop \n" // delay slot nop
"2: \n"
".set pop \n"
: "=&r" (prev), "=m" (*ptr),
"=&r" (tmp)
: "Ir" (old_value), "r" (new_value),
"m" (*ptr)
: "memory"
);
return prev;
}
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value)
{
Atomic64 temp, old;
__asm__ volatile(
".set push \n"
".set noreorder \n"
"1: \n"
"lld %1, %2 \n" // old = *ptr
"move %0, %3 \n" // temp = new_value
"scd %0, %2 \n" // *ptr = temp (with atomic check)
"beqz %0, 1b \n" // start again on atomic error
" nop \n" // delay slot nop
".set pop \n"
: "=&r" (temp), "=&r" (old),
"=m" (*ptr)
: "r" (new_value), "m" (*ptr)
: "memory"
);
return old;
}
inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value)
{
Atomic64 old_value = NoBarrier_AtomicExchange(ptr, new_value);
MemoryBarrier();
return old_value;
}
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value)
{
Atomic64 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
MemoryBarrier();
return res;
}
inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value)
{
MemoryBarrier();
Atomic64 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
return res;
}
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value)
{
*ptr = value;
}
inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value)
{
MemoryBarrier();
return NoBarrier_AtomicExchange(ptr, new_value);
}
inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value)
{
*ptr = value;
MemoryBarrier();
}
inline void Release_Store(volatile Atomic64* ptr, Atomic64 value)
{
MemoryBarrier();
*ptr = value;
}
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr)
{
return *ptr;
}
inline Atomic64 Acquire_Load(volatile const Atomic64* ptr)
{
Atomic64 value = *ptr;
MemoryBarrier();
return value;
}
inline Atomic64 Release_Load(volatile const Atomic64* ptr)
{
MemoryBarrier();
return *ptr;
}
#endif
} // namespace base::subtle
} // namespace base
#endif // BASE_ATOMICOPS_INTERNALS_MIPS_H_

View File

@ -1,457 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2006, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ---
* Author: Sanjay Ghemawat
*/
// Implementation of atomic operations using Windows API
// functions. This file should not be included directly. Clients
// should instead include "base/atomicops.h".
#ifndef BASE_ATOMICOPS_INTERNALS_WINDOWS_H_
#define BASE_ATOMICOPS_INTERNALS_WINDOWS_H_
#include <stdio.h>
#include <stdlib.h>
#include "base/basictypes.h" // For COMPILE_ASSERT
typedef int32 Atomic32;
#if defined(_WIN64)
#define BASE_HAS_ATOMIC64 1 // Use only in tests and base/atomic*
#endif
namespace base {
namespace subtle {
typedef int64 Atomic64;
// 32-bit low-level operations on any platform
extern "C" {
// We use windows intrinsics when we can (they seem to be supported
// well on MSVC 8.0 and above). Unfortunately, in some
// environments, <windows.h> and <intrin.h> have conflicting
// declarations of some other intrinsics, breaking compilation:
// http://connect.microsoft.com/VisualStudio/feedback/details/262047
// Therefore, we simply declare the relevant intrinsics ourself.
// MinGW has a bug in the header files where it doesn't indicate the
// first argument is volatile -- they're not up to date. See
// http://readlist.com/lists/lists.sourceforge.net/mingw-users/0/3861.html
// We have to const_cast away the volatile to avoid compiler warnings.
// TODO(csilvers): remove this once MinGW has updated MinGW/include/winbase.h
#if defined(__MINGW32__)
inline LONG FastInterlockedCompareExchange(volatile LONG* ptr,
LONG newval, LONG oldval) {
return ::InterlockedCompareExchange(const_cast<LONG*>(ptr), newval, oldval);
}
inline LONG FastInterlockedExchange(volatile LONG* ptr, LONG newval) {
return ::InterlockedExchange(const_cast<LONG*>(ptr), newval);
}
inline LONG FastInterlockedExchangeAdd(volatile LONG* ptr, LONG increment) {
return ::InterlockedExchangeAdd(const_cast<LONG*>(ptr), increment);
}
#elif _MSC_VER >= 1400 // intrinsics didn't work so well before MSVC 8.0
// Unfortunately, in some environments, <windows.h> and <intrin.h>
// have conflicting declarations of some intrinsics, breaking
// compilation. So we declare the intrinsics we need ourselves. See
// http://connect.microsoft.com/VisualStudio/feedback/details/262047
LONG _InterlockedCompareExchange(volatile LONG* ptr, LONG newval, LONG oldval);
#pragma intrinsic(_InterlockedCompareExchange)
inline LONG FastInterlockedCompareExchange(volatile LONG* ptr,
LONG newval, LONG oldval) {
return _InterlockedCompareExchange(ptr, newval, oldval);
}
LONG _InterlockedExchange(volatile LONG* ptr, LONG newval);
#pragma intrinsic(_InterlockedExchange)
inline LONG FastInterlockedExchange(volatile LONG* ptr, LONG newval) {
return _InterlockedExchange(ptr, newval);
}
LONG _InterlockedExchangeAdd(volatile LONG* ptr, LONG increment);
#pragma intrinsic(_InterlockedExchangeAdd)
inline LONG FastInterlockedExchangeAdd(volatile LONG* ptr, LONG increment) {
return _InterlockedExchangeAdd(ptr, increment);
}
#else
inline LONG FastInterlockedCompareExchange(volatile LONG* ptr,
LONG newval, LONG oldval) {
return ::InterlockedCompareExchange(ptr, newval, oldval);
}
inline LONG FastInterlockedExchange(volatile LONG* ptr, LONG newval) {
return ::InterlockedExchange(ptr, newval);
}
inline LONG FastInterlockedExchangeAdd(volatile LONG* ptr, LONG increment) {
return ::InterlockedExchangeAdd(ptr, increment);
}
#endif // ifdef __MINGW32__
} // extern "C"
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
LONG result = FastInterlockedCompareExchange(
reinterpret_cast<volatile LONG*>(ptr),
static_cast<LONG>(new_value),
static_cast<LONG>(old_value));
return static_cast<Atomic32>(result);
}
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
LONG result = FastInterlockedExchange(
reinterpret_cast<volatile LONG*>(ptr),
static_cast<LONG>(new_value));
return static_cast<Atomic32>(result);
}
inline Atomic32 Acquire_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
// FastInterlockedExchange has both acquire and release memory barriers.
return NoBarrier_AtomicExchange(ptr, new_value);
}
inline Atomic32 Release_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
// FastInterlockedExchange has both acquire and release memory barriers.
return NoBarrier_AtomicExchange(ptr, new_value);
}
} // namespace base::subtle
} // namespace base
// In msvc8/vs2005, winnt.h already contains a definition for
// MemoryBarrier in the global namespace. Add it there for earlier
// versions and forward to it from within the namespace.
#if !(defined(_MSC_VER) && _MSC_VER >= 1400)
inline void MemoryBarrier() {
Atomic32 value = 0;
base::subtle::NoBarrier_AtomicExchange(&value, 0);
// actually acts as a barrier in this implementation

}
#endif
namespace base {
namespace subtle {
inline void MemoryBarrier() {
::MemoryBarrier();
}
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
}
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
Acquire_AtomicExchange(ptr, value);
}
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value; // works w/o barrier for current Intel chips as of June 2005
// See comments in Atomic64 version of Release_Store() below.
}
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
return *ptr;
}
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
Atomic32 value = *ptr;
return value;
}
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
MemoryBarrier();
return *ptr;
}
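// Illustrative sketch (hypothetical helpers, editor's addition) of a
// test-and-set spinlock on the operations above. The exchange on lock
// entry is acquire-ordered because FastInterlockedExchange is a full
// barrier; Release_Store hands the protected writes to the next holder.
#if 0
inline void ExampleLock(volatile Atomic32* lock) {
  while (Acquire_AtomicExchange(lock, 1) != 0) {
    // spin: a previous value of 1 means someone else holds the lock
  }
}
inline void ExampleUnlock(volatile Atomic32* lock) {
  Release_Store(lock, 0);
}
#endif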
// 64-bit operations
#if defined(_WIN64) || defined(__MINGW64__)
// 64-bit low-level operations on 64-bit platform.
COMPILE_ASSERT(sizeof(Atomic64) == sizeof(PVOID), atomic_word_is_atomic);
// These are the intrinsics needed for 64-bit operations. Similar to the
// 32-bit case above.
extern "C" {
#if defined(__MINGW64__)
inline PVOID FastInterlockedCompareExchangePointer(volatile PVOID* ptr,
PVOID newval, PVOID oldval) {
return ::InterlockedCompareExchangePointer(const_cast<PVOID*>(ptr),
newval, oldval);
}
inline PVOID FastInterlockedExchangePointer(volatile PVOID* ptr, PVOID newval) {
return ::InterlockedExchangePointer(const_cast<PVOID*>(ptr), newval);
}
inline LONGLONG FastInterlockedExchangeAdd64(volatile LONGLONG* ptr,
LONGLONG increment) {
return ::InterlockedExchangeAdd64(const_cast<LONGLONG*>(ptr), increment);
}
#elif _MSC_VER >= 1400 // intrinsics didn't work so well before MSVC 8.0
// Like above, we need to declare the intrinsics ourselves.
PVOID _InterlockedCompareExchangePointer(volatile PVOID* ptr,
PVOID newval, PVOID oldval);
#pragma intrinsic(_InterlockedCompareExchangePointer)
inline PVOID FastInterlockedCompareExchangePointer(volatile PVOID* ptr,
PVOID newval, PVOID oldval) {
return _InterlockedCompareExchangePointer(const_cast<PVOID*>(ptr),
newval, oldval);
}
PVOID _InterlockedExchangePointer(volatile PVOID* ptr, PVOID newval);
#pragma intrinsic(_InterlockedExchangePointer)
inline PVOID FastInterlockedExchangePointer(volatile PVOID* ptr, PVOID newval) {
return _InterlockedExchangePointer(const_cast<PVOID*>(ptr), newval);
}
LONGLONG _InterlockedExchangeAdd64(volatile LONGLONG* ptr, LONGLONG increment);
#pragma intrinsic(_InterlockedExchangeAdd64)
inline LONGLONG FastInterlockedExchangeAdd64(volatile LONGLONG* ptr,
LONGLONG increment) {
return _InterlockedExchangeAdd64(const_cast<LONGLONG*>(ptr), increment);
}
#else
inline PVOID FastInterlockedCompareExchangePointer(volatile PVOID* ptr,
PVOID newval, PVOID oldval) {
return ::InterlockedCompareExchangePointer(ptr, newval, oldval);
}
inline PVOID FastInterlockedExchangePointer(volatile PVOID* ptr, PVOID newval) {
return ::InterlockedExchangePointer(ptr, newval);
}
inline LONGLONG FastInterlockedExchangeAdd64(volatile LONGLONG* ptr,
LONGLONG increment) {
return ::InterlockedExchangeAdd64(ptr, increment);
}
#endif // ifdef __MINGW64__
} // extern "C"
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
PVOID result = FastInterlockedCompareExchangePointer(
reinterpret_cast<volatile PVOID*>(ptr),
reinterpret_cast<PVOID>(new_value), reinterpret_cast<PVOID>(old_value));
return reinterpret_cast<Atomic64>(result);
}
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
PVOID result = FastInterlockedExchangePointer(
reinterpret_cast<volatile PVOID*>(ptr),
reinterpret_cast<PVOID>(new_value));
return reinterpret_cast<Atomic64>(result);
}
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value;
}
inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
NoBarrier_AtomicExchange(ptr, value);
// acts as a barrier in this implementation
}
inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value; // works w/o barrier for current Intel chips as of June 2005
// When new chips come out, check:
// IA-32 Intel Architecture Software Developer's Manual, Volume 3:
// System Programming Guide, Chapter 7: Multiple-processor management,
// Section 7.2, Memory Ordering.
// Last seen at:
// http://developer.intel.com/design/pentium4/manuals/index_new.htm
}
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
return *ptr;
}
inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
Atomic64 value = *ptr;
return value;
}
inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
MemoryBarrier();
return *ptr;
}
#else // defined(_WIN64) || defined(__MINGW64__)
// 64-bit low-level operations on 32-bit platform
// TODO(vchen): The GNU assembly below must be converted to MSVC inline
// assembly. Then the file should be renamed to ...-x86-msvc.h, probably.
inline void NotImplementedFatalError(const char *function_name) {
fprintf(stderr, "64-bit %s() not implemented on this platform\n",
function_name);
abort();
}
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
#if 0 // Not implemented
Atomic64 prev;
__asm__ __volatile__("movl (%3), %%ebx\n\t" // Move 64-bit new_value into
"movl 4(%3), %%ecx\n\t" // ecx:ebx
"lock; cmpxchg8b %1\n\t" // If edx:eax (old_value) same
: "=A" (prev) // as contents of ptr:
: "m" (*ptr), // ecx:ebx => ptr
"0" (old_value), // else:
"r" (&new_value) // old *ptr => edx:eax
: "memory", "%ebx", "%ecx");
return prev;
#else
NotImplementedFatalError("NoBarrier_CompareAndSwap");
return 0;
#endif
}
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
#if 0 // Not implemented
__asm__ __volatile__(
"movl (%2), %%ebx\n\t" // Move 64-bit new_value into
"movl 4(%2), %%ecx\n\t" // ecx:ebx
"0:\n\t"
"movl %1, %%eax\n\t" // Read contents of ptr into
"movl 4%1, %%edx\n\t" // edx:eax
"lock; cmpxchg8b %1\n\t" // Attempt cmpxchg; if *ptr
"jnz 0b\n\t" // is no longer edx:eax, loop
: "=A" (new_value)
: "m" (*ptr),
"r" (&new_value)
: "memory", "%ebx", "%ecx");
return new_value; // Now it's the previous value.
#else
NotImplementedFatalError("NoBarrier_AtomicExchange");
return 0;
#endif
}
inline void NoBarrier_Store(volatile Atomic64* ptrValue, Atomic64 value)
{
__asm {
movq mm0, value; // Use mmx reg for 64-bit atomic moves
mov eax, ptrValue;
movq [eax], mm0;
emms; // Empty mmx state to enable FP registers
}
}
inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
NoBarrier_AtomicExchange(ptr, value);
// acts as a barrier in this implementation
}
inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
NoBarrier_Store(ptr, value);
}
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptrValue)
{
Atomic64 value;
__asm {
mov eax, ptrValue;
movq mm0, [eax]; // Use mmx reg for 64-bit atomic moves
movq value, mm0;
emms; // Empty mmx state to enable FP registers
}
return value;
}
inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
Atomic64 value = NoBarrier_Load(ptr);
return value;
}
inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
MemoryBarrier();
return NoBarrier_Load(ptr);
}
#endif // defined(_WIN64) || defined(__MINGW64__)
inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
// FastInterlockedExchange has both acquire and release memory barriers.
return NoBarrier_AtomicExchange(ptr, new_value);
}
inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
// FastInterlockedExchange has both acquire and release memory barriers.
return NoBarrier_AtomicExchange(ptr, new_value);
}
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
} // namespace base::subtle
} // namespace base
#endif // BASE_ATOMICOPS_INTERNALS_WINDOWS_H_

View File

@ -1,112 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2007, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ---
* This module gets enough CPU information to optimize the
* atomicops module on x86.
*/
#include "base/atomicops.h"
#include "base/basictypes.h"
#include "base/googleinit.h"
#include "base/logging.h"
#include <string.h>
// This file only makes sense with atomicops-internals-x86.h -- it
// depends on structs that are defined in that file. If atomicops.h
// doesn't sub-include that file, then we aren't needed, and shouldn't
// try to do anything.
#ifdef BASE_ATOMICOPS_INTERNALS_X86_H_
// Inline cpuid instruction. In PIC compilations, %ebx contains the address
// of the global offset table. To avoid breaking such executables, this code
// must preserve that register's value across cpuid instructions.
#if defined(__i386__)
#define cpuid(a, b, c, d, inp) \
asm ("mov %%ebx, %%edi\n" \
"cpuid\n" \
"xchg %%edi, %%ebx\n" \
: "=a" (a), "=D" (b), "=c" (c), "=d" (d) : "a" (inp))
#elif defined (__x86_64__)
#define cpuid(a, b, c, d, inp) \
asm ("mov %%rbx, %%rdi\n" \
"cpuid\n" \
"xchg %%rdi, %%rbx\n" \
: "=a" (a), "=D" (b), "=c" (c), "=d" (d) : "a" (inp))
#endif
#if defined(cpuid) // initialize the struct only on x86
// Set the flags so that code will run correctly and conservatively
// until InitGoogle() is called.
struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures = {
false, // no SSE2
false // no cmpxchg16b
};
// Initialize the AtomicOps_Internalx86CPUFeatures struct.
static void AtomicOps_Internalx86CPUFeaturesInit() {
uint32 eax;
uint32 ebx;
uint32 ecx;
uint32 edx;
// Get vendor string (issue CPUID with eax = 0)
cpuid(eax, ebx, ecx, edx, 0);
char vendor[13];
memcpy(vendor, &ebx, 4);
memcpy(vendor + 4, &edx, 4);
memcpy(vendor + 8, &ecx, 4);
vendor[12] = 0;
// get feature flags in ecx/edx, and family/model in eax
cpuid(eax, ebx, ecx, edx, 1);
int family = (eax >> 8) & 0xf; // family and model fields
int model = (eax >> 4) & 0xf;
if (family == 0xf) { // use extended family and model fields
family += (eax >> 20) & 0xff;
model += ((eax >> 16) & 0xf) << 4;
}
// edx bit 26 is SSE2 which we use to tell us whether we can use mfence
AtomicOps_Internalx86CPUFeatures.has_sse2 = ((edx >> 26) & 1);
// ecx bit 13 indicates whether the cmpxchg16b instruction is supported
AtomicOps_Internalx86CPUFeatures.has_cmpxchg16b = ((ecx >> 13) & 1);
}
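// Illustrative sketch (hypothetical function, editor's addition) of how
// the flags collected above are consumed; the real consumer is
// MemoryBarrier() in atomicops-internals-x86.h.
#if 0
void ExampleUseFeatureFlags() {
  if (AtomicOps_Internalx86CPUFeatures.has_sse2) {
    __asm__ __volatile__("mfence" : : : "memory");  // SSE2 path
  } else {
    // pre-SSE2 CPUs fall back to a locked instruction instead
  }
}
#endif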
REGISTER_MODULE_INITIALIZER(atomicops_x86, {
AtomicOps_Internalx86CPUFeaturesInit();
});
#endif
#endif /* ifdef BASE_ATOMICOPS_INTERNALS_X86_H_ */

View File

@ -1,391 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2006, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ---
* Author: Sanjay Ghemawat
*/
// Implementation of atomic operations for x86. This file should not
// be included directly. Clients should instead include
// "base/atomicops.h".
#ifndef BASE_ATOMICOPS_INTERNALS_X86_H_
#define BASE_ATOMICOPS_INTERNALS_X86_H_
#include "base/basictypes.h"
typedef int32_t Atomic32;
#define BASE_HAS_ATOMIC64 1 // Use only in tests and base/atomic*
// NOTE(vchen): x86 does not need to define AtomicWordCastType, because it
// already matches Atomic32 or Atomic64, depending on the platform.
// This struct is not part of the public API of this module; clients may not
// use it.
// Features of this x86. Values may not be correct before main() is run,
// but are set conservatively.
struct AtomicOps_x86CPUFeatureStruct {
bool has_sse2; // Processor has SSE2.
bool has_cmpxchg16b; // Processor supports cmpxchg16b instruction.
};
ATTRIBUTE_VISIBILITY_HIDDEN
extern struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures;
#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
namespace base {
namespace subtle {
typedef int64_t Atomic64;
// 32-bit low-level operations on any platform.
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 prev;
__asm__ __volatile__("lock; cmpxchgl %1,%2"
: "=a" (prev)
: "q" (new_value), "m" (*ptr), "0" (old_value)
: "memory");
return prev;
}
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
__asm__ __volatile__("xchgl %1,%0" // The lock prefix is implicit for xchg.
: "=r" (new_value)
: "m" (*ptr), "0" (new_value)
: "memory");
return new_value; // Now it's the previous value.
}
inline Atomic32 Acquire_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
Atomic32 old_val = NoBarrier_AtomicExchange(ptr, new_value);
return old_val;
}
inline Atomic32 Release_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
// xchgl already has release memory barrier semantics.
return NoBarrier_AtomicExchange(ptr, new_value);
}
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
return x;
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
}
#if defined(__x86_64__)
// 64-bit implementations of memory barrier can be simpler, because
// "mfence" is guaranteed to exist.
inline void MemoryBarrier() {
__asm__ __volatile__("mfence" : : : "memory");
}
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
MemoryBarrier();
}
#else
inline void MemoryBarrier() {
if (AtomicOps_Internalx86CPUFeatures.has_sse2) {
__asm__ __volatile__("mfence" : : : "memory");
} else { // mfence is faster but not present on PIII
Atomic32 x = 0;
Acquire_AtomicExchange(&x, 0);
}
}
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
if (AtomicOps_Internalx86CPUFeatures.has_sse2) {
*ptr = value;
__asm__ __volatile__("mfence" : : : "memory");
} else {
Acquire_AtomicExchange(ptr, value);
}
}
#endif
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
ATOMICOPS_COMPILER_BARRIER();
*ptr = value; // An x86 store acts as a release barrier.
// See comments in Atomic64 version of Release_Store(), below.
}
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
return *ptr;
}
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
Atomic32 value = *ptr; // An x86 load acts as an acquire barrier.
// See comments in Atomic64 version of Release_Store(), below.
ATOMICOPS_COMPILER_BARRIER();
return value;
}
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
MemoryBarrier();
return *ptr;
}
#if defined(__x86_64__)
// 64-bit low-level operations on 64-bit platform.
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 prev;
__asm__ __volatile__("lock; cmpxchgq %1,%2"
: "=a" (prev)
: "q" (new_value), "m" (*ptr), "0" (old_value)
: "memory");
return prev;
}
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
__asm__ __volatile__("xchgq %1,%0" // The lock prefix is implicit for xchg.
: "=r" (new_value)
: "m" (*ptr), "0" (new_value)
: "memory");
return new_value; // Now it's the previous value.
}
inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
Atomic64 old_val = NoBarrier_AtomicExchange(ptr, new_value);
return old_val;
}
inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
// xchgq already has release memory barrier semantics.
return NoBarrier_AtomicExchange(ptr, new_value);
}
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value;
}
inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value;
MemoryBarrier();
}
inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
ATOMICOPS_COMPILER_BARRIER();
*ptr = value; // An x86 store acts as a release barrier
// for current AMD/Intel chips as of Jan 2008.
// See also Acquire_Load(), below.
// When new chips come out, check:
// IA-32 Intel Architecture Software Developer's Manual, Volume 3:
// System Programming Guide, Chapter 7: Multiple-processor management,
// Section 7.2, Memory Ordering.
// Last seen at:
// http://developer.intel.com/design/pentium4/manuals/index_new.htm
//
// x86 stores/loads fail to act as barriers for a few instructions (clflush
// maskmovdqu maskmovq movntdq movnti movntpd movntps movntq) but these are
// not generated by the compiler, and are rare. Users of these instructions
// need to know about cache behaviour in any case since all of these involve
// either flushing cache lines or non-temporal cache hints.
}
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
return *ptr;
}
inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
Atomic64 value = *ptr; // An x86 load acts as an acquire barrier,
// for current AMD/Intel chips as of Jan 2008.
// See also Release_Store(), above.
ATOMICOPS_COMPILER_BARRIER();
return value;
}
inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
MemoryBarrier();
return *ptr;
}
#else // defined(__x86_64__)
// 64-bit low-level operations on 32-bit platform.
#if !((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1))
// For compilers older than gcc 4.1, we use inline asm.
//
// Potential pitfalls:
//
// 1. %ebx points to Global offset table (GOT) with -fPIC.
// We need to preserve this register.
// 2. When explicit registers are used in inline asm, the
// compiler may not be aware of it and might try to reuse
// the same register for another argument which has constraints
// that allow it ("r" for example).
inline Atomic64 __sync_val_compare_and_swap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 prev;
__asm__ __volatile__("push %%ebx\n\t"
"movl (%3), %%ebx\n\t" // Move 64-bit new_value into
"movl 4(%3), %%ecx\n\t" // ecx:ebx
"lock; cmpxchg8b (%1)\n\t"// If edx:eax (old_value) same
"pop %%ebx\n\t"
: "=A" (prev) // as contents of ptr:
: "D" (ptr), // ecx:ebx => ptr
"0" (old_value), // else:
"S" (&new_value) // old *ptr => edx:eax
: "memory", "%ecx");
return prev;
}
#endif // Compiler < gcc-4.1
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_val,
Atomic64 new_val) {
return __sync_val_compare_and_swap(ptr, old_val, new_val);
}
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_val) {
Atomic64 old_val;
do {
old_val = *ptr;
} while (__sync_val_compare_and_swap(ptr, old_val, new_val) != old_val);
return old_val;
}
inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_val) {
Atomic64 old_val = NoBarrier_AtomicExchange(ptr, new_val);
return old_val;
}
inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_val) {
return NoBarrier_AtomicExchange(ptr, new_val);
}
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
__asm__ __volatile__("movq %1, %%mm0\n\t" // Use mmx reg for 64-bit atomic
"movq %%mm0, %0\n\t" // moves (ptr could be read-only)
"emms\n\t" // Empty mmx state/Reset FP regs
: "=m" (*ptr)
: "m" (value)
: // mark the FP stack and mmx registers as clobbered
"st", "st(1)", "st(2)", "st(3)", "st(4)",
"st(5)", "st(6)", "st(7)", "mm0", "mm1",
"mm2", "mm3", "mm4", "mm5", "mm6", "mm7");
}
inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
NoBarrier_Store(ptr, value);
MemoryBarrier();
}
inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
ATOMICOPS_COMPILER_BARRIER();
NoBarrier_Store(ptr, value);
}
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
Atomic64 value;
__asm__ __volatile__("movq %1, %%mm0\n\t" // Use mmx reg for 64-bit atomic
"movq %%mm0, %0\n\t" // moves (ptr could be read-only)
"emms\n\t" // Empty mmx state/Reset FP regs
: "=m" (value)
: "m" (*ptr)
: // mark the FP stack and mmx registers as clobbered
"st", "st(1)", "st(2)", "st(3)", "st(4)",
"st(5)", "st(6)", "st(7)", "mm0", "mm1",
"mm2", "mm3", "mm4", "mm5", "mm6", "mm7");
return value;
}
inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
Atomic64 value = NoBarrier_Load(ptr);
ATOMICOPS_COMPILER_BARRIER();
return value;
}
inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
MemoryBarrier();
return NoBarrier_Load(ptr);
}
#endif // defined(__x86_64__)
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
return x;
}
inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
} // namespace base::subtle
} // namespace base
#undef ATOMICOPS_COMPILER_BARRIER
#endif // BASE_ATOMICOPS_INTERNALS_X86_H_

View File

@ -1,399 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2006, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ---
* Author: Sanjay Ghemawat
*/
// For atomic operations on statistics counters, see atomic_stats_counter.h.
// For atomic operations on sequence numbers, see atomic_sequence_num.h.
// For atomic operations on reference counts, see atomic_refcount.h.
// Some fast atomic operations -- typically with machine-dependent
// implementations. This file may need editing as Google code is
// ported to different architectures.
// The routines exported by this module are subtle. If you use them, even if
// you get the code right, it will depend on careful reasoning about atomicity
// and memory ordering; it will be less readable, and harder to maintain. If
// you plan to use these routines, you should have a good reason, such as solid
// evidence that performance would otherwise suffer, or there being no
// alternative. You should assume only properties explicitly guaranteed by the
// specifications in this file. You are almost certainly _not_ writing code
// just for the x86; if you assume x86 semantics, x86 hardware bugs and
// implementations on other architectures will cause your code to break. If you
// do not know what you are doing, avoid these routines, and use a Mutex.
//
// These following lower-level operations are typically useful only to people
// implementing higher-level synchronization operations like spinlocks,
// mutexes, and condition-variables. They combine CompareAndSwap(), a load, or
// a store with appropriate memory-ordering instructions. "Acquire" operations
// ensure that no later memory access can be reordered ahead of the operation.
// "Release" operations ensure that no previous memory access can be reordered
// after the operation. "Barrier" operations have both "Acquire" and "Release"
// semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory
// access.
//
// It is incorrect to make direct assignments to/from an atomic variable.
// You should use one of the Load or Store routines. The NoBarrier
// versions are provided when no barriers are needed:
// NoBarrier_Store()
// NoBarrier_Load()
// Although there is currently no compiler enforcement, you are encouraged
// to use these. Moreover, if you choose to use base::subtle::Atomic64 type,
// you MUST use one of the Load or Store routines to get correct behavior
// on 32-bit platforms.
//
// The intent is eventually to put all of these routines in namespace
// base::subtle
#ifndef THREAD_ATOMICOPS_H_
#define THREAD_ATOMICOPS_H_
#include "../config.h"
#ifdef HAVE_STDINT_H
#include <stdint.h>
#endif
// ------------------------------------------------------------------------
// Include the platform specific implementations of the types
// and operations listed below. Implementations are to provide Atomic32
// and Atomic64 operations. If there is a mismatch between intptr_t and
// the Atomic32 or Atomic64 types for a platform, the platform-specific header
// should define the macro, AtomicWordCastType in a clause similar to the
// following:
// #if ...pointers are 64 bits...
// # define AtomicWordCastType base::subtle::Atomic64
// #else
// # define AtomicWordCastType Atomic32
// #endif
// TODO(csilvers): figure out ARCH_PIII/ARCH_K8 (perhaps via ./configure?)
// ------------------------------------------------------------------------
#include "base/arm_instruction_set_select.h"
#define GCC_VERSION (__GNUC__ * 10000 \
+ __GNUC_MINOR__ * 100 \
+ __GNUC_PATCHLEVEL__)
#define CLANG_VERSION (__clang_major__ * 10000 \
+ __clang_minor__ * 100 \
+ __clang_patchlevel__)
#if defined(TCMALLOC_PREFER_GCC_ATOMICS) && defined(__GNUC__) && GCC_VERSION >= 40700
#include "base/atomicops-internals-gcc.h"
#elif defined(TCMALLOC_PREFER_GCC_ATOMICS) && defined(__clang__) && CLANG_VERSION >= 30400
#include "base/atomicops-internals-gcc.h"
#elif defined(__MACH__) && defined(__APPLE__)
#include "base/atomicops-internals-macosx.h"
#elif defined(__GNUC__) && defined(ARMV6)
#include "base/atomicops-internals-arm-v6plus.h"
#elif defined(ARMV3)
#include "base/atomicops-internals-arm-generic.h"
#elif defined(__GNUC__) && (defined(__i386) || defined(__x86_64__))
#include "base/atomicops-internals-x86.h"
#elif defined(_WIN32)
#include "base/atomicops-internals-windows.h"
#elif defined(__linux__) && defined(__PPC__)
#include "base/atomicops-internals-linuxppc.h"
#elif defined(__GNUC__) && defined(__mips__)
#include "base/atomicops-internals-mips.h"
#elif defined(__GNUC__) && GCC_VERSION >= 40700
#include "base/atomicops-internals-gcc.h"
#elif defined(__clang__) && CLANG_VERSION >= 30400
#include "base/atomicops-internals-gcc.h"
#else
#error You need to implement atomic operations for this architecture
#endif
// Signed type that can hold a pointer and supports the atomic ops below, as
// well as atomic loads and stores. Instances must be naturally-aligned.
typedef intptr_t AtomicWord;
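// Illustrative sketch (hypothetical globals, editor's addition) of the
// usage rule from the comment at the top of this file: go through the
// Load/Store routines rather than assigning directly, which is also
// what makes base::subtle::Atomic64 safe on 32-bit platforms.
#if 0
AtomicWord g_example = 0;
void ExampleUse() {
  base::subtle::NoBarrier_Store(&g_example, 1);            // not: g_example = 1;
  AtomicWord v = base::subtle::NoBarrier_Load(&g_example); // not: v = g_example;
  (void)v;
}
#endif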
#ifdef AtomicWordCastType
// ------------------------------------------------------------------------
// This section is needed only when explicit type casting is required to
// cast AtomicWord to one of the basic atomic types (Atomic64 or Atomic32).
// It also serves to document the AtomicWord interface.
// ------------------------------------------------------------------------
namespace base {
namespace subtle {
// Atomically execute:
// result = *ptr;
// if (*ptr == old_value)
// *ptr = new_value;
// return result;
//
// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
// Always return the old value of "*ptr"
//
// This routine implies no memory barriers.
inline AtomicWord NoBarrier_CompareAndSwap(volatile AtomicWord* ptr,
AtomicWord old_value,
AtomicWord new_value) {
return NoBarrier_CompareAndSwap(
reinterpret_cast<volatile AtomicWordCastType*>(ptr),
old_value, new_value);
}
// Atomically store new_value into *ptr, returning the previous value held in
// *ptr. This routine implies no memory barriers.
inline AtomicWord NoBarrier_AtomicExchange(volatile AtomicWord* ptr,
AtomicWord new_value) {
return NoBarrier_AtomicExchange(
reinterpret_cast<volatile AtomicWordCastType*>(ptr), new_value);
}
inline AtomicWord Acquire_AtomicExchange(volatile AtomicWord* ptr,
AtomicWord new_value) {
return Acquire_AtomicExchange(
reinterpret_cast<volatile AtomicWordCastType*>(ptr), new_value);
}
inline AtomicWord Release_AtomicExchange(volatile AtomicWord* ptr,
AtomicWord new_value) {
return Release_AtomicExchange(
reinterpret_cast<volatile AtomicWordCastType*>(ptr), new_value);
}
inline AtomicWord Acquire_CompareAndSwap(volatile AtomicWord* ptr,
AtomicWord old_value,
AtomicWord new_value) {
return base::subtle::Acquire_CompareAndSwap(
reinterpret_cast<volatile AtomicWordCastType*>(ptr),
old_value, new_value);
}
inline AtomicWord Release_CompareAndSwap(volatile AtomicWord* ptr,
AtomicWord old_value,
AtomicWord new_value) {
return base::subtle::Release_CompareAndSwap(
reinterpret_cast<volatile AtomicWordCastType*>(ptr),
old_value, new_value);
}
inline void NoBarrier_Store(volatile AtomicWord *ptr, AtomicWord value) {
NoBarrier_Store(
reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
}
inline void Acquire_Store(volatile AtomicWord* ptr, AtomicWord value) {
return base::subtle::Acquire_Store(
reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
}
inline void Release_Store(volatile AtomicWord* ptr, AtomicWord value) {
return base::subtle::Release_Store(
reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
}
inline AtomicWord NoBarrier_Load(volatile const AtomicWord *ptr) {
return NoBarrier_Load(
reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
}
inline AtomicWord Acquire_Load(volatile const AtomicWord* ptr) {
return base::subtle::Acquire_Load(
reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
}
inline AtomicWord Release_Load(volatile const AtomicWord* ptr) {
return base::subtle::Release_Load(
reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
}
} // namespace base::subtle
} // namespace base
#endif // AtomicWordCastType
// ------------------------------------------------------------------------
// Commented out type definitions and method declarations for documentation
// of the interface provided by this module.
// ------------------------------------------------------------------------
#if 0
// Signed 32-bit type that supports the atomic ops below, as well as atomic
// loads and stores. Instances must be naturally aligned. This type differs
// from AtomicWord in 64-bit binaries where AtomicWord is 64-bits.
typedef int32_t Atomic32;
// Corresponding operations on Atomic32
namespace base {
namespace subtle {
// Signed 64-bit type that supports the atomic ops below, as well as atomic
// loads and stores. Instances must be naturally aligned. This type differs
// from AtomicWord in 32-bit binaries where AtomicWord is 32-bits.
typedef int64_t Atomic64;
Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value);
Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value);
Atomic32 Acquire_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value);
Atomic32 Release_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value);
Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value);
Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value);
void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value);
void Acquire_Store(volatile Atomic32* ptr, Atomic32 value);
void Release_Store(volatile Atomic32* ptr, Atomic32 value);
Atomic32 NoBarrier_Load(volatile const Atomic32* ptr);
Atomic32 Acquire_Load(volatile const Atomic32* ptr);
Atomic32 Release_Load(volatile const Atomic32* ptr);
// Corresponding operations on Atomic64
Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value);
Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value);
Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value);
Atomic64 Release_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value);
Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value);
Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value);
void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value);
void Acquire_Store(volatile Atomic64* ptr, Atomic64 value);
void Release_Store(volatile Atomic64* ptr, Atomic64 value);
Atomic64 NoBarrier_Load(volatile const Atomic64* ptr);
Atomic64 Acquire_Load(volatile const Atomic64* ptr);
Atomic64 Release_Load(volatile const Atomic64* ptr);
} // namespace base::subtle
} // namespace base
void MemoryBarrier();
#endif // 0
// ------------------------------------------------------------------------
// The following are to be deprecated when all uses have been changed to
// use the base::subtle namespace.
// ------------------------------------------------------------------------
#ifdef AtomicWordCastType
// AtomicWord versions to be deprecated
inline AtomicWord Acquire_CompareAndSwap(volatile AtomicWord* ptr,
AtomicWord old_value,
AtomicWord new_value) {
return base::subtle::Acquire_CompareAndSwap(ptr, old_value, new_value);
}
inline AtomicWord Release_CompareAndSwap(volatile AtomicWord* ptr,
AtomicWord old_value,
AtomicWord new_value) {
return base::subtle::Release_CompareAndSwap(ptr, old_value, new_value);
}
inline void Acquire_Store(volatile AtomicWord* ptr, AtomicWord value) {
return base::subtle::Acquire_Store(ptr, value);
}
inline void Release_Store(volatile AtomicWord* ptr, AtomicWord value) {
return base::subtle::Release_Store(ptr, value);
}
inline AtomicWord Acquire_Load(volatile const AtomicWord* ptr) {
return base::subtle::Acquire_Load(ptr);
}
inline AtomicWord Release_Load(volatile const AtomicWord* ptr) {
return base::subtle::Release_Load(ptr);
}
#endif // AtomicWordCastType
// 32-bit Acquire/Release operations to be deprecated.
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
return base::subtle::Acquire_CompareAndSwap(ptr, old_value, new_value);
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
return base::subtle::Release_CompareAndSwap(ptr, old_value, new_value);
}
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
base::subtle::Acquire_Store(ptr, value);
}
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
return base::subtle::Release_Store(ptr, value);
}
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
return base::subtle::Acquire_Load(ptr);
}
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
return base::subtle::Release_Load(ptr);
}
#ifdef BASE_HAS_ATOMIC64
// 64-bit Acquire/Release operations to be deprecated.
inline base::subtle::Atomic64 Acquire_CompareAndSwap(
volatile base::subtle::Atomic64* ptr,
base::subtle::Atomic64 old_value, base::subtle::Atomic64 new_value) {
return base::subtle::Acquire_CompareAndSwap(ptr, old_value, new_value);
}
inline base::subtle::Atomic64 Release_CompareAndSwap(
volatile base::subtle::Atomic64* ptr,
base::subtle::Atomic64 old_value, base::subtle::Atomic64 new_value) {
return base::subtle::Release_CompareAndSwap(ptr, old_value, new_value);
}
inline void Acquire_Store(
volatile base::subtle::Atomic64* ptr, base::subtle::Atomic64 value) {
base::subtle::Acquire_Store(ptr, value);
}
inline void Release_Store(
volatile base::subtle::Atomic64* ptr, base::subtle::Atomic64 value) {
return base::subtle::Release_Store(ptr, value);
}
inline base::subtle::Atomic64 Acquire_Load(
volatile const base::subtle::Atomic64* ptr) {
return base::subtle::Acquire_Load(ptr);
}
inline base::subtle::Atomic64 Release_Load(
volatile const base::subtle::Atomic64* ptr) {
return base::subtle::Release_Load(ptr);
}
#endif // BASE_HAS_ATOMIC64
#endif // THREAD_ATOMICOPS_H_

View File

@ -1,408 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2005, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef _BASICTYPES_H_
#define _BASICTYPES_H_
#include "../config.h"
#include <string.h> // for memcpy()
#ifdef HAVE_INTTYPES_H
#include <inttypes.h> // gets us PRId64, etc
#endif
// To use this in an autoconf setting, make sure you run the following
// autoconf macros:
// AC_HEADER_STDC /* for stdint_h and inttypes_h */
// AC_CHECK_TYPES([__int64]) /* defined in some windows platforms */
#ifdef HAVE_INTTYPES_H
#include <inttypes.h> // uint16_t might be here; PRId64 too.
#endif
#ifdef HAVE_STDINT_H
#include <stdint.h> // to get uint16_t (ISO naming madness)
#endif
#include <sys/types.h> // our last best hope for uint16_t
// Standard typedefs
// All Google code is compiled with -funsigned-char to make "char"
// unsigned. Google code therefore doesn't need a "uchar" type.
// TODO(csilvers): how do we make sure unsigned-char works on non-gcc systems?
typedef signed char schar;
typedef int8_t int8;
typedef int16_t int16;
typedef int32_t int32;
typedef int64_t int64;
// NOTE: unsigned types are DANGEROUS in loops and other arithmetical
// places. Use the signed types unless your variable represents a bit
// pattern (e.g. a hash value) or you really need the extra bit. Do NOT
// use 'unsigned' to express "this value should always be positive";
// use assertions for this.
typedef uint8_t uint8;
typedef uint16_t uint16;
typedef uint32_t uint32;
typedef uint64_t uint64;
const uint16 kuint16max = ( (uint16) 0xFFFF);
const uint32 kuint32max = ( (uint32) 0xFFFFFFFF);
const uint64 kuint64max = ( (((uint64) kuint32max) << 32) | kuint32max );
const int8 kint8max = ( ( int8) 0x7F);
const int16 kint16max = ( ( int16) 0x7FFF);
const int32 kint32max = ( ( int32) 0x7FFFFFFF);
const int64 kint64max = ( ((( int64) kint32max) << 32) | kuint32max );
const int8 kint8min = ( ( int8) 0x80);
const int16 kint16min = ( ( int16) 0x8000);
const int32 kint32min = ( ( int32) 0x80000000);
const int64 kint64min = ( (((uint64) kint32min) << 32) | 0 );
// Define the "portable" printf and scanf macros, if they're not
// already there (via the inttypes.h we #included above, hopefully).
// Mostly it's old systems that don't support inttypes.h, so we assume
// they're 32 bit.
#ifndef PRIx64
#define PRIx64 "llx"
#endif
#ifndef SCNx64
#define SCNx64 "llx"
#endif
#ifndef PRId64
#define PRId64 "lld"
#endif
#ifndef SCNd64
#define SCNd64 "lld"
#endif
#ifndef PRIu64
#define PRIu64 "llu"
#endif
#ifndef PRIxPTR
#define PRIxPTR "lx"
#endif
// Also allow for printing of a pthread_t.
#define GPRIuPTHREAD "lu"
#define GPRIxPTHREAD "lx"
#if defined(__CYGWIN__) || defined(__CYGWIN32__) || defined(__APPLE__) || defined(__FreeBSD__)
#define PRINTABLE_PTHREAD(pthreadt) reinterpret_cast<uintptr_t>(pthreadt)
#else
#define PRINTABLE_PTHREAD(pthreadt) pthreadt
#endif
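// Illustrative sketch (editor's addition; assumes <pthread.h> and
// <stdio.h>) of printing a pthread_t portably with the macros above.
#if 0
pthread_t self = pthread_self();
fprintf(stderr, "thread %" GPRIxPTHREAD "\n", PRINTABLE_PTHREAD(self));
#endif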
// A macro to disallow the evil copy constructor and operator= functions
// This should be used in the private: declarations for a class
#define DISALLOW_EVIL_CONSTRUCTORS(TypeName) \
TypeName(const TypeName&); \
void operator=(const TypeName&)
// An alternate name that leaves out the moral judgment... :-)
#define DISALLOW_COPY_AND_ASSIGN(TypeName) DISALLOW_EVIL_CONSTRUCTORS(TypeName)
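// Illustrative sketch (hypothetical class, editor's addition) of the
// intended placement: last thing in the private: section.
#if 0
class Example {
 public:
  Example() {}
 private:
  DISALLOW_COPY_AND_ASSIGN(Example);
};
#endif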
// The COMPILE_ASSERT macro can be used to verify that a compile time
// expression is true. For example, you could use it to verify the
// size of a static array:
//
// COMPILE_ASSERT(sizeof(num_content_type_names) == sizeof(int),
// content_type_names_incorrect_size);
//
// or to make sure a struct is smaller than a certain size:
//
// COMPILE_ASSERT(sizeof(foo) < 128, foo_too_large);
//
// The second argument to the macro is the name of the variable. If
// the expression is false, most compilers will issue a warning/error
// containing the name of the variable.
//
// Implementation details of COMPILE_ASSERT:
//
// - COMPILE_ASSERT works by defining an array type that has -1
// elements (and thus is invalid) when the expression is false.
//
// - The simpler definition
//
// #define COMPILE_ASSERT(expr, msg) typedef char msg[(expr) ? 1 : -1]
//
// does not work, as gcc supports variable-length arrays whose sizes
// are determined at run-time (this is gcc's extension and not part
// of the C++ standard). As a result, gcc fails to reject the
// following code with the simple definition:
//
// int foo;
// COMPILE_ASSERT(foo, msg); // not supposed to compile as foo is
// // not a compile-time constant.
//
// - By using the type CompileAssert<(bool(expr))>, we ensure that
// expr is a compile-time constant. (Template arguments must be
// determined at compile-time.)
//
// - The outer parentheses in CompileAssert<(bool(expr))> are necessary
// to work around a bug in gcc 3.4.4 and 4.0.1. If we had written
//
// CompileAssert<bool(expr)>
//
// instead, these compilers will refuse to compile
//
// COMPILE_ASSERT(5 > 0, some_message);
//
// (They seem to think the ">" in "5 > 0" marks the end of the
// template argument list.)
//
// - The array size is (bool(expr) ? 1 : -1), instead of simply
//
// ((expr) ? 1 : -1).
//
// This is to avoid running into a bug in MS VC 7.1, which
// causes ((0.0) ? 1 : -1) to incorrectly evaluate to 1.
template <bool>
struct CompileAssert {
};
#ifdef HAVE___ATTRIBUTE__
# define ATTRIBUTE_UNUSED __attribute__((unused))
#else
# define ATTRIBUTE_UNUSED
#endif
#if defined(HAVE___ATTRIBUTE__) && defined(HAVE_TLS)
#define ATTR_INITIAL_EXEC __attribute__ ((tls_model ("initial-exec")))
#else
#define ATTR_INITIAL_EXEC
#endif
#define COMPILE_ASSERT(expr, msg) \
typedef CompileAssert<(bool(expr))> msg[bool(expr) ? 1 : -1] ATTRIBUTE_UNUSED
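// Illustrative sketch (editor's addition): the passing form compiles to
// a harmless typedef; the failing form (commented out) would produce an
// array type with -1 elements and be rejected.
#if 0
COMPILE_ASSERT(sizeof(int32) == 4, int32_is_4_bytes);     // compiles
// COMPILE_ASSERT(sizeof(int32) == 8, int32_is_8_bytes);  // would not
#endif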
#define arraysize(a) (sizeof(a) / sizeof(*(a)))
#define OFFSETOF_MEMBER(strct, field) \
(reinterpret_cast<char*>(&reinterpret_cast<strct*>(16)->field) - \
reinterpret_cast<char*>(16))
// bit_cast<Dest,Source> implements the equivalent of
// "*reinterpret_cast<Dest*>(&source)".
//
// The reinterpret_cast method would produce undefined behavior
// according to ISO C++ specification section 3.10, paragraph 15.
// bit_cast<> calls memcpy() which is blessed by the standard,
// especially by the example in section 3.9.
//
// Fortunately memcpy() is very fast. In optimized mode, with a
// constant size, gcc 2.95.3, gcc 4.0.1, and msvc 7.1 produce inline
// code with the minimal amount of data movement. On a 32-bit system,
// memcpy(d,s,4) compiles to one load and one store, and memcpy(d,s,8)
// compiles to two loads and two stores.
template <class Dest, class Source>
inline Dest bit_cast(const Source& source) {
COMPILE_ASSERT(sizeof(Dest) == sizeof(Source), bitcasting_unequal_sizes);
Dest dest;
memcpy(&dest, &source, sizeof(dest));
return dest;
}
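// Illustrative sketch (editor's addition): inspecting a float's bit
// pattern without the undefined behavior of a reinterpret_cast.
#if 0
float f = 1.0f;
uint32 bits = bit_cast<uint32>(f);  // 0x3f800000 under IEEE-754
#endif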
// bit_store<Dest,Source> implements the equivalent of
// "dest = *reinterpret_cast<Dest*>(&source)".
//
// This prevents undefined behavior when the dest pointer is unaligned.
template <class Dest, class Source>
inline void bit_store(Dest *dest, const Source *source) {
COMPILE_ASSERT(sizeof(Dest) == sizeof(Source), bitcasting_unequal_sizes);
memcpy(dest, source, sizeof(Dest));
}
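// Illustrative use of bit_store (not from the original file): writing an
// integer into a byte buffer at an unaligned offset, e.g. when building a
// wire-format message.
//
//   char buf[16];
//   uint64_t v = 42;
//   // A direct "*reinterpret_cast<uint64_t*>(buf + 3) = v" would be an
//   // unaligned (and strict-aliasing-violating) store; bit_store is safe.
//   bit_store(reinterpret_cast<uint64_t*>(buf + 3), &v);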
#ifdef HAVE___ATTRIBUTE__
# define ATTRIBUTE_WEAK __attribute__((weak))
# define ATTRIBUTE_NOINLINE __attribute__((noinline))
#else
# define ATTRIBUTE_WEAK
# define ATTRIBUTE_NOINLINE
#endif
#if defined(HAVE___ATTRIBUTE__) && defined(__ELF__)
# define ATTRIBUTE_VISIBILITY_HIDDEN __attribute__((visibility("hidden")))
#else
# define ATTRIBUTE_VISIBILITY_HIDDEN
#endif
// Section attributes are supported for both ELF and Mach-O, but in
// very different ways. Here's the API we provide:
// 1) ATTRIBUTE_SECTION: put this with the declaration of all functions
// you want to be in the same linker section
// 2) DEFINE_ATTRIBUTE_SECTION_VARS: must be called once per unique
// name. You want to make sure this is executed before any
// DECLARE_ATTRIBUTE_SECTION_VARS; the easiest way is to put them
// in the same .cc file. Put this call at the global level.
// 3) INIT_ATTRIBUTE_SECTION_VARS: you can scatter calls to this in
// multiple places to help ensure execution before any
// DECLARE_ATTRIBUTE_SECTION_VARS. You must have at least one
// DEFINE, but you can have many INITs. Put each in its own scope.
// 4) DECLARE_ATTRIBUTE_SECTION_VARS: must be called before using
// ATTRIBUTE_SECTION_START or ATTRIBUTE_SECTION_STOP on a name.
// Put this call at the global level.
// 5) ATTRIBUTE_SECTION_START/ATTRIBUTE_SECTION_STOP: call this to say
// where in memory a given section is. All functions declared with
// ATTRIBUTE_SECTION are guaranteed to be between START and STOP.
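// A hypothetical end-to-end use of this API (not part of the original
// file; "my_section" and the function names are made up):
//
//   DEFINE_ATTRIBUTE_SECTION_VARS(my_section);   // once, in one .cc file
//   DECLARE_ATTRIBUTE_SECTION_VARS(my_section);  // at the global level
//
//   void TracedFunction() ATTRIBUTE_SECTION(my_section);
//
//   bool PcIsInMySection(const void* pc) {
//     return ATTRIBUTE_SECTION_START(my_section) <= pc &&
//            pc < ATTRIBUTE_SECTION_STOP(my_section);
//   }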
#if defined(HAVE___ATTRIBUTE__) && defined(__ELF__)
# define ATTRIBUTE_SECTION(name) __attribute__ ((section (#name)))
// Weak section declaration to be used as a global declaration
// for ATTRIBUTE_SECTION_START|STOP(name) to compile and link
// even without functions with ATTRIBUTE_SECTION(name).
# define DECLARE_ATTRIBUTE_SECTION_VARS(name) \
extern char __start_##name[] ATTRIBUTE_WEAK; \
extern char __stop_##name[] ATTRIBUTE_WEAK
# define INIT_ATTRIBUTE_SECTION_VARS(name) // no-op for ELF
# define DEFINE_ATTRIBUTE_SECTION_VARS(name) // no-op for ELF
// Return void* pointers to start/end of a section of code with functions
// having ATTRIBUTE_SECTION(name), or 0 if no such function exists.
// One must DECLARE_ATTRIBUTE_SECTION(name) for this to compile and link.
# define ATTRIBUTE_SECTION_START(name) (reinterpret_cast<void*>(__start_##name))
# define ATTRIBUTE_SECTION_STOP(name) (reinterpret_cast<void*>(__stop_##name))
# define HAVE_ATTRIBUTE_SECTION_START 1
#elif defined(HAVE___ATTRIBUTE__) && defined(__MACH__)
# define ATTRIBUTE_SECTION(name) __attribute__ ((section ("__TEXT, " #name)))
#include <mach-o/getsect.h>
#include <mach-o/dyld.h>
class AssignAttributeStartEnd {
public:
AssignAttributeStartEnd(const char* name, char** pstart, char** pend) {
// Find out which dynamic library, if any, defines the section `name`.
if (_dyld_present()) {
for (int i = _dyld_image_count() - 1; i >= 0; --i) {
const mach_header* hdr = _dyld_get_image_header(i);
#ifdef MH_MAGIC_64
if (hdr->magic == MH_MAGIC_64) {
uint64_t len;
*pstart = getsectdatafromheader_64((mach_header_64*)hdr,
"__TEXT", name, &len);
if (*pstart) { // NULL if not defined in this dynamic library
*pstart += _dyld_get_image_vmaddr_slide(i); // correct for reloc
*pend = *pstart + len;
return;
}
}
#endif
if (hdr->magic == MH_MAGIC) {
uint32_t len;
*pstart = getsectdatafromheader(hdr, "__TEXT", name, &len);
if (*pstart) { // NULL if not defined in this dynamic library
*pstart += _dyld_get_image_vmaddr_slide(i); // correct for reloc
*pend = *pstart + len;
return;
}
}
}
}
// If we get here, the section is not defined in any dynamic library.
// See if it is defined statically.
unsigned long len; // don't ask me why this type isn't uint32_t too...
*pstart = getsectdata("__TEXT", name, &len);
*pend = *pstart + len;
}
};
#define DECLARE_ATTRIBUTE_SECTION_VARS(name) \
extern char* __start_##name; \
extern char* __stop_##name
#define INIT_ATTRIBUTE_SECTION_VARS(name) \
DECLARE_ATTRIBUTE_SECTION_VARS(name); \
static const AssignAttributeStartEnd __assign_##name( \
#name, &__start_##name, &__stop_##name)
#define DEFINE_ATTRIBUTE_SECTION_VARS(name) \
char* __start_##name, *__stop_##name; \
INIT_ATTRIBUTE_SECTION_VARS(name)
# define ATTRIBUTE_SECTION_START(name) (reinterpret_cast<void*>(__start_##name))
# define ATTRIBUTE_SECTION_STOP(name) (reinterpret_cast<void*>(__stop_##name))
# define HAVE_ATTRIBUTE_SECTION_START 1
#else // not HAVE___ATTRIBUTE__ && __ELF__, nor HAVE___ATTRIBUTE__ && __MACH__
# define ATTRIBUTE_SECTION(name)
# define DECLARE_ATTRIBUTE_SECTION_VARS(name)
# define INIT_ATTRIBUTE_SECTION_VARS(name)
# define DEFINE_ATTRIBUTE_SECTION_VARS(name)
# define ATTRIBUTE_SECTION_START(name) (reinterpret_cast<void*>(0))
# define ATTRIBUTE_SECTION_STOP(name) (reinterpret_cast<void*>(0))
#endif // HAVE___ATTRIBUTE__ and __ELF__ or __MACH__
#if defined(HAVE___ATTRIBUTE__)
# if (defined(__i386__) || defined(__x86_64__))
# define CACHELINE_ALIGNED __attribute__((aligned(64)))
# elif (defined(__PPC__) || defined(__PPC64__))
# define CACHELINE_ALIGNED __attribute__((aligned(16)))
# elif (defined(__arm__))
# define CACHELINE_ALIGNED __attribute__((aligned(64)))
// some ARMs have shorter cache lines (ARM1176JZF-S is 32 bytes for example) but obviously 64-byte aligned implies 32-byte aligned
# elif (defined(__mips__))
# define CACHELINE_ALIGNED __attribute__((aligned(128)))
# elif (defined(__aarch64__))
# define CACHELINE_ALIGNED __attribute__((aligned(64)))
// The line size is implementation-specific; Cortex-A53 and A57 use 64 bytes.
# elif (defined(__s390__))
# define CACHELINE_ALIGNED __attribute__((aligned(256)))
# else
# error Could not determine cache line length - unknown architecture
# endif
#else
# define CACHELINE_ALIGNED
#endif // defined(HAVE___ATTRIBUTE__)
// Structure for discovering alignment
union MemoryAligner {
void* p;
double d;
size_t s;
} CACHELINE_ALIGNED;
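// Illustrative use of CACHELINE_ALIGNED (not from the original file):
// giving each per-thread counter its own cache line to avoid false
// sharing ("kMaxThreads" is a made-up constant).
//
//   struct CACHELINE_ALIGNED Counter {
//     volatile long value;
//   };
//   static Counter counters[kMaxThreads];  // one cache line per entry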
// The following enum should be used only as a constructor argument to indicate
// that the variable has static storage class, and that the constructor should
// do nothing to its state. It indicates to the reader that it is legal to
// declare a static instance of the class, provided the constructor is given
// the base::LINKER_INITIALIZED argument. Normally, it is unsafe to declare a
// static variable that has a constructor or a destructor because invocation
// order is undefined. However, IF the type can be initialized by filling with
// zeroes (which the loader does for static variables), AND the destructor also
// does nothing to the storage, then a constructor declared as
// explicit MyClass(base::LinkerInitialized x) {}
// and invoked as
// static MyClass my_variable_name(base::LINKER_INITIALIZED);
// is safe.
namespace base {
enum LinkerInitialized { LINKER_INITIALIZED };
}
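// Illustrative sketch of the pattern described above (not from the
// original file):
//
//   class SpinLock {
//    public:
//     // Does nothing; the loader's zero-initialization is the real init.
//     explicit SpinLock(base::LinkerInitialized /* unused */) {}
//   };
//   // Safe despite the unspecified construction order of statics:
//   static SpinLock module_lock(base::LINKER_INITIALIZED);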
#endif // _BASICTYPES_H_

View File

@ -1,166 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2005, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
// This file is a compatibility layer that defines Google's version of
// command line flags that are used for configuration.
//
// We put flags into their own namespace. It is purposefully
// named in an opaque way that people should have trouble typing
// directly. The idea is that DEFINE puts the flag in the weird
// namespace, and DECLARE imports the flag from there into the
// current namespace. The net result is to force people to use
// DECLARE to get access to a flag, rather than saying
// extern bool FLAGS_logtostderr;
// or some such instead. We want this so we can put extra
// functionality (like sanity-checking) in DECLARE if we want,
// and make sure it is picked up everywhere.
//
// We also put the type of the variable in the namespace, so that
// people can't DECLARE_int32 something that they DEFINE_bool'd
// elsewhere.
#ifndef BASE_COMMANDLINEFLAGS_H_
#define BASE_COMMANDLINEFLAGS_H_
#include "../config.h"
#include <string>
#include <string.h> // for memchr
#include <stdlib.h> // for getenv
#include "base/basictypes.h"
#define DECLARE_VARIABLE(type, name) \
namespace FLAG__namespace_do_not_use_directly_use_DECLARE_##type##_instead { \
extern PERFTOOLS_DLL_DECL type FLAGS_##name; \
} \
using FLAG__namespace_do_not_use_directly_use_DECLARE_##type##_instead::FLAGS_##name
#define DEFINE_VARIABLE(type, name, value, meaning) \
namespace FLAG__namespace_do_not_use_directly_use_DECLARE_##type##_instead { \
PERFTOOLS_DLL_DECL type FLAGS_##name(value); \
char FLAGS_no##name; \
} \
using FLAG__namespace_do_not_use_directly_use_DECLARE_##type##_instead::FLAGS_##name
// bool specialization
#define DECLARE_bool(name) \
DECLARE_VARIABLE(bool, name)
#define DEFINE_bool(name, value, meaning) \
DEFINE_VARIABLE(bool, name, value, meaning)
// int32 specialization
#define DECLARE_int32(name) \
DECLARE_VARIABLE(int32, name)
#define DEFINE_int32(name, value, meaning) \
DEFINE_VARIABLE(int32, name, value, meaning)
// int64 specialization
#define DECLARE_int64(name) \
DECLARE_VARIABLE(int64, name)
#define DEFINE_int64(name, value, meaning) \
DEFINE_VARIABLE(int64, name, value, meaning)
#define DECLARE_uint64(name) \
DECLARE_VARIABLE(uint64, name)
#define DEFINE_uint64(name, value, meaning) \
DEFINE_VARIABLE(uint64, name, value, meaning)
// double specialization
#define DECLARE_double(name) \
DECLARE_VARIABLE(double, name)
#define DEFINE_double(name, value, meaning) \
DEFINE_VARIABLE(double, name, value, meaning)
// Special case for string, because we have to specify the namespace
// std::string, which doesn't play nicely with our FLAG__namespace hackery.
#define DECLARE_string(name) \
namespace FLAG__namespace_do_not_use_directly_use_DECLARE_string_instead { \
extern std::string FLAGS_##name; \
} \
using FLAG__namespace_do_not_use_directly_use_DECLARE_string_instead::FLAGS_##name
#define DEFINE_string(name, value, meaning) \
namespace FLAG__namespace_do_not_use_directly_use_DECLARE_string_instead { \
std::string FLAGS_##name(value); \
char FLAGS_no##name; \
} \
using FLAG__namespace_do_not_use_directly_use_DECLARE_string_instead::FLAGS_##name
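// Illustrative use of these macros (not from the original file; the flag
// name is made up):
//
//   // In exactly one .cc file -- defines the flag and its default:
//   DEFINE_int64(my_sample_interval, 0,
//                "how often to sample allocations, in bytes");
//   // In any file that needs to read it:
//   DECLARE_int64(my_sample_interval);
//   ...
//   if (FLAGS_my_sample_interval > 0) { /* sampling enabled */ }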
// implemented in sysinfo.cc
namespace tcmalloc {
namespace commandlineflags {
inline bool StringToBool(const char *value, bool def) {
if (!value) {
return def;
}
// Accept values whose first character is 't', 'T', 'y', 'Y', or '1'; an
// empty string matches the '\0' in the pattern and also counts as true.
return memchr("tTyY1\0", value[0], 6) != NULL;
}
inline int StringToInt(const char *value, int def) {
if (!value) {
return def;
}
return strtol(value, NULL, 10);
}
inline long long StringToLongLong(const char *value, long long def) {
if (!value) {
return def;
}
return strtoll(value, NULL, 10);
}
inline double StringToDouble(const char *value, double def) {
if (!value) {
return def;
}
return strtod(value, NULL);
}
}
}
// These macros (could be functions, but I don't want to bother with a .cc
// file) make it easier to initialize flags from the environment.
#define EnvToString(envname, dflt) \
(!getenv(envname) ? (dflt) : getenv(envname))
#define EnvToBool(envname, dflt) \
tcmalloc::commandlineflags::StringToBool(getenv(envname), dflt)
#define EnvToInt(envname, dflt) \
tcmalloc::commandlineflags::StringToInt(getenv(envname), dflt)
#define EnvToInt64(envname, dflt) \
tcmalloc::commandlineflags::StringToLongLong(getenv(envname), dflt)
#define EnvToDouble(envname, dflt) \
tcmalloc::commandlineflags::StringToDouble(getenv(envname), dflt)
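// Illustrative use (not from the original file; the flag and variable
// names are made up): seeding a flag's default from the environment.
//
//   DEFINE_bool(my_verbose_release,
//               EnvToBool("MY_VERBOSE_RELEASE", false),
//               "log when memory is released back to the OS");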
#endif // BASE_COMMANDLINEFLAGS_H_

View File

@ -1,179 +0,0 @@
/* Copyright (c) 2008-2009, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ---
* Author: Kostya Serebryany
*/
#ifdef __cplusplus
# error "This file should be built as pure C to avoid name mangling"
#endif
#include "config.h"
#include <stdlib.h>
#include <string.h>
#include "base/dynamic_annotations.h"
#include "getenv_safe.h" // for TCMallocGetenvSafe
#ifdef __GNUC__
/* valgrind.h uses gcc extensions so it won't build with other compilers */
# ifdef HAVE_VALGRIND_H /* prefer the user's copy if they have it */
# include <valgrind.h>
# else /* otherwise just use the copy that we have */
# include "third_party/valgrind.h"
# endif
#endif
/* Compiler-based ThreadSanitizer defines
DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL = 1
and provides its own definitions of the functions. */
#ifndef DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL
# define DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL 0
#endif
/* Each function is empty and called (via a macro) only in debug mode.
The arguments are captured by dynamic tools at runtime. */
#if DYNAMIC_ANNOTATIONS_ENABLED == 1 \
&& DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL == 0
void AnnotateRWLockCreate(const char *file, int line,
const volatile void *lock){}
void AnnotateRWLockDestroy(const char *file, int line,
const volatile void *lock){}
void AnnotateRWLockAcquired(const char *file, int line,
const volatile void *lock, long is_w){}
void AnnotateRWLockReleased(const char *file, int line,
const volatile void *lock, long is_w){}
void AnnotateBarrierInit(const char *file, int line,
const volatile void *barrier, long count,
long reinitialization_allowed) {}
void AnnotateBarrierWaitBefore(const char *file, int line,
const volatile void *barrier) {}
void AnnotateBarrierWaitAfter(const char *file, int line,
const volatile void *barrier) {}
void AnnotateBarrierDestroy(const char *file, int line,
const volatile void *barrier) {}
void AnnotateCondVarWait(const char *file, int line,
const volatile void *cv,
const volatile void *lock){}
void AnnotateCondVarSignal(const char *file, int line,
const volatile void *cv){}
void AnnotateCondVarSignalAll(const char *file, int line,
const volatile void *cv){}
void AnnotatePublishMemoryRange(const char *file, int line,
const volatile void *address,
long size){}
void AnnotateUnpublishMemoryRange(const char *file, int line,
const volatile void *address,
long size){}
void AnnotatePCQCreate(const char *file, int line,
const volatile void *pcq){}
void AnnotatePCQDestroy(const char *file, int line,
const volatile void *pcq){}
void AnnotatePCQPut(const char *file, int line,
const volatile void *pcq){}
void AnnotatePCQGet(const char *file, int line,
const volatile void *pcq){}
void AnnotateNewMemory(const char *file, int line,
const volatile void *mem,
long size){}
void AnnotateExpectRace(const char *file, int line,
const volatile void *mem,
const char *description){}
void AnnotateBenignRace(const char *file, int line,
const volatile void *mem,
const char *description){}
void AnnotateBenignRaceSized(const char *file, int line,
const volatile void *mem,
long size,
const char *description) {}
void AnnotateMutexIsUsedAsCondVar(const char *file, int line,
const volatile void *mu){}
void AnnotateTraceMemory(const char *file, int line,
const volatile void *arg){}
void AnnotateThreadName(const char *file, int line,
const char *name){}
void AnnotateIgnoreReadsBegin(const char *file, int line){}
void AnnotateIgnoreReadsEnd(const char *file, int line){}
void AnnotateIgnoreWritesBegin(const char *file, int line){}
void AnnotateIgnoreWritesEnd(const char *file, int line){}
void AnnotateEnableRaceDetection(const char *file, int line, int enable){}
void AnnotateNoOp(const char *file, int line,
const volatile void *arg){}
void AnnotateFlushState(const char *file, int line){}
#endif /* DYNAMIC_ANNOTATIONS_ENABLED == 1
&& DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL == 0 */
#if DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL == 0
static int GetRunningOnValgrind(void) {
#ifdef RUNNING_ON_VALGRIND
if (RUNNING_ON_VALGRIND) return 1;
#endif
const char *running_on_valgrind_str = TCMallocGetenvSafe("RUNNING_ON_VALGRIND");
if (running_on_valgrind_str) {
return strcmp(running_on_valgrind_str, "0") != 0;
}
return 0;
}
/* See the comments in dynamic_annotations.h */
int RunningOnValgrind(void) {
static volatile int running_on_valgrind = -1;
int local_running_on_valgrind = running_on_valgrind;
/* C doesn't have thread-safe initialization of statics, and we
don't want to depend on pthread_once here, so hack it. */
ANNOTATE_BENIGN_RACE(&running_on_valgrind, "safe hack");
if (local_running_on_valgrind == -1)
running_on_valgrind = local_running_on_valgrind = GetRunningOnValgrind();
return local_running_on_valgrind;
}
#endif /* DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL == 0 */
/* See the comments in dynamic_annotations.h */
double ValgrindSlowdown(void) {
/* Same initialization hack as in RunningOnValgrind(). */
static volatile double slowdown = 0.0;
double local_slowdown = slowdown;
ANNOTATE_BENIGN_RACE(&slowdown, "safe hack");
if (RunningOnValgrind() == 0) {
return 1.0;
}
if (local_slowdown == 0.0) {
char *env = getenv("VALGRIND_SLOWDOWN");
slowdown = local_slowdown = env ? atof(env) : 50.0;
}
return local_slowdown;
}

View File

@ -1,627 +0,0 @@
/* Copyright (c) 2008, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ---
* Author: Kostya Serebryany
*/
/* This file defines dynamic annotations for use with dynamic analysis
tools such as valgrind, PIN, etc.
Dynamic annotation is a source code annotation that affects
the generated code (that is, the annotation is not a comment).
Each such annotation is attached to a particular
instruction and/or to a particular object (address) in the program.
The annotations that should be used by users are macros in all upper-case
(e.g., ANNOTATE_NEW_MEMORY).
Actual implementation of these macros may differ depending on the
dynamic analysis tool being used.
See http://code.google.com/p/data-race-test/ for more information.
This file supports the following dynamic analysis tools:
- None (DYNAMIC_ANNOTATIONS_ENABLED is not defined or zero).
Macros are defined empty.
- ThreadSanitizer, Helgrind, DRD (DYNAMIC_ANNOTATIONS_ENABLED is 1).
Macros are defined as calls to non-inlinable empty functions
that are intercepted by Valgrind. */
#ifndef BASE_DYNAMIC_ANNOTATIONS_H_
#define BASE_DYNAMIC_ANNOTATIONS_H_
#ifndef DYNAMIC_ANNOTATIONS_ENABLED
# define DYNAMIC_ANNOTATIONS_ENABLED 0
#endif
#if DYNAMIC_ANNOTATIONS_ENABLED != 0
/* -------------------------------------------------------------
Annotations useful when implementing condition variables such as CondVar,
using conditional critical sections (Await/LockWhen) and when constructing
user-defined synchronization mechanisms.
The annotations ANNOTATE_HAPPENS_BEFORE() and ANNOTATE_HAPPENS_AFTER() can
be used to define happens-before arcs in user-defined synchronization
mechanisms: the race detector will infer an arc from the former to the
latter when they share the same argument pointer.
Example 1 (reference counting):
void Unref() {
ANNOTATE_HAPPENS_BEFORE(&refcount_);
if (AtomicDecrementByOne(&refcount_) == 0) {
ANNOTATE_HAPPENS_AFTER(&refcount_);
delete this;
}
}
Example 2 (message queue):
void MyQueue::Put(Type *e) {
MutexLock lock(&mu_);
ANNOTATE_HAPPENS_BEFORE(e);
PutElementIntoMyQueue(e);
}
Type *MyQueue::Get() {
MutexLock lock(&mu_);
Type *e = GetElementFromMyQueue();
ANNOTATE_HAPPENS_AFTER(e);
return e;
}
Note: when possible, please use the existing reference counting and message
queue implementations instead of inventing new ones. */
/* Report that a wait on the condition variable at address "cv" has succeeded
and the lock at address "lock" is held. */
#define ANNOTATE_CONDVAR_LOCK_WAIT(cv, lock) \
AnnotateCondVarWait(__FILE__, __LINE__, cv, lock)
/* Report that a wait on the condition variable at "cv" has succeeded. Variant
w/o lock. */
#define ANNOTATE_CONDVAR_WAIT(cv) \
AnnotateCondVarWait(__FILE__, __LINE__, cv, NULL)
/* Report that we are about to signal on the condition variable at address
"cv". */
#define ANNOTATE_CONDVAR_SIGNAL(cv) \
AnnotateCondVarSignal(__FILE__, __LINE__, cv)
/* Report that we are about to signal_all on the condition variable at "cv". */
#define ANNOTATE_CONDVAR_SIGNAL_ALL(cv) \
AnnotateCondVarSignalAll(__FILE__, __LINE__, cv)
/* Annotations for user-defined synchronization mechanisms. */
#define ANNOTATE_HAPPENS_BEFORE(obj) ANNOTATE_CONDVAR_SIGNAL(obj)
#define ANNOTATE_HAPPENS_AFTER(obj) ANNOTATE_CONDVAR_WAIT(obj)
/* Report that the bytes in the range [pointer, pointer+size) are about
to be published safely. The race checker will create a happens-before
arc from the call ANNOTATE_PUBLISH_MEMORY_RANGE(pointer, size) to
subsequent accesses to this memory.
Note: this annotation may not work properly if the race detector uses
sampling, i.e. does not observe all memory accesses.
*/
#define ANNOTATE_PUBLISH_MEMORY_RANGE(pointer, size) \
AnnotatePublishMemoryRange(__FILE__, __LINE__, pointer, size)
/* DEPRECATED. Don't use it. */
#define ANNOTATE_UNPUBLISH_MEMORY_RANGE(pointer, size) \
AnnotateUnpublishMemoryRange(__FILE__, __LINE__, pointer, size)
/* DEPRECATED. Don't use it. */
#define ANNOTATE_SWAP_MEMORY_RANGE(pointer, size) \
do { \
ANNOTATE_UNPUBLISH_MEMORY_RANGE(pointer, size); \
ANNOTATE_PUBLISH_MEMORY_RANGE(pointer, size); \
} while (0)
/* Instruct the tool to create a happens-before arc between mu->Unlock() and
mu->Lock(). This annotation may slow down the race detector and hide real
races. Normally it is used only when it would be difficult to annotate each
of the mutex's critical sections individually using the annotations above.
This annotation makes sense only for hybrid race detectors. For pure
happens-before detectors this is a no-op. For more details see
http://code.google.com/p/data-race-test/wiki/PureHappensBeforeVsHybrid . */
#define ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX(mu) \
AnnotateMutexIsUsedAsCondVar(__FILE__, __LINE__, mu)
/* Deprecated. Use ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX. */
#define ANNOTATE_MUTEX_IS_USED_AS_CONDVAR(mu) \
AnnotateMutexIsUsedAsCondVar(__FILE__, __LINE__, mu)
/* -------------------------------------------------------------
Annotations useful when defining memory allocators, or when memory that
was protected in one way starts to be protected in another. */
/* Report that new memory at "address" of size "size" has been allocated.
This might be used when the memory has been retrieved from a free list and
is about to be reused, or when the locking discipline for a variable
changes. */
#define ANNOTATE_NEW_MEMORY(address, size) \
AnnotateNewMemory(__FILE__, __LINE__, address, size)
/* -------------------------------------------------------------
Annotations useful when defining FIFO queues that transfer data between
threads. */
/* Report that the producer-consumer queue (such as ProducerConsumerQueue) at
address "pcq" has been created. The ANNOTATE_PCQ_* annotations
should be used only for FIFO queues. For non-FIFO queues use
ANNOTATE_HAPPENS_BEFORE (for put) and ANNOTATE_HAPPENS_AFTER (for get). */
#define ANNOTATE_PCQ_CREATE(pcq) \
AnnotatePCQCreate(__FILE__, __LINE__, pcq)
/* Report that the queue at address "pcq" is about to be destroyed. */
#define ANNOTATE_PCQ_DESTROY(pcq) \
AnnotatePCQDestroy(__FILE__, __LINE__, pcq)
/* Report that we are about to put an element into a FIFO queue at address
"pcq". */
#define ANNOTATE_PCQ_PUT(pcq) \
AnnotatePCQPut(__FILE__, __LINE__, pcq)
/* Report that we've just got an element from a FIFO queue at address "pcq". */
#define ANNOTATE_PCQ_GET(pcq) \
AnnotatePCQGet(__FILE__, __LINE__, pcq)
/* -------------------------------------------------------------
Annotations that suppress errors. It is usually better to express the
program's synchronization using the other annotations, but these can
be used when all else fails. */
/* Report that we may have a benign race at "pointer", with size
"sizeof(*(pointer))". "pointer" must be a non-void* pointer. Insert at the
point where "pointer" has been allocated, preferably close to the point
where the race happens. See also ANNOTATE_BENIGN_RACE_STATIC. */
#define ANNOTATE_BENIGN_RACE(pointer, description) \
AnnotateBenignRaceSized(__FILE__, __LINE__, pointer, \
sizeof(*(pointer)), description)
/* Same as ANNOTATE_BENIGN_RACE(address, description), but applies to
the memory range [address, address+size). */
#define ANNOTATE_BENIGN_RACE_SIZED(address, size, description) \
AnnotateBenignRaceSized(__FILE__, __LINE__, address, size, description)
/* Request the analysis tool to ignore all reads in the current thread
until ANNOTATE_IGNORE_READS_END is called.
Useful to ignore intentional racey reads, while still checking
other reads and all writes.
See also ANNOTATE_UNPROTECTED_READ. */
#define ANNOTATE_IGNORE_READS_BEGIN() \
AnnotateIgnoreReadsBegin(__FILE__, __LINE__)
/* Stop ignoring reads. */
#define ANNOTATE_IGNORE_READS_END() \
AnnotateIgnoreReadsEnd(__FILE__, __LINE__)
/* Similar to ANNOTATE_IGNORE_READS_BEGIN, but ignore writes. */
#define ANNOTATE_IGNORE_WRITES_BEGIN() \
AnnotateIgnoreWritesBegin(__FILE__, __LINE__)
/* Stop ignoring writes. */
#define ANNOTATE_IGNORE_WRITES_END() \
AnnotateIgnoreWritesEnd(__FILE__, __LINE__)
/* Start ignoring all memory accesses (reads and writes). */
#define ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() \
do {\
ANNOTATE_IGNORE_READS_BEGIN();\
ANNOTATE_IGNORE_WRITES_BEGIN();\
} while (0)
/* Stop ignoring all memory accesses. */
#define ANNOTATE_IGNORE_READS_AND_WRITES_END() \
do {\
ANNOTATE_IGNORE_WRITES_END();\
ANNOTATE_IGNORE_READS_END();\
} while (0)
/* Enable (enable!=0) or disable (enable==0) race detection for all threads.
This annotation could be useful if you want to skip expensive race analysis
during some period of program execution, e.g. during initialization. */
#define ANNOTATE_ENABLE_RACE_DETECTION(enable) \
AnnotateEnableRaceDetection(__FILE__, __LINE__, enable)
/* -------------------------------------------------------------
Annotations useful for debugging. */
/* Request to trace every access to "address". */
#define ANNOTATE_TRACE_MEMORY(address) \
AnnotateTraceMemory(__FILE__, __LINE__, address)
/* Report the current thread name to a race detector. */
#define ANNOTATE_THREAD_NAME(name) \
AnnotateThreadName(__FILE__, __LINE__, name)
/* -------------------------------------------------------------
Annotations useful when implementing locks. They are not
normally needed by modules that merely use locks.
The "lock" argument is a pointer to the lock object. */
/* Report that a lock has been created at address "lock". */
#define ANNOTATE_RWLOCK_CREATE(lock) \
AnnotateRWLockCreate(__FILE__, __LINE__, lock)
/* Report that the lock at address "lock" is about to be destroyed. */
#define ANNOTATE_RWLOCK_DESTROY(lock) \
AnnotateRWLockDestroy(__FILE__, __LINE__, lock)
/* Report that the lock at address "lock" has been acquired.
is_w=1 for writer lock, is_w=0 for reader lock. */
#define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) \
AnnotateRWLockAcquired(__FILE__, __LINE__, lock, is_w)
/* Report that the lock at address "lock" is about to be released. */
#define ANNOTATE_RWLOCK_RELEASED(lock, is_w) \
AnnotateRWLockReleased(__FILE__, __LINE__, lock, is_w)
/* -------------------------------------------------------------
Annotations useful when implementing barriers. They are not
normally needed by modules that merely use barriers.
The "barrier" argument is a pointer to the barrier object. */
/* Report that the "barrier" has been initialized with initial "count".
If 'reinitialization_allowed' is true, initialization is allowed to happen
multiple times w/o calling barrier_destroy() */
#define ANNOTATE_BARRIER_INIT(barrier, count, reinitialization_allowed) \
AnnotateBarrierInit(__FILE__, __LINE__, barrier, count, \
reinitialization_allowed)
/* Report that we are about to enter barrier_wait("barrier"). */
#define ANNOTATE_BARRIER_WAIT_BEFORE(barrier) \
AnnotateBarrierWaitBefore(__FILE__, __LINE__, barrier)
/* Report that we just exited barrier_wait("barrier"). */
#define ANNOTATE_BARRIER_WAIT_AFTER(barrier) \
AnnotateBarrierWaitAfter(__FILE__, __LINE__, barrier)
/* Report that the "barrier" has been destroyed. */
#define ANNOTATE_BARRIER_DESTROY(barrier) \
AnnotateBarrierDestroy(__FILE__, __LINE__, barrier)
/* -------------------------------------------------------------
Annotations useful for testing race detectors. */
/* Report that we expect a race on the variable at "address".
Use only in unit tests for a race detector. */
#define ANNOTATE_EXPECT_RACE(address, description) \
AnnotateExpectRace(__FILE__, __LINE__, address, description)
/* A no-op. Insert where you like to test the interceptors. */
#define ANNOTATE_NO_OP(arg) \
AnnotateNoOp(__FILE__, __LINE__, arg)
/* Force the race detector to flush its state. The actual effect depends on
* the implementation of the detector. */
#define ANNOTATE_FLUSH_STATE() \
AnnotateFlushState(__FILE__, __LINE__)
#else /* DYNAMIC_ANNOTATIONS_ENABLED == 0 */
#define ANNOTATE_RWLOCK_CREATE(lock) /* empty */
#define ANNOTATE_RWLOCK_DESTROY(lock) /* empty */
#define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) /* empty */
#define ANNOTATE_RWLOCK_RELEASED(lock, is_w) /* empty */
#define ANNOTATE_BARRIER_INIT(barrier, count, reinitialization_allowed) /* */
#define ANNOTATE_BARRIER_WAIT_BEFORE(barrier) /* empty */
#define ANNOTATE_BARRIER_WAIT_AFTER(barrier) /* empty */
#define ANNOTATE_BARRIER_DESTROY(barrier) /* empty */
#define ANNOTATE_CONDVAR_LOCK_WAIT(cv, lock) /* empty */
#define ANNOTATE_CONDVAR_WAIT(cv) /* empty */
#define ANNOTATE_CONDVAR_SIGNAL(cv) /* empty */
#define ANNOTATE_CONDVAR_SIGNAL_ALL(cv) /* empty */
#define ANNOTATE_HAPPENS_BEFORE(obj) /* empty */
#define ANNOTATE_HAPPENS_AFTER(obj) /* empty */
#define ANNOTATE_PUBLISH_MEMORY_RANGE(address, size) /* empty */
#define ANNOTATE_UNPUBLISH_MEMORY_RANGE(address, size) /* empty */
#define ANNOTATE_SWAP_MEMORY_RANGE(address, size) /* empty */
#define ANNOTATE_PCQ_CREATE(pcq) /* empty */
#define ANNOTATE_PCQ_DESTROY(pcq) /* empty */
#define ANNOTATE_PCQ_PUT(pcq) /* empty */
#define ANNOTATE_PCQ_GET(pcq) /* empty */
#define ANNOTATE_NEW_MEMORY(address, size) /* empty */
#define ANNOTATE_EXPECT_RACE(address, description) /* empty */
#define ANNOTATE_BENIGN_RACE(address, description) /* empty */
#define ANNOTATE_BENIGN_RACE_SIZED(address, size, description) /* empty */
#define ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX(mu) /* empty */
#define ANNOTATE_MUTEX_IS_USED_AS_CONDVAR(mu) /* empty */
#define ANNOTATE_TRACE_MEMORY(arg) /* empty */
#define ANNOTATE_THREAD_NAME(name) /* empty */
#define ANNOTATE_IGNORE_READS_BEGIN() /* empty */
#define ANNOTATE_IGNORE_READS_END() /* empty */
#define ANNOTATE_IGNORE_WRITES_BEGIN() /* empty */
#define ANNOTATE_IGNORE_WRITES_END() /* empty */
#define ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() /* empty */
#define ANNOTATE_IGNORE_READS_AND_WRITES_END() /* empty */
#define ANNOTATE_ENABLE_RACE_DETECTION(enable) /* empty */
#define ANNOTATE_NO_OP(arg) /* empty */
#define ANNOTATE_FLUSH_STATE() /* empty */
#endif /* DYNAMIC_ANNOTATIONS_ENABLED */
/* Macro definitions for GCC attributes that allow static thread safety
analysis to recognize and use some of the dynamic annotations as
escape hatches.
TODO(lcwu): remove the check for __SUPPORT_DYN_ANNOTATION__ once the
default crosstool/GCC supports these GCC attributes. */
#define ANNOTALYSIS_STATIC_INLINE
#define ANNOTALYSIS_SEMICOLON_OR_EMPTY_BODY ;
#define ANNOTALYSIS_IGNORE_READS_BEGIN
#define ANNOTALYSIS_IGNORE_READS_END
#define ANNOTALYSIS_IGNORE_WRITES_BEGIN
#define ANNOTALYSIS_IGNORE_WRITES_END
#define ANNOTALYSIS_UNPROTECTED_READ
#if defined(__GNUC__) && (!defined(SWIG)) && (!defined(__clang__)) && \
defined(__SUPPORT_TS_ANNOTATION__) && defined(__SUPPORT_DYN_ANNOTATION__)
#if DYNAMIC_ANNOTATIONS_ENABLED == 0
#define ANNOTALYSIS_ONLY 1
#undef ANNOTALYSIS_STATIC_INLINE
#define ANNOTALYSIS_STATIC_INLINE static inline
#undef ANNOTALYSIS_SEMICOLON_OR_EMPTY_BODY
#define ANNOTALYSIS_SEMICOLON_OR_EMPTY_BODY { (void)file; (void)line; }
#endif
/* Only emit attributes when annotalysis is enabled. */
#if defined(__SUPPORT_TS_ANNOTATION__) && defined(__SUPPORT_DYN_ANNOTATION__)
#undef ANNOTALYSIS_IGNORE_READS_BEGIN
#define ANNOTALYSIS_IGNORE_READS_BEGIN __attribute__ ((ignore_reads_begin))
#undef ANNOTALYSIS_IGNORE_READS_END
#define ANNOTALYSIS_IGNORE_READS_END __attribute__ ((ignore_reads_end))
#undef ANNOTALYSIS_IGNORE_WRITES_BEGIN
#define ANNOTALYSIS_IGNORE_WRITES_BEGIN __attribute__ ((ignore_writes_begin))
#undef ANNOTALYSIS_IGNORE_WRITES_END
#define ANNOTALYSIS_IGNORE_WRITES_END __attribute__ ((ignore_writes_end))
#undef ANNOTALYSIS_UNPROTECTED_READ
#define ANNOTALYSIS_UNPROTECTED_READ __attribute__ ((unprotected_read))
#endif
#endif // defined(__GNUC__) && (!defined(SWIG)) && (!defined(__clang__))
/* Use the macros above rather than using these functions directly. */
#ifdef __cplusplus
extern "C" {
#endif
void AnnotateRWLockCreate(const char *file, int line,
const volatile void *lock);
void AnnotateRWLockDestroy(const char *file, int line,
const volatile void *lock);
void AnnotateRWLockAcquired(const char *file, int line,
const volatile void *lock, long is_w);
void AnnotateRWLockReleased(const char *file, int line,
const volatile void *lock, long is_w);
void AnnotateBarrierInit(const char *file, int line,
const volatile void *barrier, long count,
long reinitialization_allowed);
void AnnotateBarrierWaitBefore(const char *file, int line,
const volatile void *barrier);
void AnnotateBarrierWaitAfter(const char *file, int line,
const volatile void *barrier);
void AnnotateBarrierDestroy(const char *file, int line,
const volatile void *barrier);
void AnnotateCondVarWait(const char *file, int line,
const volatile void *cv,
const volatile void *lock);
void AnnotateCondVarSignal(const char *file, int line,
const volatile void *cv);
void AnnotateCondVarSignalAll(const char *file, int line,
const volatile void *cv);
void AnnotatePublishMemoryRange(const char *file, int line,
const volatile void *address,
long size);
void AnnotateUnpublishMemoryRange(const char *file, int line,
const volatile void *address,
long size);
void AnnotatePCQCreate(const char *file, int line,
const volatile void *pcq);
void AnnotatePCQDestroy(const char *file, int line,
const volatile void *pcq);
void AnnotatePCQPut(const char *file, int line,
const volatile void *pcq);
void AnnotatePCQGet(const char *file, int line,
const volatile void *pcq);
void AnnotateNewMemory(const char *file, int line,
const volatile void *address,
long size);
void AnnotateExpectRace(const char *file, int line,
const volatile void *address,
const char *description);
void AnnotateBenignRace(const char *file, int line,
const volatile void *address,
const char *description);
void AnnotateBenignRaceSized(const char *file, int line,
const volatile void *address,
long size,
const char *description);
void AnnotateMutexIsUsedAsCondVar(const char *file, int line,
const volatile void *mu);
void AnnotateTraceMemory(const char *file, int line,
const volatile void *arg);
void AnnotateThreadName(const char *file, int line,
const char *name);
ANNOTALYSIS_STATIC_INLINE
void AnnotateIgnoreReadsBegin(const char *file, int line)
ANNOTALYSIS_IGNORE_READS_BEGIN ANNOTALYSIS_SEMICOLON_OR_EMPTY_BODY
ANNOTALYSIS_STATIC_INLINE
void AnnotateIgnoreReadsEnd(const char *file, int line)
ANNOTALYSIS_IGNORE_READS_END ANNOTALYSIS_SEMICOLON_OR_EMPTY_BODY
ANNOTALYSIS_STATIC_INLINE
void AnnotateIgnoreWritesBegin(const char *file, int line)
ANNOTALYSIS_IGNORE_WRITES_BEGIN ANNOTALYSIS_SEMICOLON_OR_EMPTY_BODY
ANNOTALYSIS_STATIC_INLINE
void AnnotateIgnoreWritesEnd(const char *file, int line)
ANNOTALYSIS_IGNORE_WRITES_END ANNOTALYSIS_SEMICOLON_OR_EMPTY_BODY
void AnnotateEnableRaceDetection(const char *file, int line, int enable);
void AnnotateNoOp(const char *file, int line,
const volatile void *arg);
void AnnotateFlushState(const char *file, int line);
/* Return non-zero value if running under valgrind.
If "valgrind.h" is included into dynamic_annotations.c,
the regular valgrind mechanism will be used.
See http://valgrind.org/docs/manual/manual-core-adv.html about
RUNNING_ON_VALGRIND and other valgrind "client requests".
The file "valgrind.h" may be obtained by doing
svn co svn://svn.valgrind.org/valgrind/trunk/include
If for some reason you can't use "valgrind.h" or want to fake valgrind,
there are two ways to make this function return non-zero:
- Use environment variable: export RUNNING_ON_VALGRIND=1
- Make your tool intercept the function RunningOnValgrind() and
change its return value.
*/
int RunningOnValgrind(void);
/* ValgrindSlowdown returns:
* 1.0, if (RunningOnValgrind() == 0)
* 50.0, if (RunningOnValgrind() != 0 && getenv("VALGRIND_SLOWDOWN") == NULL)
* atof(getenv("VALGRIND_SLOWDOWN")) otherwise
This function can be used to scale timeout values:
EXAMPLE:
for (;;) {
DoExpensiveBackgroundTask();
SleepForSeconds(5 * ValgrindSlowdown());
}
*/
double ValgrindSlowdown(void);
#ifdef __cplusplus
}
#endif
#if DYNAMIC_ANNOTATIONS_ENABLED != 0 && defined(__cplusplus)
/* ANNOTATE_UNPROTECTED_READ is the preferred way to annotate racey reads.
Instead of doing
ANNOTATE_IGNORE_READS_BEGIN();
... = x;
ANNOTATE_IGNORE_READS_END();
one can use
... = ANNOTATE_UNPROTECTED_READ(x); */
template <class T>
inline T ANNOTATE_UNPROTECTED_READ(const volatile T &x)
ANNOTALYSIS_UNPROTECTED_READ {
ANNOTATE_IGNORE_READS_BEGIN();
T res = x;
ANNOTATE_IGNORE_READS_END();
return res;
}
/* Apply ANNOTATE_BENIGN_RACE_SIZED to a static variable. */
#define ANNOTATE_BENIGN_RACE_STATIC(static_var, description) \
namespace { \
class static_var ## _annotator { \
public: \
static_var ## _annotator() { \
ANNOTATE_BENIGN_RACE_SIZED(&static_var, \
sizeof(static_var), \
# static_var ": " description); \
} \
}; \
static static_var ## _annotator the ## static_var ## _annotator;\
}
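/* Illustrative use of ANNOTATE_BENIGN_RACE_STATIC (not from the original
   file): a statistics counter that several threads may bump without
   synchronization on purpose.

     static int g_hit_count = 0;
     ANNOTATE_BENIGN_RACE_STATIC(g_hit_count, "stats only; races are OK");
*/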
#else /* DYNAMIC_ANNOTATIONS_ENABLED == 0 */
#define ANNOTATE_UNPROTECTED_READ(x) (x)
#define ANNOTATE_BENIGN_RACE_STATIC(static_var, description) /* empty */
#endif /* DYNAMIC_ANNOTATIONS_ENABLED */
/* Annotalysis, a GCC based static analyzer, is able to understand and use
some of the dynamic annotations defined in this file. However, dynamic
annotations are usually disabled in the opt mode (to avoid additional
runtime overheads) while Annotalysis only works in the opt mode.
In order for Annotalysis to use these dynamic annotations when they
are disabled, we re-define these annotations here. Note that unlike the
original macro definitions above, these macros are expanded to calls to
static inline functions so that the compiler will be able to remove the
calls after the analysis. */
#ifdef ANNOTALYSIS_ONLY
#undef ANNOTALYSIS_ONLY
/* Undefine and re-define the macros that the static analyzer understands. */
#undef ANNOTATE_IGNORE_READS_BEGIN
#define ANNOTATE_IGNORE_READS_BEGIN() \
AnnotateIgnoreReadsBegin(__FILE__, __LINE__)
#undef ANNOTATE_IGNORE_READS_END
#define ANNOTATE_IGNORE_READS_END() \
AnnotateIgnoreReadsEnd(__FILE__, __LINE__)
#undef ANNOTATE_IGNORE_WRITES_BEGIN
#define ANNOTATE_IGNORE_WRITES_BEGIN() \
AnnotateIgnoreWritesBegin(__FILE__, __LINE__)
#undef ANNOTATE_IGNORE_WRITES_END
#define ANNOTATE_IGNORE_WRITES_END() \
AnnotateIgnoreWritesEnd(__FILE__, __LINE__)
#undef ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN
#define ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() \
do { \
ANNOTATE_IGNORE_READS_BEGIN(); \
ANNOTATE_IGNORE_WRITES_BEGIN(); \
} while (0)
#undef ANNOTATE_IGNORE_READS_AND_WRITES_END
#define ANNOTATE_IGNORE_READS_AND_WRITES_END() \
do { \
ANNOTATE_IGNORE_WRITES_END(); \
ANNOTATE_IGNORE_READS_END(); \
} while (0)
#if defined(__cplusplus)
#undef ANNOTATE_UNPROTECTED_READ
template <class T>
inline T ANNOTATE_UNPROTECTED_READ(const volatile T &x)
ANNOTALYSIS_UNPROTECTED_READ {
ANNOTATE_IGNORE_READS_BEGIN();
T res = x;
ANNOTATE_IGNORE_READS_END();
return res;
}
#endif /* __cplusplus */
#endif /* ANNOTALYSIS_ONLY */
/* Undefine the macros intended only in this file. */
#undef ANNOTALYSIS_STATIC_INLINE
#undef ANNOTALYSIS_SEMICOLON_OR_EMPTY_BODY
#endif /* BASE_DYNAMIC_ANNOTATIONS_H_ */

View File

@ -1,443 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2008, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
// Author: Paul Pluzhnikov
//
// Allow dynamic symbol lookup in an in-memory Elf image.
//
#include "base/elf_mem_image.h"
#ifdef HAVE_ELF_MEM_IMAGE // defined in elf_mem_image.h
#include <stddef.h> // for size_t, ptrdiff_t
#include "base/logging.h"
// From binutils/include/elf/common.h (this doesn't appear to be documented
// anywhere else).
//
// /* This flag appears in a Versym structure. It means that the symbol
// is hidden, and is only visible with an explicit version number.
// This is a GNU extension. */
// #define VERSYM_HIDDEN 0x8000
//
// /* This is the mask for the rest of the Versym information. */
// #define VERSYM_VERSION 0x7fff
#define VERSYM_VERSION 0x7fff
#if __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-const-variable"
#endif
namespace base {
namespace {
template <int N> class ElfClass {
public:
static const int kElfClass = -1;
static int ElfBind(const ElfW(Sym) *) {
CHECK(false); // << "Unexpected word size";
return 0;
}
static int ElfType(const ElfW(Sym) *) {
CHECK(false); // << "Unexpected word size";
return 0;
}
};
template <> class ElfClass<32> {
public:
static const int kElfClass = ELFCLASS32;
static int ElfBind(const ElfW(Sym) *symbol) {
return ELF32_ST_BIND(symbol->st_info);
}
static int ElfType(const ElfW(Sym) *symbol) {
return ELF32_ST_TYPE(symbol->st_info);
}
};
template <> class ElfClass<64> {
public:
static const int kElfClass = ELFCLASS64;
static int ElfBind(const ElfW(Sym) *symbol) {
return ELF64_ST_BIND(symbol->st_info);
}
static int ElfType(const ElfW(Sym) *symbol) {
return ELF64_ST_TYPE(symbol->st_info);
}
};
typedef ElfClass<__WORDSIZE> CurrentElfClass;
// Extract an element from one of the ELF tables, cast it to desired type.
// This is just simple arithmetic and a glorified cast.
// Callers are responsible for bounds checking.
template <class T>
const T* GetTableElement(const ElfW(Ehdr) *ehdr,
ElfW(Off) table_offset,
ElfW(Word) element_size,
size_t index) {
return reinterpret_cast<const T*>(reinterpret_cast<const char *>(ehdr)
+ table_offset
+ index * element_size);
}
} // namespace
const void *const ElfMemImage::kInvalidBase =
reinterpret_cast<const void *>(~0L);
ElfMemImage::ElfMemImage(const void *base) {
CHECK(base != kInvalidBase);
Init(base);
}
int ElfMemImage::GetNumSymbols() const {
if (!hash_) {
return 0;
}
// See http://www.caldera.com/developers/gabi/latest/ch5.dynamic.html#hash
return hash_[1];
}
const ElfW(Sym) *ElfMemImage::GetDynsym(int index) const {
CHECK_LT(index, GetNumSymbols());
return dynsym_ + index;
}
const ElfW(Versym) *ElfMemImage::GetVersym(int index) const {
CHECK_LT(index, GetNumSymbols());
return versym_ + index;
}
const ElfW(Phdr) *ElfMemImage::GetPhdr(int index) const {
CHECK_LT(index, ehdr_->e_phnum);
return GetTableElement<ElfW(Phdr)>(ehdr_,
ehdr_->e_phoff,
ehdr_->e_phentsize,
index);
}
const char *ElfMemImage::GetDynstr(ElfW(Word) offset) const {
CHECK_LT(offset, strsize_);
return dynstr_ + offset;
}
const void *ElfMemImage::GetSymAddr(const ElfW(Sym) *sym) const {
if (sym->st_shndx == SHN_UNDEF || sym->st_shndx >= SHN_LORESERVE) {
// Symbol corresponds to "special" (e.g. SHN_ABS) section.
return reinterpret_cast<const void *>(sym->st_value);
}
CHECK_LT(link_base_, sym->st_value);
return GetTableElement<char>(ehdr_, 0, 1, sym->st_value) - link_base_;
}
const ElfW(Verdef) *ElfMemImage::GetVerdef(int index) const {
CHECK_LE(index, verdefnum_);
const ElfW(Verdef) *version_definition = verdef_;
while (version_definition->vd_ndx < index && version_definition->vd_next) {
const char *const version_definition_as_char =
reinterpret_cast<const char *>(version_definition);
version_definition =
reinterpret_cast<const ElfW(Verdef) *>(version_definition_as_char +
version_definition->vd_next);
}
return version_definition->vd_ndx == index ? version_definition : NULL;
}
const ElfW(Verdaux) *ElfMemImage::GetVerdefAux(
const ElfW(Verdef) *verdef) const {
return reinterpret_cast<const ElfW(Verdaux) *>(verdef+1);
}
const char *ElfMemImage::GetVerstr(ElfW(Word) offset) const {
CHECK_LT(offset, strsize_);
return dynstr_ + offset;
}
void ElfMemImage::Init(const void *base) {
ehdr_ = NULL;
dynsym_ = NULL;
dynstr_ = NULL;
versym_ = NULL;
verdef_ = NULL;
hash_ = NULL;
strsize_ = 0;
verdefnum_ = 0;
link_base_ = ~0L; // Sentinel: PT_LOAD .p_vaddr can't possibly be this.
if (!base) {
return;
}
const uintptr_t base_as_uintptr_t = reinterpret_cast<uintptr_t>(base);
// Fake VDSO has low bit set.
const bool fake_vdso = ((base_as_uintptr_t & 1) != 0);
base = reinterpret_cast<const void *>(base_as_uintptr_t & ~1);
const char *const base_as_char = reinterpret_cast<const char *>(base);
if (base_as_char[EI_MAG0] != ELFMAG0 || base_as_char[EI_MAG1] != ELFMAG1 ||
base_as_char[EI_MAG2] != ELFMAG2 || base_as_char[EI_MAG3] != ELFMAG3) {
RAW_DCHECK(false, "no ELF magic"); // at %p", base);
return;
}
int elf_class = base_as_char[EI_CLASS];
if (elf_class != CurrentElfClass::kElfClass) {
DCHECK_EQ(elf_class, CurrentElfClass::kElfClass);
return;
}
switch (base_as_char[EI_DATA]) {
case ELFDATA2LSB: {
if (__LITTLE_ENDIAN != __BYTE_ORDER) {
DCHECK_EQ(__LITTLE_ENDIAN, __BYTE_ORDER); // << ": wrong byte order";
return;
}
break;
}
case ELFDATA2MSB: {
if (__BIG_ENDIAN != __BYTE_ORDER) {
DCHECK_EQ(__BIG_ENDIAN, __BYTE_ORDER); // << ": wrong byte order";
return;
}
break;
}
default: {
RAW_DCHECK(false, "unexpected data encoding"); // << base_as_char[EI_DATA];
return;
}
}
ehdr_ = reinterpret_cast<const ElfW(Ehdr) *>(base);
const ElfW(Phdr) *dynamic_program_header = NULL;
for (int i = 0; i < ehdr_->e_phnum; ++i) {
const ElfW(Phdr) *const program_header = GetPhdr(i);
switch (program_header->p_type) {
case PT_LOAD:
if (link_base_ == ~0L) {
link_base_ = program_header->p_vaddr;
}
break;
case PT_DYNAMIC:
dynamic_program_header = program_header;
break;
}
}
if (link_base_ == ~0L || !dynamic_program_header) {
RAW_DCHECK(~0L != link_base_, "no PT_LOADs in VDSO");
RAW_DCHECK(dynamic_program_header, "no PT_DYNAMIC in VDSO");
// Mark this image as not present. Cannot recurse infinitely.
Init(0);
return;
}
ptrdiff_t relocation =
base_as_char - reinterpret_cast<const char *>(link_base_);
ElfW(Dyn) *dynamic_entry =
reinterpret_cast<ElfW(Dyn) *>(dynamic_program_header->p_vaddr +
relocation);
for (; dynamic_entry->d_tag != DT_NULL; ++dynamic_entry) {
ElfW(Xword) value = dynamic_entry->d_un.d_val;
if (fake_vdso) {
// A complication: in the real VDSO, dynamic entries are not relocated
// (it wasn't loaded by a dynamic loader). But when testing with a
// "fake" dlopen()ed vdso library, the loader relocates some (but
// not all!) of them before we get here.
if (dynamic_entry->d_tag == DT_VERDEF) {
// The only dynamic entry (of the ones we care about) libc-2.3.6
// loader doesn't relocate.
value += relocation;
}
} else {
// Real VDSO. Everything needs to be relocated.
value += relocation;
}
switch (dynamic_entry->d_tag) {
case DT_HASH:
hash_ = reinterpret_cast<ElfW(Word) *>(value);
break;
case DT_SYMTAB:
dynsym_ = reinterpret_cast<ElfW(Sym) *>(value);
break;
case DT_STRTAB:
dynstr_ = reinterpret_cast<const char *>(value);
break;
case DT_VERSYM:
versym_ = reinterpret_cast<ElfW(Versym) *>(value);
break;
case DT_VERDEF:
verdef_ = reinterpret_cast<ElfW(Verdef) *>(value);
break;
case DT_VERDEFNUM:
verdefnum_ = dynamic_entry->d_un.d_val;
break;
case DT_STRSZ:
strsize_ = dynamic_entry->d_un.d_val;
break;
default:
// Unrecognized entries explicitly ignored.
break;
}
}
if (!hash_ || !dynsym_ || !dynstr_ || !versym_ ||
!verdef_ || !verdefnum_ || !strsize_) {
RAW_DCHECK(hash_, "invalid VDSO (no DT_HASH)");
RAW_DCHECK(dynsym_, "invalid VDSO (no DT_SYMTAB)");
RAW_DCHECK(dynstr_, "invalid VDSO (no DT_STRTAB)");
RAW_DCHECK(versym_, "invalid VDSO (no DT_VERSYM)");
RAW_DCHECK(verdef_, "invalid VDSO (no DT_VERDEF)");
RAW_DCHECK(verdefnum_, "invalid VDSO (no DT_VERDEFNUM)");
RAW_DCHECK(strsize_, "invalid VDSO (no DT_STRSZ)");
// Mark this image as not present. Cannot recurse infinitely.
Init(0);
return;
}
}
bool ElfMemImage::LookupSymbol(const char *name,
const char *version,
int type,
SymbolInfo *info) const {
for (SymbolIterator it = begin(); it != end(); ++it) {
if (strcmp(it->name, name) == 0 && strcmp(it->version, version) == 0 &&
CurrentElfClass::ElfType(it->symbol) == type) {
if (info) {
*info = *it;
}
return true;
}
}
return false;
}
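// Illustrative caller (not from the original file): resolving a versioned
// symbol from a VDSO image, assuming "vdso_base" was obtained elsewhere
// (e.g. from getauxval(AT_SYSINFO_EHDR)).
//
//   base::ElfMemImage image(vdso_base);
//   base::ElfMemImage::SymbolInfo info;
//   if (image.LookupSymbol("__vdso_getcpu", "LINUX_2.6", STT_FUNC, &info)) {
//     // info.address is the relocated entry point of the function.
//   }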
bool ElfMemImage::LookupSymbolByAddress(const void *address,
SymbolInfo *info_out) const {
for (SymbolIterator it = begin(); it != end(); ++it) {
const char *const symbol_start =
reinterpret_cast<const char *>(it->address);
const char *const symbol_end = symbol_start + it->symbol->st_size;
if (symbol_start <= address && address < symbol_end) {
if (info_out) {
// Client wants to know details for that symbol (the usual case).
if (CurrentElfClass::ElfBind(it->symbol) == STB_GLOBAL) {
// Strong symbol; just return it.
*info_out = *it;
return true;
} else {
// Weak or local. Record it, but keep looking for a strong one.
*info_out = *it;
}
} else {
// Client only cares if there is an overlapping symbol.
return true;
}
}
}
return false;
}
ElfMemImage::SymbolIterator::SymbolIterator(const void *const image, int index)
: index_(index), image_(image) {
}
const ElfMemImage::SymbolInfo *ElfMemImage::SymbolIterator::operator->() const {
return &info_;
}
const ElfMemImage::SymbolInfo& ElfMemImage::SymbolIterator::operator*() const {
return info_;
}
bool ElfMemImage::SymbolIterator::operator==(const SymbolIterator &rhs) const {
return this->image_ == rhs.image_ && this->index_ == rhs.index_;
}
bool ElfMemImage::SymbolIterator::operator!=(const SymbolIterator &rhs) const {
return !(*this == rhs);
}
ElfMemImage::SymbolIterator &ElfMemImage::SymbolIterator::operator++() {
this->Update(1);
return *this;
}
ElfMemImage::SymbolIterator ElfMemImage::begin() const {
SymbolIterator it(this, 0);
it.Update(0);
return it;
}
ElfMemImage::SymbolIterator ElfMemImage::end() const {
return SymbolIterator(this, GetNumSymbols());
}
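// Illustrative iteration over every dynamic symbol in an image (not from
// the original file; "image" is assumed to be a valid ElfMemImage):
//
//   for (ElfMemImage::SymbolIterator it = image.begin();
//        it != image.end(); ++it) {
//     printf("%s@%s -> %p\n", it->name, it->version, it->address);
//   }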
void ElfMemImage::SymbolIterator::Update(int increment) {
const ElfMemImage *image = reinterpret_cast<const ElfMemImage *>(image_);
CHECK(image->IsPresent() || increment == 0);
if (!image->IsPresent()) {
return;
}
index_ += increment;
if (index_ >= image->GetNumSymbols()) {
index_ = image->GetNumSymbols();
return;
}
const ElfW(Sym) *symbol = image->GetDynsym(index_);
const ElfW(Versym) *version_symbol = image->GetVersym(index_);
CHECK(symbol && version_symbol);
const char *const symbol_name = image->GetDynstr(symbol->st_name);
const ElfW(Versym) version_index = version_symbol[0] & VERSYM_VERSION;
const ElfW(Verdef) *version_definition = NULL;
const char *version_name = "";
if (symbol->st_shndx == SHN_UNDEF) {
// Undefined symbols reference DT_VERNEED, not DT_VERDEF, and
// version_index could well be greater than verdefnum_, so calling
// GetVerdef(version_index) may trigger assertion.
} else {
version_definition = image->GetVerdef(version_index);
}
if (version_definition) {
// I am expecting 1 or 2 auxiliary entries: 1 for the version itself,
// with an optional 2nd if the version has a parent.
CHECK_LE(1, version_definition->vd_cnt);
CHECK_LE(version_definition->vd_cnt, 2);
const ElfW(Verdaux) *version_aux = image->GetVerdefAux(version_definition);
version_name = image->GetVerstr(version_aux->vda_name);
}
info_.name = symbol_name;
info_.version = version_name;
info_.address = image->GetSymAddr(symbol);
info_.symbol = symbol;
}
} // namespace base
#if __clang__
#pragma clang diagnostic pop
#endif
#endif // HAVE_ELF_MEM_IMAGE


@ -1,135 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2008, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
// Author: Paul Pluzhnikov
//
// Allow dynamic symbol lookup for in-memory Elf images.
#ifndef BASE_ELF_MEM_IMAGE_H_
#define BASE_ELF_MEM_IMAGE_H_
#include "../config.h"
#ifdef HAVE_FEATURES_H
#include <features.h> // for __GLIBC__
#endif
// Maybe one day we can rewrite this file not to require the elf
// symbol extensions in glibc, but for right now we need them.
#if defined(__ELF__) && defined(__GLIBC__) && !defined(__native_client__)
#define HAVE_ELF_MEM_IMAGE 1
#include <stdlib.h>
#include <link.h> // for ElfW
namespace base {
// An in-memory ELF image (may not exist on disk).
class ElfMemImage {
public:
// Sentinel: there could never be an elf image at this address.
static const void *const kInvalidBase;
// Information about a single vdso symbol.
// All pointers are into .dynsym, .dynstr, or .text of the VDSO.
// Do not free() them or modify through them.
struct SymbolInfo {
const char *name; // E.g. "__vdso_getcpu"
const char *version; // E.g. "LINUX_2.6", could be ""
// for unversioned symbol.
const void *address; // Relocated symbol address.
const ElfW(Sym) *symbol; // Symbol in the dynamic symbol table.
};
// Supports iteration over all dynamic symbols.
class SymbolIterator {
public:
friend class ElfMemImage;
const SymbolInfo *operator->() const;
const SymbolInfo &operator*() const;
SymbolIterator& operator++();
bool operator!=(const SymbolIterator &rhs) const;
bool operator==(const SymbolIterator &rhs) const;
private:
SymbolIterator(const void *const image, int index);
void Update(int incr);
SymbolInfo info_;
int index_;
const void *const image_;
};
explicit ElfMemImage(const void *base);
void Init(const void *base);
bool IsPresent() const { return ehdr_ != NULL; }
const ElfW(Phdr)* GetPhdr(int index) const;
const ElfW(Sym)* GetDynsym(int index) const;
const ElfW(Versym)* GetVersym(int index) const;
const ElfW(Verdef)* GetVerdef(int index) const;
const ElfW(Verdaux)* GetVerdefAux(const ElfW(Verdef) *verdef) const;
const char* GetDynstr(ElfW(Word) offset) const;
const void* GetSymAddr(const ElfW(Sym) *sym) const;
const char* GetVerstr(ElfW(Word) offset) const;
int GetNumSymbols() const;
SymbolIterator begin() const;
SymbolIterator end() const;
// Look up versioned dynamic symbol in the image.
// Returns false if image is not present, or doesn't contain given
// symbol/version/type combination.
// If info_out != NULL, additional details are filled in.
bool LookupSymbol(const char *name, const char *version,
int symbol_type, SymbolInfo *info_out) const;
// Find info about symbol (if any) which overlaps given address.
// Returns true if symbol was found; false if image isn't present
// or doesn't have a symbol overlapping given address.
// If info_out != NULL, additional details are filled in.
bool LookupSymbolByAddress(const void *address, SymbolInfo *info_out) const;
private:
const ElfW(Ehdr) *ehdr_;
const ElfW(Sym) *dynsym_;
const ElfW(Versym) *versym_;
const ElfW(Verdef) *verdef_;
const ElfW(Word) *hash_;
const char *dynstr_;
size_t strsize_;
size_t verdefnum_;
ElfW(Addr) link_base_; // Link-time base (p_vaddr of first PT_LOAD).
};
} // namespace base
#endif // __ELF__ and __GLIBC__ and !__native_client__
#endif // BASE_ELF_MEM_IMAGE_H_
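// Editor's sketch (not part of the original header): enumerating every
// dynamic symbol of an in-memory image with SymbolIterator. getauxval() and
// AT_SYSINFO_EHDR come from <sys/auxv.h> on Linux; printf() is illustrative
// only -- real callers in this library avoid stdio.
#if 0
#include <stdio.h>
#include <sys/auxv.h>
static void ExampleDumpVdsoSymbols() {
  const void *vdso_base =
      reinterpret_cast<const void *>(getauxval(AT_SYSINFO_EHDR));
  base::ElfMemImage image(vdso_base);
  for (base::ElfMemImage::SymbolIterator it = image.begin();
       it != image.end(); ++it) {
    printf("%s@%s at %p\n", it->name, it->version, it->address);
  }
}
#endif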


@ -1,401 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2005-2008, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ---
* Author: Markus Gutschke, Carl Crous
*/
#ifndef _ELFCORE_H
#define _ELFCORE_H
#ifdef __cplusplus
extern "C" {
#endif
/* We currently only support x86-32, x86-64, ARM, MIPS, PPC on Linux.
* Porting to other related platforms should not be difficult.
*/
#if (defined(__i386__) || defined(__x86_64__) || defined(__arm__) || \
defined(__mips__) || defined(__PPC__)) && defined(__linux)
#include <stdarg.h>
#include <stdint.h>
#include <sys/types.h>
#include "../config.h"
/* Define the DUMPER symbol to make sure that there is exactly one
* core dumper built into the library.
*/
#define DUMPER "ELF"
/* By the time that we get a chance to read CPU registers in the
* calling thread, they are already in a not particularly useful
* state. Besides, there will be multiple frames on the stack that are
* just making the core file confusing. To fix this problem, we take a
* snapshot of the frame pointer, stack pointer, and instruction
* pointer at an earlier time, and then insert these values into the
* core file.
*/
#if defined(__i386__) || defined(__x86_64__)
typedef struct i386_regs { /* Normal (non-FPU) CPU registers */
#ifdef __x86_64__
#define BP rbp
#define SP rsp
#define IP rip
uint64_t r15,r14,r13,r12,rbp,rbx,r11,r10;
uint64_t r9,r8,rax,rcx,rdx,rsi,rdi,orig_rax;
uint64_t rip,cs,eflags;
uint64_t rsp,ss;
uint64_t fs_base, gs_base;
uint64_t ds,es,fs,gs;
#else
#define BP ebp
#define SP esp
#define IP eip
uint32_t ebx, ecx, edx, esi, edi, ebp, eax;
uint16_t ds, __ds, es, __es;
uint16_t fs, __fs, gs, __gs;
uint32_t orig_eax, eip;
uint16_t cs, __cs;
uint32_t eflags, esp;
uint16_t ss, __ss;
#endif
} i386_regs;
#elif defined(__arm__)
typedef struct arm_regs { /* General purpose registers */
#define BP uregs[11] /* Frame pointer */
#define SP uregs[13] /* Stack pointer */
#define IP uregs[15] /* Program counter */
#define LR uregs[14] /* Link register */
long uregs[18];
} arm_regs;
#elif defined(__mips__)
typedef struct mips_regs {
unsigned long pad[6]; /* Unused padding to match kernel structures */
unsigned long uregs[32]; /* General purpose registers. */
unsigned long hi; /* Used for multiplication and division. */
unsigned long lo;
unsigned long cp0_epc; /* Program counter. */
unsigned long cp0_badvaddr;
unsigned long cp0_status;
unsigned long cp0_cause;
unsigned long unused;
} mips_regs;
#elif defined (__PPC__)
typedef struct ppc_regs {
#define SP uregs[1] /* Stack pointer */
#define IP rip /* Program counter */
#define LR lr /* Link register */
unsigned long uregs[32]; /* General Purpose Registers - r0-r31. */
double fpr[32]; /* Floating-Point Registers - f0-f31. */
unsigned long rip; /* Program counter. */
unsigned long msr;
unsigned long ccr;
unsigned long lr;
unsigned long ctr;
unsigned long xeq;
unsigned long mq;
} ppc_regs;
#endif
#if defined(__i386__) && defined(__GNUC__)
/* On x86 we provide an optimized version of the FRAME() macro, if the
* compiler supports a GCC-style asm() directive. This results in somewhat
* more accurate values for CPU registers.
*/
typedef struct Frame {
struct i386_regs uregs;
int errno_;
pid_t tid;
} Frame;
#define FRAME(f) Frame f; \
do { \
f.errno_ = errno; \
f.tid = sys_gettid(); \
__asm__ volatile ( \
"push %%ebp\n" \
"push %%ebx\n" \
"mov %%ebx,0(%%eax)\n" \
"mov %%ecx,4(%%eax)\n" \
"mov %%edx,8(%%eax)\n" \
"mov %%esi,12(%%eax)\n" \
"mov %%edi,16(%%eax)\n" \
"mov %%ebp,20(%%eax)\n" \
"mov %%eax,24(%%eax)\n" \
"mov %%ds,%%ebx\n" \
"mov %%ebx,28(%%eax)\n" \
"mov %%es,%%ebx\n" \
"mov %%ebx,32(%%eax)\n" \
"mov %%fs,%%ebx\n" \
"mov %%ebx,36(%%eax)\n" \
"mov %%gs,%%ebx\n" \
"mov %%ebx, 40(%%eax)\n" \
"call 0f\n" \
"0:pop %%ebx\n" \
"add $1f-0b,%%ebx\n" \
"mov %%ebx,48(%%eax)\n" \
"mov %%cs,%%ebx\n" \
"mov %%ebx,52(%%eax)\n" \
"pushf\n" \
"pop %%ebx\n" \
"mov %%ebx,56(%%eax)\n" \
"mov %%esp,%%ebx\n" \
"add $8,%%ebx\n" \
"mov %%ebx,60(%%eax)\n" \
"mov %%ss,%%ebx\n" \
"mov %%ebx,64(%%eax)\n" \
"pop %%ebx\n" \
"pop %%ebp\n" \
"1:" \
: : "a" (&f) : "memory"); \
} while (0)
#define SET_FRAME(f,r) \
do { \
errno = (f).errno_; \
(r) = (f).uregs; \
} while (0)
#elif defined(__x86_64__) && defined(__GNUC__)
/* The FRAME and SET_FRAME macros for x86_64. */
typedef struct Frame {
struct i386_regs uregs;
int errno_;
pid_t tid;
} Frame;
#define FRAME(f) Frame f; \
do { \
f.errno_ = errno; \
f.tid = sys_gettid(); \
__asm__ volatile ( \
"push %%rbp\n" \
"push %%rbx\n" \
"mov %%r15,0(%%rax)\n" \
"mov %%r14,8(%%rax)\n" \
"mov %%r13,16(%%rax)\n" \
"mov %%r12,24(%%rax)\n" \
"mov %%rbp,32(%%rax)\n" \
"mov %%rbx,40(%%rax)\n" \
"mov %%r11,48(%%rax)\n" \
"mov %%r10,56(%%rax)\n" \
"mov %%r9,64(%%rax)\n" \
"mov %%r8,72(%%rax)\n" \
"mov %%rax,80(%%rax)\n" \
"mov %%rcx,88(%%rax)\n" \
"mov %%rdx,96(%%rax)\n" \
"mov %%rsi,104(%%rax)\n" \
"mov %%rdi,112(%%rax)\n" \
"mov %%ds,%%rbx\n" \
"mov %%rbx,184(%%rax)\n" \
"mov %%es,%%rbx\n" \
"mov %%rbx,192(%%rax)\n" \
"mov %%fs,%%rbx\n" \
"mov %%rbx,200(%%rax)\n" \
"mov %%gs,%%rbx\n" \
"mov %%rbx,208(%%rax)\n" \
"call 0f\n" \
"0:pop %%rbx\n" \
"add $1f-0b,%%rbx\n" \
"mov %%rbx,128(%%rax)\n" \
"mov %%cs,%%rbx\n" \
"mov %%rbx,136(%%rax)\n" \
"pushf\n" \
"pop %%rbx\n" \
"mov %%rbx,144(%%rax)\n" \
"mov %%rsp,%%rbx\n" \
"add $16,%%ebx\n" \
"mov %%rbx,152(%%rax)\n" \
"mov %%ss,%%rbx\n" \
"mov %%rbx,160(%%rax)\n" \
"pop %%rbx\n" \
"pop %%rbp\n" \
"1:" \
: : "a" (&f) : "memory"); \
} while (0)
#define SET_FRAME(f,r) \
do { \
errno = (f).errno_; \
(f).uregs.fs_base = (r).fs_base; \
(f).uregs.gs_base = (r).gs_base; \
(r) = (f).uregs; \
} while (0)
#elif defined(__arm__) && defined(__GNUC__)
/* ARM calling conventions are a little more tricky. A little assembly
* helps in obtaining an accurate snapshot of all registers.
*/
typedef struct Frame {
struct arm_regs arm;
int errno_;
pid_t tid;
} Frame;
#define FRAME(f) Frame f; \
do { \
long cpsr; \
f.errno_ = errno; \
f.tid = sys_gettid(); \
__asm__ volatile( \
"stmia %0, {r0-r15}\n" /* All integer regs */\
: : "r"(&f.arm) : "memory"); \
f.arm.uregs[16] = 0; \
__asm__ volatile( \
"mrs %0, cpsr\n" /* Condition code reg */\
: "=r"(cpsr)); \
f.arm.uregs[17] = cpsr; \
} while (0)
#define SET_FRAME(f,r) \
do { \
/* Don't override the FPU status register. */\
/* Use the value obtained from ptrace(). This*/\
/* works, because our code does not perform */\
/* any FPU operations, itself. */\
long fps = (f).arm.uregs[16]; \
errno = (f).errno_; \
(r) = (f).arm; \
(r).uregs[16] = fps; \
} while (0)
#elif defined(__mips__) && defined(__GNUC__)
typedef struct Frame {
struct mips_regs mips_regs;
int errno_;
pid_t tid;
} Frame;
#define MIPSREG(n) ({ register unsigned long r __asm__("$"#n); r; })
#define FRAME(f) Frame f = { 0 }; \
do { \
unsigned long hi, lo; \
register unsigned long pc __asm__("$31"); \
f.mips_regs.uregs[ 0] = MIPSREG( 0); \
f.mips_regs.uregs[ 1] = MIPSREG( 1); \
f.mips_regs.uregs[ 2] = MIPSREG( 2); \
f.mips_regs.uregs[ 3] = MIPSREG( 3); \
f.mips_regs.uregs[ 4] = MIPSREG( 4); \
f.mips_regs.uregs[ 5] = MIPSREG( 5); \
f.mips_regs.uregs[ 6] = MIPSREG( 6); \
f.mips_regs.uregs[ 7] = MIPSREG( 7); \
f.mips_regs.uregs[ 8] = MIPSREG( 8); \
f.mips_regs.uregs[ 9] = MIPSREG( 9); \
f.mips_regs.uregs[10] = MIPSREG(10); \
f.mips_regs.uregs[11] = MIPSREG(11); \
f.mips_regs.uregs[12] = MIPSREG(12); \
f.mips_regs.uregs[13] = MIPSREG(13); \
f.mips_regs.uregs[14] = MIPSREG(14); \
f.mips_regs.uregs[15] = MIPSREG(15); \
f.mips_regs.uregs[16] = MIPSREG(16); \
f.mips_regs.uregs[17] = MIPSREG(17); \
f.mips_regs.uregs[18] = MIPSREG(18); \
f.mips_regs.uregs[19] = MIPSREG(19); \
f.mips_regs.uregs[20] = MIPSREG(20); \
f.mips_regs.uregs[21] = MIPSREG(21); \
f.mips_regs.uregs[22] = MIPSREG(22); \
f.mips_regs.uregs[23] = MIPSREG(23); \
f.mips_regs.uregs[24] = MIPSREG(24); \
f.mips_regs.uregs[25] = MIPSREG(25); \
f.mips_regs.uregs[26] = MIPSREG(26); \
f.mips_regs.uregs[27] = MIPSREG(27); \
f.mips_regs.uregs[28] = MIPSREG(28); \
f.mips_regs.uregs[29] = MIPSREG(29); \
f.mips_regs.uregs[30] = MIPSREG(30); \
f.mips_regs.uregs[31] = MIPSREG(31); \
__asm__ volatile ("mfhi %0" : "=r"(hi)); \
__asm__ volatile ("mflo %0" : "=r"(lo)); \
__asm__ volatile ("jal 1f; 1:nop" : "=r"(pc)); \
f.mips_regs.hi = hi; \
f.mips_regs.lo = lo; \
f.mips_regs.cp0_epc = pc; \
f.errno_ = errno; \
f.tid = sys_gettid(); \
} while (0)
#define SET_FRAME(f,r) \
do { \
errno = (f).errno_; \
memcpy((r).uregs, (f).mips_regs.uregs, \
32*sizeof(unsigned long)); \
(r).hi = (f).mips_regs.hi; \
(r).lo = (f).mips_regs.lo; \
(r).cp0_epc = (f).mips_regs.cp0_epc; \
} while (0)
#else
/* If we do not have a hand-optimized assembly version of the FRAME()
* macro, we cannot reliably unroll the stack. So, we show a few additional
* stack frames for the coredumper.
*/
typedef struct Frame {
pid_t tid;
} Frame;
#define FRAME(f) Frame f; do { f.tid = sys_gettid(); } while (0)
#define SET_FRAME(f,r) do { } while (0)
#endif
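/* Editor's sketch (not part of the original header): how a core dumper is
 * expected to combine FRAME() and SET_FRAME(). "regs" stands for a register
 * set read back via ptrace(); the helper name is illustrative.
 */
#if 0
static void ExamplePatchRegisters(struct i386_regs *regs) {
  FRAME(frame);             /* snapshot registers, errno and tid right here */
  /* ... suspend threads, collect their registers via ptrace() ... */
  SET_FRAME(frame, *regs);  /* splice the snapshot into the core image */
}
#endif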
/* Internal function for generating a core file. This API can change without
* notice and is only supposed to be used internally by the core dumper.
*
* This function works for both single- and multi-threaded core
* dumps. If called as
*
* FRAME(frame);
* InternalGetCoreDump(&frame, 0, NULL, ap);
*
* it creates a core file that only contains information about the
* calling thread.
*
* Optionally, the caller can provide information about other threads
* by passing their process ids in "thread_pids". The process id of
* the caller should not be included in this array. All of the threads
* must have been attached to with ptrace(), prior to calling this
* function. They will be detached when "InternalGetCoreDump()" returns.
*
* This function either returns a file handle that can be read for obtaining
* a core dump, or "-1" in case of an error. In the latter case, "errno"
* will be set appropriately.
*
* While "InternalGetCoreDump()" is not technically async signal safe, you
* might be tempted to invoke it from a signal handler. The code goes to
* great lengths to make a best effort that this will actually work. But in
* any case, you must make sure that you preserve the value of "errno"
* yourself. It is guaranteed to be clobbered otherwise.
*
* Also, "InternalGetCoreDump" is not strictly speaking re-entrant. Again,
* it makes a best effort to behave reasonably when called in a multi-
* threaded environment, but it is ultimately the caller's responsibility
* to provide locking.
*/
int InternalGetCoreDump(void *frame, int num_threads, pid_t *thread_pids,
va_list ap
/* const struct CoreDumpParameters *params,
const char *file_name,
const char *PATH
*/);
#endif
#ifdef __cplusplus
}
#endif
#endif /* _ELFCORE_H */


@ -1,74 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2005, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
// Author: Jacob Hoffman-Andrews
#ifndef _GOOGLEINIT_H
#define _GOOGLEINIT_H
#include "base/logging.h"
class GoogleInitializer {
public:
typedef void (*VoidFunction)(void);
GoogleInitializer(const char* name, VoidFunction ctor, VoidFunction dtor)
: name_(name), destructor_(dtor) {
RAW_VLOG(10, "<GoogleModuleObject> constructing: %s\n", name_);
if (ctor)
ctor();
}
~GoogleInitializer() {
RAW_VLOG(10, "<GoogleModuleObject> destroying: %s\n", name_);
if (destructor_)
destructor_();
}
private:
const char* const name_;
const VoidFunction destructor_;
};
#define REGISTER_MODULE_INITIALIZER(name, body) \
namespace { \
static void google_init_module_##name () { body; } \
GoogleInitializer google_initializer_module_##name(#name, \
google_init_module_##name, NULL); \
}
#define REGISTER_MODULE_DESTRUCTOR(name, body) \
namespace { \
static void google_destruct_module_##name () { body; } \
GoogleInitializer google_destructor_module_##name(#name, \
NULL, google_destruct_module_##name); \
}
#endif /* _GOOGLEINIT_H */
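// Editor's sketch (not part of the original header): typical use of the
// macros above; the module name "my_module" is illustrative. The initializer
// body runs during static initialization, the destructor body at exit.
#if 0
REGISTER_MODULE_INITIALIZER(my_module, RAW_VLOG(10, "my_module up\n"));
REGISTER_MODULE_DESTRUCTOR(my_module, RAW_VLOG(10, "my_module down\n"));
#endif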

File diff suppressed because it is too large


@ -1,707 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2005-2007, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ---
* Author: Markus Gutschke
*/
#include "base/linuxthreads.h"
#ifdef THREADS
#ifdef __cplusplus
extern "C" {
#endif
#include <sched.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <fcntl.h>
#include <sys/socket.h>
#include <sys/wait.h>
#include <sys/prctl.h>
#include <semaphore.h>
#include "base/linux_syscall_support.h"
#include "base/thread_lister.h"
#ifndef CLONE_UNTRACED
#define CLONE_UNTRACED 0x00800000
#endif
/* Synchronous signals that should not be blocked while in the lister thread.
*/
static const int sync_signals[] = { SIGABRT, SIGILL, SIGFPE, SIGSEGV, SIGBUS,
SIGXCPU, SIGXFSZ };
/* itoa() is not a standard function, and we cannot safely call printf()
* after suspending threads. So, we just implement our own copy. A
* recursive approach is the easiest here.
*/
static char *local_itoa(char *buf, int i) {
if (i < 0) {
*buf++ = '-';
return local_itoa(buf, -i);
} else {
if (i >= 10)
buf = local_itoa(buf, i/10);
*buf++ = (i%10) + '0';
*buf = '\000';
return buf;
}
}
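/* Editor's note: local_itoa(buf, -42) leaves "-42\0" in buf and returns a
 * pointer to the terminating NUL, which is what lets ListerThread below
 * chain it with strcpy()/strcat() when building /proc path names.
 */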
/* Wrapper around clone() that runs "fn" on the same stack as the
* caller! Unlike fork(), the cloned thread shares the same address space.
* The caller must be careful to use only minimal amounts of stack until
* the cloned thread has returned.
* There is a good chance that the cloned thread and the caller will share
* the same copy of errno!
*/
#ifdef __GNUC__
#if __GNUC__ == 3 && __GNUC_MINOR__ >= 1 || __GNUC__ > 3
/* Try to force this function into a separate stack frame, and make sure
* that arguments are passed on the stack.
*/
static int local_clone (int (*fn)(void *), void *arg, ...)
__attribute__ ((noinline));
#endif
#endif
/* To avoid the gap crossing page boundaries, increase the gap by the
 * large page size typically used on PowerPC systems. */
#ifdef __PPC64__
#define CLONE_STACK_SIZE 65536
#else
#define CLONE_STACK_SIZE 4096
#endif
static int local_clone (int (*fn)(void *), void *arg, ...) {
  /* Leave a CLONE_STACK_SIZE gap between the caller's stack and the new
   * clone. This should be more than sufficient for the caller to call
   * waitpid() until the cloned thread terminates.
*
* It is important that we set the CLONE_UNTRACED flag, because newer
* versions of "gdb" otherwise attempt to attach to our thread, and will
* attempt to reap its status codes. This subsequently results in the
* caller hanging indefinitely in waitpid(), waiting for a change in
* status that will never happen. By setting the CLONE_UNTRACED flag, we
* prevent "gdb" from stealing events, but we still expect the thread
* lister to fail, because it cannot PTRACE_ATTACH to the process that
* is being debugged. This is OK and the error code will be reported
* correctly.
*/
return sys_clone(fn, (char *)&arg - CLONE_STACK_SIZE,
CLONE_VM|CLONE_FS|CLONE_FILES|CLONE_UNTRACED, arg, 0, 0, 0);
}
/* Local substitute for the atoi() function, which is not necessarily safe
* to call once threads are suspended (depending on whether libc looks up
 * locale information when executing atoi()).
*/
static int local_atoi(const char *s) {
int n = 0;
int neg = *s == '-';
if (neg)
s++;
while (*s >= '0' && *s <= '9')
n = 10*n + (*s++ - '0');
return neg ? -n : n;
}
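/* Editor's note: e.g. local_atoi("-42") yields -42 without any locale
 * lookups, so it remains safe to call while other threads are suspended.
 */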
/* Re-runs fn until it doesn't cause EINTR
*/
#define NO_INTR(fn) do {} while ((fn) < 0 && errno == EINTR)
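/* Editor's note: e.g. NO_INTR(rc = sys_read(fd, buf, n)) retries the read
 * for as long as it fails with EINTR, leaving the final result in rc.
 */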
/* Wrap a class around system calls, in order to give us access to
* a private copy of errno. This only works in C++, but it has the
* advantage of not needing nested functions, which are a non-standard
* language extension.
*/
#ifdef __cplusplus
namespace {
class SysCalls {
public:
#define SYS_CPLUSPLUS
#define SYS_ERRNO my_errno
#define SYS_INLINE inline
#define SYS_PREFIX -1
#undef SYS_LINUX_SYSCALL_SUPPORT_H
#include "linux_syscall_support.h"
SysCalls() : my_errno(0) { }
int my_errno;
};
}
#define ERRNO sys.my_errno
#else
#define ERRNO my_errno
#endif
/* Wrapper for open() which is guaranteed to never return EINTR.
*/
static int c_open(const char *fname, int flags, int mode) {
ssize_t rc;
NO_INTR(rc = sys_open(fname, flags, mode));
return rc;
}
/* abort() is not safely reentrant, and changes its behavior each time
* it is called. This means, if the main application ever called abort()
* we cannot safely call it again. This would happen if we were called
 * from a SIGABRT signal handler in the main application. So, we document
 * that raising SIGABRT in the thread lister makes it not signal-safe
* (and vice-versa).
* Also, since we share address space with the main application, we
* cannot call abort() from the callback and expect the main application
* to behave correctly afterwards. In fact, the only thing we can do, is
* to terminate the main application with extreme prejudice (aka
* PTRACE_KILL).
* We set up our own SIGABRT handler to do this.
* In order to find the main application from the signal handler, we
* need to store information about it in global variables. This is
* safe, because the main application should be suspended at this
* time. If the callback ever called TCMalloc_ResumeAllProcessThreads(), then
* we are running a higher risk, though. So, try to avoid calling
* abort() after calling TCMalloc_ResumeAllProcessThreads.
*/
static volatile int *sig_pids, sig_num_threads, sig_proc, sig_marker;
/* Signal handler to help us recover from dying while we are attached to
* other threads.
*/
static void SignalHandler(int signum, siginfo_t *si, void *data) {
if (sig_pids != NULL) {
if (signum == SIGABRT) {
while (sig_num_threads-- > 0) {
/* Not sure if sched_yield is really necessary here, but it does not */
/* hurt, and it might be necessary for the same reasons that we have */
/* to do so in sys_ptrace_detach(). */
sys_sched_yield();
sys_ptrace(PTRACE_KILL, sig_pids[sig_num_threads], 0, 0);
}
} else if (sig_num_threads > 0) {
TCMalloc_ResumeAllProcessThreads(sig_num_threads, (int *)sig_pids);
}
}
sig_pids = NULL;
if (sig_marker >= 0)
NO_INTR(sys_close(sig_marker));
sig_marker = -1;
if (sig_proc >= 0)
NO_INTR(sys_close(sig_proc));
sig_proc = -1;
sys__exit(signum == SIGABRT ? 1 : 2);
}
/* Try to dirty the stack, and hope that the compiler is not smart enough
* to optimize this function away. Or worse, the compiler could inline the
* function and permanently allocate the data on the stack.
*/
static void DirtyStack(size_t amount) {
char buf[amount];
memset(buf, 0, amount);
sys_read(-1, buf, amount);
}
/* Data structure for passing arguments to the lister thread.
*/
#define ALT_STACKSIZE (MINSIGSTKSZ + 4096)
struct ListerParams {
int result, err;
char *altstack_mem;
ListAllProcessThreadsCallBack callback;
void *parameter;
va_list ap;
sem_t *lock;
};
static void ListerThread(struct ListerParams *args) {
int found_parent = 0;
pid_t clone_pid = sys_gettid(), ppid = sys_getppid();
char proc_self_task[80], marker_name[48], *marker_path;
const char *proc_paths[3];
const char *const *proc_path = proc_paths;
int proc = -1, marker = -1, num_threads = 0;
int max_threads = 0, sig;
struct kernel_stat marker_sb, proc_sb;
stack_t altstack;
/* Wait for parent thread to set appropriate permissions
* to allow ptrace activity
*/
if (sem_wait(args->lock) < 0) {
goto failure;
}
/* Create "marker" that we can use to detect threads sharing the same
* address space and the same file handles. By setting the FD_CLOEXEC flag
* we minimize the risk of misidentifying child processes as threads;
* and since there is still a race condition, we will filter those out
* later, anyway.
*/
if ((marker = sys_socket(PF_LOCAL, SOCK_DGRAM, 0)) < 0 ||
sys_fcntl(marker, F_SETFD, FD_CLOEXEC) < 0) {
failure:
args->result = -1;
args->err = errno;
if (marker >= 0)
NO_INTR(sys_close(marker));
sig_marker = marker = -1;
if (proc >= 0)
NO_INTR(sys_close(proc));
sig_proc = proc = -1;
sys__exit(1);
}
/* Compute search paths for finding thread directories in /proc */
local_itoa(strrchr(strcpy(proc_self_task, "/proc/"), '\000'), ppid);
strcpy(marker_name, proc_self_task);
marker_path = marker_name + strlen(marker_name);
strcat(proc_self_task, "/task/");
proc_paths[0] = proc_self_task; /* /proc/$$/task/ */
proc_paths[1] = "/proc/"; /* /proc/ */
proc_paths[2] = NULL;
/* Compute path for marker socket in /proc */
local_itoa(strcpy(marker_path, "/fd/") + 4, marker);
if (sys_stat(marker_name, &marker_sb) < 0) {
goto failure;
}
/* Catch signals on an alternate pre-allocated stack. This way, we can
* safely execute the signal handler even if we ran out of memory.
*/
memset(&altstack, 0, sizeof(altstack));
altstack.ss_sp = args->altstack_mem;
altstack.ss_flags = 0;
altstack.ss_size = ALT_STACKSIZE;
sys_sigaltstack(&altstack, (const stack_t *)NULL);
/* Some kernels forget to wake up traced processes, when the
* tracer dies. So, intercept synchronous signals and make sure
* that we wake up our tracees before dying. It is the caller's
* responsibility to ensure that asynchronous signals do not
* interfere with this function.
*/
sig_marker = marker;
sig_proc = -1;
for (sig = 0; sig < sizeof(sync_signals)/sizeof(*sync_signals); sig++) {
struct kernel_sigaction sa;
memset(&sa, 0, sizeof(sa));
sa.sa_sigaction_ = SignalHandler;
sys_sigfillset(&sa.sa_mask);
sa.sa_flags = SA_ONSTACK|SA_SIGINFO|SA_RESETHAND;
sys_sigaction(sync_signals[sig], &sa, (struct kernel_sigaction *)NULL);
}
/* Read process directories in /proc/... */
for (;;) {
/* Some kernels know about threads, and hide them in "/proc"
* (although they are still there, if you know the process
* id). Threads are moved into a separate "task" directory. We
* check there first, and then fall back on the older naming
* convention if necessary.
*/
if ((sig_proc = proc = c_open(*proc_path, O_RDONLY|O_DIRECTORY, 0)) < 0) {
if (*++proc_path != NULL)
continue;
goto failure;
}
if (sys_fstat(proc, &proc_sb) < 0)
goto failure;
/* Since we are suspending threads, we cannot call any libc
* functions that might acquire locks. Most notably, we cannot
* call malloc(). So, we have to allocate memory on the stack,
* instead. Since we do not know how much memory we need, we
* make a best guess. And if we guessed incorrectly we retry on
* a second iteration (by jumping to "detach_threads").
*
* Unless the number of threads is increasing very rapidly, we
 * should never need to do so, though, as our guesstimate is very
* conservative.
*/
if (max_threads < proc_sb.st_nlink + 100)
max_threads = proc_sb.st_nlink + 100;
/* scope */ {
pid_t pids[max_threads];
int added_entries = 0;
sig_num_threads = num_threads;
sig_pids = pids;
for (;;) {
struct KERNEL_DIRENT *entry;
char buf[4096];
ssize_t nbytes = GETDENTS(proc, (struct KERNEL_DIRENT *)buf,
sizeof(buf));
if (nbytes < 0)
goto failure;
else if (nbytes == 0) {
if (added_entries) {
/* Need to keep iterating over "/proc" in multiple
* passes until we no longer find any more threads. This
* algorithm eventually completes, when all threads have
* been suspended.
*/
added_entries = 0;
sys_lseek(proc, 0, SEEK_SET);
continue;
}
break;
}
for (entry = (struct KERNEL_DIRENT *)buf;
entry < (struct KERNEL_DIRENT *)&buf[nbytes];
entry = (struct KERNEL_DIRENT *)((char *)entry+entry->d_reclen)) {
if (entry->d_ino != 0) {
const char *ptr = entry->d_name;
pid_t pid;
/* Some kernels hide threads by preceding the pid with a '.' */
if (*ptr == '.')
ptr++;
/* If the directory is not numeric, it cannot be a
* process/thread
*/
if (*ptr < '0' || *ptr > '9')
continue;
pid = local_atoi(ptr);
/* Attach (and suspend) all threads */
if (pid && pid != clone_pid) {
struct kernel_stat tmp_sb;
char fname[entry->d_reclen + 48];
strcat(strcat(strcpy(fname, "/proc/"),
entry->d_name), marker_path);
/* Check if the marker is identical to the one we created */
if (sys_stat(fname, &tmp_sb) >= 0 &&
marker_sb.st_ino == tmp_sb.st_ino) {
long i, j;
/* Found one of our threads, make sure it is no duplicate */
for (i = 0; i < num_threads; i++) {
/* Linear search is slow, but should not matter much for
* the typically small number of threads.
*/
if (pids[i] == pid) {
/* Found a duplicate; most likely on second pass */
goto next_entry;
}
}
/* Check whether data structure needs growing */
if (num_threads >= max_threads) {
/* Back to square one, this time with more memory */
NO_INTR(sys_close(proc));
goto detach_threads;
}
/* Attaching to thread suspends it */
pids[num_threads++] = pid;
sig_num_threads = num_threads;
if (sys_ptrace(PTRACE_ATTACH, pid, (void *)0,
(void *)0) < 0) {
/* If operation failed, ignore thread. Maybe it
* just died? There might also be a race
* condition with a concurrent core dumper or
* with a debugger. In that case, we will just
* make a best effort, rather than failing
* entirely.
*/
num_threads--;
sig_num_threads = num_threads;
goto next_entry;
}
while (sys_waitpid(pid, (int *)0, __WALL) < 0) {
if (errno != EINTR) {
sys_ptrace_detach(pid);
num_threads--;
sig_num_threads = num_threads;
goto next_entry;
}
}
if (sys_ptrace(PTRACE_PEEKDATA, pid, &i, &j) || i++ != j ||
sys_ptrace(PTRACE_PEEKDATA, pid, &i, &j) || i != j) {
/* Address spaces are distinct, even though both
* processes show the "marker". This is probably
* a forked child process rather than a thread.
*/
sys_ptrace_detach(pid);
num_threads--;
sig_num_threads = num_threads;
} else {
found_parent |= pid == ppid;
added_entries++;
}
}
}
}
next_entry:;
}
}
NO_INTR(sys_close(proc));
sig_proc = proc = -1;
/* If we failed to find any threads, try looking somewhere else in
 * /proc. Maybe threads are reported differently on this system.
*/
if (num_threads > 1 || !*++proc_path) {
NO_INTR(sys_close(marker));
sig_marker = marker = -1;
/* If we never found the parent process, something is very wrong.
 * Most likely, we are running in a debugger. Any attempt to operate
* on the threads would be very incomplete. Let's just report an
* error to the caller.
*/
if (!found_parent) {
TCMalloc_ResumeAllProcessThreads(num_threads, pids);
sys__exit(3);
}
/* Now we are ready to call the callback,
* which takes care of resuming the threads for us.
*/
args->result = args->callback(args->parameter, num_threads,
pids, args->ap);
args->err = errno;
/* Callback should have resumed threads, but better safe than sorry */
if (TCMalloc_ResumeAllProcessThreads(num_threads, pids)) {
/* Callback forgot to resume at least one thread, report error */
args->err = EINVAL;
args->result = -1;
}
sys__exit(0);
}
detach_threads:
/* Resume all threads prior to retrying the operation */
TCMalloc_ResumeAllProcessThreads(num_threads, pids);
sig_pids = NULL;
num_threads = 0;
sig_num_threads = num_threads;
max_threads += 100;
}
}
}
/* This function gets the list of all linux threads of the current process
 * and passes them to the 'callback' along with the 'parameter' pointer; by
 * the time the callback is invoked, all of these threads have been paused
 * via PTRACE_ATTACH.
* The callback is executed from a separate thread which shares only the
* address space, the filesystem, and the filehandles with the caller. Most
* notably, it does not share the same pid and ppid; and if it terminates,
 * the rest of the application is still there. 'callback' is supposed to
 * call, or arrange for a call to, TCMalloc_ResumeAllProcessThreads. This
 * happens automatically if the thread raises a synchronous signal
 * (e.g. SIGSEGV); asynchronous signals are blocked. If the 'callback'
 * decides to unblock them, it must
* ensure that they cannot terminate the application, or that
* TCMalloc_ResumeAllProcessThreads will get called.
* It is an error for the 'callback' to make any library calls that could
* acquire locks. Most notably, this means that most system calls have to
* avoid going through libc. Also, this means that it is not legal to call
* exit() or abort().
* We return -1 on error and the return value of 'callback' on success.
*/
int TCMalloc_ListAllProcessThreads(void *parameter,
ListAllProcessThreadsCallBack callback, ...) {
char altstack_mem[ALT_STACKSIZE];
struct ListerParams args;
pid_t clone_pid;
int dumpable = 1, sig;
struct kernel_sigset_t sig_blocked, sig_old;
sem_t lock;
va_start(args.ap, callback);
/* If we are short on virtual memory, initializing the alternate stack
* might trigger a SIGSEGV. Let's do this early, before it could get us
* into more trouble (i.e. before signal handlers try to use the alternate
* stack, and before we attach to other threads).
*/
memset(altstack_mem, 0, sizeof(altstack_mem));
  /* Some of our cleanup functions could conceivably use more stack space.
   * Try to touch the stack right now. This could be defeated by the compiler
   * being too smart for its own good, so try really hard.
*/
DirtyStack(32768);
/* Make this process "dumpable". This is necessary in order to ptrace()
* after having called setuid().
*/
dumpable = sys_prctl(PR_GET_DUMPABLE, 0);
if (!dumpable)
sys_prctl(PR_SET_DUMPABLE, 1);
/* Fill in argument block for dumper thread */
args.result = -1;
args.err = 0;
args.altstack_mem = altstack_mem;
args.parameter = parameter;
args.callback = callback;
args.lock = &lock;
/* Before cloning the thread lister, block all asynchronous signals, as we */
/* are not prepared to handle them. */
sys_sigfillset(&sig_blocked);
for (sig = 0; sig < sizeof(sync_signals)/sizeof(*sync_signals); sig++) {
sys_sigdelset(&sig_blocked, sync_signals[sig]);
}
if (sys_sigprocmask(SIG_BLOCK, &sig_blocked, &sig_old)) {
args.err = errno;
args.result = -1;
goto failed;
}
/* scope */ {
/* After cloning, both the parent and the child share the same instance
* of errno. We must make sure that at least one of these processes
* (in our case, the parent) uses modified syscall macros that update
* a local copy of errno, instead.
*/
#ifdef __cplusplus
#define sys0_sigprocmask sys.sigprocmask
#define sys0_waitpid sys.waitpid
SysCalls sys;
#else
int my_errno;
#define SYS_ERRNO my_errno
#define SYS_INLINE inline
#define SYS_PREFIX 0
#undef SYS_LINUX_SYSCALL_SUPPORT_H
#include "linux_syscall_support.h"
#endif
/* Lock before clone so that parent can set
* ptrace permissions (if necessary) prior
* to ListerThread actually executing
*/
if (sem_init(&lock, 0, 0) == 0) {
int clone_errno;
clone_pid = local_clone((int (*)(void *))ListerThread, &args);
clone_errno = errno;
sys_sigprocmask(SIG_SETMASK, &sig_old, &sig_old);
if (clone_pid >= 0) {
#ifdef PR_SET_PTRACER
/* On newer kernels (with the Yama security module), permission
 * must explicitly be given to allow for ptrace.
*/
prctl(PR_SET_PTRACER, clone_pid, 0, 0, 0);
#endif
/* Releasing the lock here allows the
* ListerThread to execute and ptrace us.
*/
sem_post(&lock);
int status, rc;
while ((rc = sys0_waitpid(clone_pid, &status, __WALL)) < 0 &&
ERRNO == EINTR) {
/* Keep waiting */
}
if (rc < 0) {
args.err = ERRNO;
args.result = -1;
} else if (WIFEXITED(status)) {
switch (WEXITSTATUS(status)) {
case 0: break; /* Normal process termination */
case 2: args.err = EFAULT; /* Some fault (e.g. SIGSEGV) detected */
args.result = -1;
break;
case 3: args.err = EPERM; /* Process is already being traced */
args.result = -1;
break;
default:args.err = ECHILD; /* Child died unexpectedly */
args.result = -1;
break;
}
} else {
args.err = EFAULT; /* Terminated due to an unhandled signal*/
args.result = -1;
}
sem_destroy(&lock);
} else {
args.result = -1;
args.err = clone_errno;
}
} else {
args.result = -1;
args.err = errno;
}
}
/* Restore the "dumpable" state of the process */
failed:
if (!dumpable)
sys_prctl(PR_SET_DUMPABLE, dumpable);
va_end(args.ap);
errno = args.err;
return args.result;
}
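/* Editor's sketch (not part of the original file): a minimal callback that
 * just counts the paused threads and resumes them, per the contract
 * documented above. All names here are illustrative.
 */
#if 0
static int CountThreadsCallback(void *parameter, int num_threads,
                                pid_t *thread_pids, va_list ap) {
  *(int *)parameter = num_threads;
  /* The callback must resume the threads (or arrange for it). */
  TCMalloc_ResumeAllProcessThreads(num_threads, thread_pids);
  return 0;
}
/* Usage: int n = 0; TCMalloc_ListAllProcessThreads(&n, CountThreadsCallback); */
#endif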
/* This function resumes the list of all linux threads that
* TCMalloc_ListAllProcessThreads pauses before giving to its callback.
* The function returns non-zero if at least one thread was
* suspended and has now been resumed.
*/
int TCMalloc_ResumeAllProcessThreads(int num_threads, pid_t *thread_pids) {
int detached_at_least_one = 0;
while (num_threads-- > 0) {
detached_at_least_one |= sys_ptrace_detach(thread_pids[num_threads]) >= 0;
}
return detached_at_least_one;
}
#ifdef __cplusplus
}
#endif
#endif


@ -1,54 +0,0 @@
/* Copyright (c) 2005-2007, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ---
* Author: Markus Gutschke
*/
#ifndef _LINUXTHREADS_H
#define _LINUXTHREADS_H
/* Include thread_lister.h to get the interface that we implement for linux.
*/
/* We currently only support certain platforms on Linux. Porting to other
* related platforms should not be difficult.
*/
#if (defined(__i386__) || defined(__x86_64__) || defined(__arm__) || \
defined(__mips__) || defined(__PPC__) || defined(__aarch64__) || \
defined(__s390__)) && defined(__linux)
/* Define the THREADS symbol to make sure that there is exactly one core dumper
* built into the library.
*/
#define THREADS "Linux /proc"
#endif
#endif /* _LINUXTHREADS_H */


@ -1,108 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2007, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
// This file just provides storage for FLAGS_verbose.
#include "../config.h"
#include "base/logging.h"
#include "base/commandlineflags.h"
DEFINE_int32(verbose, EnvToInt("PERFTOOLS_VERBOSE", 0),
"Set to numbers >0 for more verbose output, or <0 for less. "
"--verbose == -4 means we log fatal errors only.");
#if defined(_WIN32) || defined(__CYGWIN__) || defined(__CYGWIN32__)
// While windows does have a POSIX-compatible API
// (_open/_write/_close), it may allocate memory. Using this lower-level
// windows API is the closest we can get to being "raw".
RawFD RawOpenForWriting(const char* filename) {
// CreateFile allocates memory if file_name isn't absolute, so if
// that ever becomes a problem then we ought to compute the absolute
// path on its behalf (perhaps the ntdll/kernel function isn't aware
// of the working directory?)
RawFD fd = CreateFileA(filename, GENERIC_WRITE, 0, NULL,
CREATE_ALWAYS, 0, NULL);
if (fd != kIllegalRawFD && GetLastError() == ERROR_ALREADY_EXISTS)
SetEndOfFile(fd); // truncate the existing file
return fd;
}
void RawWrite(RawFD handle, const char* buf, size_t len) {
while (len > 0) {
DWORD wrote;
BOOL ok = WriteFile(handle, buf, len, &wrote, NULL);
// We do not use an asynchronous file handle, so ok==false means an error
if (!ok) break;
buf += wrote;
len -= wrote;
}
}
void RawClose(RawFD handle) {
CloseHandle(handle);
}
#else // _WIN32 || __CYGWIN__ || __CYGWIN32__
#ifdef HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#ifdef HAVE_FCNTL_H
#include <fcntl.h>
#endif
// Re-run fn until it doesn't cause EINTR.
#define NO_INTR(fn) do {} while ((fn) < 0 && errno == EINTR)
RawFD RawOpenForWriting(const char* filename) {
return open(filename, O_WRONLY|O_CREAT|O_TRUNC, 0664);
}
void RawWrite(RawFD fd, const char* buf, size_t len) {
while (len > 0) {
ssize_t r;
NO_INTR(r = write(fd, buf, len));
if (r <= 0) break;
buf += r;
len -= r;
}
}
void RawClose(RawFD fd) {
NO_INTR(close(fd));
}
#endif // _WIN32 || __CYGWIN__ || __CYGWIN32__


@ -1,259 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2005, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
// This file contains #include information about logging-related stuff.
// Pretty much everybody needs to #include this file so that they can
// log various happenings.
//
#ifndef _LOGGING_H_
#define _LOGGING_H_
#include "../config.h"
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#ifdef HAVE_UNISTD_H
#include <unistd.h> // for write()
#endif
#include <string.h> // for strlen(), strcmp()
#include <assert.h>
#include <errno.h> // for errno
#include "base/commandlineflags.h"
// On some systems (like freebsd), we can't call write() at all in a
// global constructor, perhaps because errno hasn't been set up.
// (In windows, we can't call it because it might call malloc.)
// Calling the write syscall is safer (it doesn't set errno), so we
// prefer that. Note we don't care about errno for logging: we just
// do logging on a best-effort basis.
#if defined(_MSC_VER)
#define WRITE_TO_STDERR(buf, len) WriteToStderr(buf, len); // in port.cc
#elif defined(HAVE_SYS_SYSCALL_H)
#include <sys/syscall.h>
#define WRITE_TO_STDERR(buf, len) syscall(SYS_write, STDERR_FILENO, buf, len)
#else
#define WRITE_TO_STDERR(buf, len) write(STDERR_FILENO, buf, len)
#endif
// MSVC and mingw define their own, safe version of vsnprintf (the
// windows one is broken) in port.cc. Everyone else can use the
// version here. We had to give it a unique name for windows.
#ifndef _WIN32
# define perftools_vsnprintf vsnprintf
#endif
// We log all messages at this log-level and below.
// INFO == -1, WARNING == -2, ERROR == -3, FATAL == -4
DECLARE_int32(verbose);
// CHECK dies with a fatal error if condition is not true. It is *not*
// controlled by NDEBUG, so the check will be executed regardless of
// compilation mode. Therefore, it is safe to do things like:
// CHECK(fp->Write(x) == 4)
// Note we use write instead of printf/puts to avoid the risk we'll
// call malloc().
#define CHECK(condition) \
do { \
if (!(condition)) { \
WRITE_TO_STDERR("Check failed: " #condition "\n", \
sizeof("Check failed: " #condition "\n")-1); \
abort(); \
} \
} while (0)
// This takes a message to print. The name is historical.
#define RAW_CHECK(condition, message) \
do { \
if (!(condition)) { \
WRITE_TO_STDERR("Check failed: " #condition ": " message "\n", \
sizeof("Check failed: " #condition ": " message "\n")-1);\
abort(); \
} \
} while (0)
// This is like RAW_CHECK, but only in debug-mode
#ifdef NDEBUG
enum { DEBUG_MODE = 0 };
#define RAW_DCHECK(condition, message)
#else
enum { DEBUG_MODE = 1 };
#define RAW_DCHECK(condition, message) RAW_CHECK(condition, message)
#endif
// This prints errno as well. Note we use write instead of printf/puts to
// avoid the risk we'll call malloc().
#define PCHECK(condition) \
do { \
if (!(condition)) { \
const int err_no = errno; \
WRITE_TO_STDERR("Check failed: " #condition ": ", \
sizeof("Check failed: " #condition ": ")-1); \
WRITE_TO_STDERR(strerror(err_no), strlen(strerror(err_no))); \
WRITE_TO_STDERR("\n", sizeof("\n")-1); \
abort(); \
} \
} while (0)
// Helper macro for binary operators; prints the two values on error
// Don't use this macro directly in your code, use CHECK_EQ et al below
// WARNING: These don't compile correctly if one of the arguments is a pointer
// and the other is NULL. To work around this, simply static_cast NULL to the
// type of the desired pointer.
// TODO(jandrews): Also print the values in case of failure. Requires some
// sort of type-sensitive ToString() function.
#define CHECK_OP(op, val1, val2) \
do { \
if (!((val1) op (val2))) { \
fprintf(stderr, "Check failed: %s %s %s\n", #val1, #op, #val2); \
abort(); \
} \
} while (0)
#define CHECK_EQ(val1, val2) CHECK_OP(==, val1, val2)
#define CHECK_NE(val1, val2) CHECK_OP(!=, val1, val2)
#define CHECK_LE(val1, val2) CHECK_OP(<=, val1, val2)
#define CHECK_LT(val1, val2) CHECK_OP(< , val1, val2)
#define CHECK_GE(val1, val2) CHECK_OP(>=, val1, val2)
#define CHECK_GT(val1, val2) CHECK_OP(> , val1, val2)
// Synonyms for CHECK_* that are used in some unittests.
#define EXPECT_EQ(val1, val2) CHECK_EQ(val1, val2)
#define EXPECT_NE(val1, val2) CHECK_NE(val1, val2)
#define EXPECT_LE(val1, val2) CHECK_LE(val1, val2)
#define EXPECT_LT(val1, val2) CHECK_LT(val1, val2)
#define EXPECT_GE(val1, val2) CHECK_GE(val1, val2)
#define EXPECT_GT(val1, val2) CHECK_GT(val1, val2)
#define ASSERT_EQ(val1, val2) EXPECT_EQ(val1, val2)
#define ASSERT_NE(val1, val2) EXPECT_NE(val1, val2)
#define ASSERT_LE(val1, val2) EXPECT_LE(val1, val2)
#define ASSERT_LT(val1, val2) EXPECT_LT(val1, val2)
#define ASSERT_GE(val1, val2) EXPECT_GE(val1, val2)
#define ASSERT_GT(val1, val2) EXPECT_GT(val1, val2)
// As are these variants.
#define EXPECT_TRUE(cond) CHECK(cond)
#define EXPECT_FALSE(cond) CHECK(!(cond))
#define EXPECT_STREQ(a, b) CHECK(strcmp(a, b) == 0)
#define ASSERT_TRUE(cond) EXPECT_TRUE(cond)
#define ASSERT_FALSE(cond) EXPECT_FALSE(cond)
#define ASSERT_STREQ(a, b) EXPECT_STREQ(a, b)
// Used for (libc) functions that return -1 and set errno
#define CHECK_ERR(invocation) PCHECK((invocation) != -1)
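// Editor's note: e.g. CHECK_ERR(open("/dev/null", O_RDONLY)) aborts and
// prints the errno string if open() fails, via PCHECK above.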
// A few more checks that only happen in debug mode
#ifdef NDEBUG
#define DCHECK_EQ(val1, val2)
#define DCHECK_NE(val1, val2)
#define DCHECK_LE(val1, val2)
#define DCHECK_LT(val1, val2)
#define DCHECK_GE(val1, val2)
#define DCHECK_GT(val1, val2)
#else
#define DCHECK_EQ(val1, val2) CHECK_EQ(val1, val2)
#define DCHECK_NE(val1, val2) CHECK_NE(val1, val2)
#define DCHECK_LE(val1, val2) CHECK_LE(val1, val2)
#define DCHECK_LT(val1, val2) CHECK_LT(val1, val2)
#define DCHECK_GE(val1, val2) CHECK_GE(val1, val2)
#define DCHECK_GT(val1, val2) CHECK_GT(val1, val2)
#endif
#ifdef ERROR
#undef ERROR // may conflict with ERROR macro on windows
#endif
enum LogSeverity {INFO = -1, WARNING = -2, ERROR = -3, FATAL = -4};
// NOTE: we add a newline to the end of the output if it's not there already
inline void LogPrintf(int severity, const char* pat, va_list ap) {
// We write directly to the stderr file descriptor and avoid FILE
// buffering because that may invoke malloc()
char buf[600];
perftools_vsnprintf(buf, sizeof(buf)-1, pat, ap);
if (buf[0] != '\0' && buf[strlen(buf)-1] != '\n') {
assert(strlen(buf)+1 < sizeof(buf));
strcat(buf, "\n");
}
WRITE_TO_STDERR(buf, strlen(buf));
if ((severity) == FATAL)
abort(); // LOG(FATAL) indicates a big problem, so don't run atexit() calls
}
// Note that since the order of global constructors is unspecified,
// global code that calls RAW_LOG may execute before FLAGS_verbose is set.
// Such code will run with verbosity == 0 no matter what.
#define VLOG_IS_ON(severity) (FLAGS_verbose >= severity)
// In a better world, we'd use __VA_ARGS__, but VC++ 7 doesn't support it.
#define LOG_PRINTF(severity, pat) do { \
if (VLOG_IS_ON(severity)) { \
va_list ap; \
va_start(ap, pat); \
LogPrintf(severity, pat, ap); \
va_end(ap); \
} \
} while (0)
// RAW_LOG is the main function; some synonyms are used in unittests.
inline void RAW_LOG(int lvl, const char* pat, ...) { LOG_PRINTF(lvl, pat); }
inline void RAW_VLOG(int lvl, const char* pat, ...) { LOG_PRINTF(lvl, pat); }
inline void LOG(int lvl, const char* pat, ...) { LOG_PRINTF(lvl, pat); }
inline void VLOG(int lvl, const char* pat, ...) { LOG_PRINTF(lvl, pat); }
inline void LOG_IF(int lvl, bool cond, const char* pat, ...) {
if (cond) LOG_PRINTF(lvl, pat);
}
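// Editor's sketch (not part of the original header): RAW_LOG obeys
// FLAGS_verbose, so with PERFTOOLS_VERBOSE=1 in the environment the first
// line below prints and the second stays silent. The file name is
// illustrative.
#if 0
RAW_LOG(0, "heap profile written to %s", "/tmp/profile.0001.heap");
RAW_VLOG(2, "only printed at verbosity >= 2");
#endif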
// This isn't technically logging, but it's also IO and also is an
// attempt to be "raw" -- that is, to not use any higher-level libc
// routines that might allocate memory or (ideally) try to acquire
// locks. We use an opaque file handle (not necessarily an int)
// to allow even more low-level stuff in the future.
// Like other "raw" routines, these functions are best effort, and
// thus don't return error codes (except RawOpenForWriting()).
#if defined(_WIN32) || defined(__CYGWIN__) || defined(__CYGWIN32__)
#ifndef NOMINMAX
#define NOMINMAX // @#!$& windows
#endif
#include <windows.h>
typedef HANDLE RawFD;
const RawFD kIllegalRawFD = INVALID_HANDLE_VALUE;
#else
typedef int RawFD;
const RawFD kIllegalRawFD = -1; // what open returns if it fails
#endif // defined(_WIN32) || defined(__CYGWIN__) || defined(__CYGWIN32__)
RawFD RawOpenForWriting(const char* filename); // uses default permissions
void RawWrite(RawFD fd, const char* buf, size_t len);
void RawClose(RawFD fd);
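// Editor's sketch (not part of the original header): the intended raw-IO
// sequence, portable across both RawFD definitions above; the file name is
// illustrative.
#if 0
RawFD fd = RawOpenForWriting("/tmp/prof.out");
if (fd != kIllegalRawFD) {
  RawWrite(fd, "hello\n", 6);
  RawClose(fd);
}
#endif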
#endif // _LOGGING_H_


@ -1,582 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2006, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// A low-level allocator that can be used by other low-level
// modules without introducing dependency cycles.
// This allocator is slow and wasteful of memory;
// it should not be used when performance is key.
#include "base/low_level_alloc.h"
#include "base/dynamic_annotations.h"
#include "base/spinlock.h"
#include "base/logging.h"
#include "malloc_hook-inl.h"
#include <gperftools/malloc_hook.h>
#include <errno.h>
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#ifdef HAVE_MMAP
#include <sys/mman.h>
#endif
#include <new> // for placement-new
// On systems (like FreeBSD) that don't define MAP_ANONYMOUS, use the old
// form of the name instead.
#ifndef MAP_ANONYMOUS
# define MAP_ANONYMOUS MAP_ANON
#endif
// A first-fit allocator with amortized logarithmic free() time.
LowLevelAlloc::PagesAllocator::~PagesAllocator() {
}
// ---------------------------------------------------------------------------
static const int kMaxLevel = 30;
// We put this class-only struct in a namespace to avoid polluting the
// global namespace with this struct name (which could risk an ODR violation).
namespace low_level_alloc_internal {
// This struct describes one allocated block, or one free block.
struct AllocList {
struct Header {
intptr_t size; // size of entire region, including this field. Must be
// first. Valid in both allocated and unallocated blocks
intptr_t magic; // kMagicAllocated or kMagicUnallocated xor this
LowLevelAlloc::Arena *arena; // pointer to parent arena
void *dummy_for_alignment; // aligns regions to 0 mod 2*sizeof(void*)
} header;
// Next two fields: in unallocated blocks: freelist skiplist data
// in allocated blocks: overlaps with client data
int levels; // levels in skiplist used
AllocList *next[kMaxLevel]; // actually has levels elements.
// The AllocList node may not have room for
// all kMaxLevel entries. See max_fit in
// LLA_SkiplistLevels()
};
}
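// Rough layout sketch of one block described by AllocList (inferred from
// the fields above, not a literal memory dump):
//   [ size | magic | arena | pad ][ levels | next[0..levels-1] ... ]
//                                  ^-- Alloc() returns &block->levels,
// so the skiplist fields are overwritten by client data once the block
// is allocated.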
using low_level_alloc_internal::AllocList;
// ---------------------------------------------------------------------------
// A trivial skiplist implementation. This is used to keep the freelist
// in address order while taking only logarithmic time per insert and delete.
// An integer approximation of log2(size/base)
// Requires size >= base.
static int IntLog2(size_t size, size_t base) {
int result = 0;
for (size_t i = size; i > base; i >>= 1) { // i == floor(size/2**result)
result++;
}
// floor(size / 2**result) <= base < floor(size / 2**(result-1))
// => log2(size/(base+1)) <= result < 1+log2(size/base)
// => result ~= log2(size/base)
return result;
}
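// Worked example: IntLog2(4096, 16) halves 4096 eight times before the
// loop reaches 16, so it returns 8 == log2(4096/16); for sizes that are
// not powers of two the result is the approximation described above.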
// Return a random integer n: p(n)=1/(2**n) if 1 <= n; p(n)=0 if n < 1.
static int Random() {
static uint32 r = 1; // no locking---it's not critical
ANNOTATE_BENIGN_RACE(&r, "benign race, not critical.");
int result = 1;
while ((((r = r*1103515245 + 12345) >> 30) & 1) == 0) {
result++;
}
return result;
}
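// Distribution sketch for Random() above: each extra level needs one more
// low-probability bit, so p(1)=1/2, p(2)=1/4, p(3)=1/8, ...; the expected
// value is 2, which keeps skiplist nodes short with high probability.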
// Return a number of skiplist levels for a node of size bytes, where
// base is the minimum node size. Compute level=log2(size / base)+n
// where n is 1 if random is false and otherwise a random number generated with
// the standard distribution for a skiplist: See Random() above.
// Bigger nodes tend to have more skiplist levels due to the log2(size / base)
// term, so first-fit searches touch fewer nodes. "level" is clipped so
// level<kMaxLevel and next[level-1] will fit in the node.
// 0 < LLA_SkiplistLevels(x,y,false) <= LLA_SkiplistLevels(x,y,true) < kMaxLevel
static int LLA_SkiplistLevels(size_t size, size_t base, bool random) {
// max_fit is the maximum number of levels that will fit in a node for the
// given size. We can't return more than max_fit, no matter what the
// random number generator says.
int max_fit = (size-OFFSETOF_MEMBER(AllocList, next)) / sizeof (AllocList *);
int level = IntLog2(size, base) + (random? Random() : 1);
if (level > max_fit) level = max_fit;
if (level > kMaxLevel-1) level = kMaxLevel - 1;
RAW_CHECK(level >= 1, "block not big enough for even one level");
return level;
}
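// Worked example (random == false): with base == 32, a 256-byte node gets
// IntLog2(256, 32) + 1 == 4 levels, subject to the max_fit and kMaxLevel
// clipping above, so bigger nodes sit higher in the skiplist and first-fit
// searches skip over more of the freelist per step.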
// Return "atleast", the first element of AllocList *head s.t. *atleast >= *e.
// For 0 <= i < head->levels, set prev[i] to "no_greater", where no_greater
// points to the last element at level i in the AllocList less than *e, or is
// head if no such element exists.
static AllocList *LLA_SkiplistSearch(AllocList *head,
AllocList *e, AllocList **prev) {
AllocList *p = head;
for (int level = head->levels - 1; level >= 0; level--) {
for (AllocList *n; (n = p->next[level]) != 0 && n < e; p = n) {
}
prev[level] = p;
}
return (head->levels == 0) ? 0 : prev[0]->next[0];
}
// Insert element *e into AllocList *head. Set prev[] as LLA_SkiplistSearch.
// Requires that e->levels be previously set by the caller (using
// LLA_SkiplistLevels())
static void LLA_SkiplistInsert(AllocList *head, AllocList *e,
AllocList **prev) {
LLA_SkiplistSearch(head, e, prev);
for (; head->levels < e->levels; head->levels++) { // extend prev pointers
prev[head->levels] = head; // to all *e's levels
}
for (int i = 0; i != e->levels; i++) { // add element to list
e->next[i] = prev[i]->next[i];
prev[i]->next[i] = e;
}
}
// Remove element *e from AllocList *head. Set prev[] as LLA_SkiplistSearch().
// Requires that e->levels be previously set by the caller (using
// LLA_SkiplistLevels())
static void LLA_SkiplistDelete(AllocList *head, AllocList *e,
AllocList **prev) {
AllocList *found = LLA_SkiplistSearch(head, e, prev);
RAW_CHECK(e == found, "element not in freelist");
for (int i = 0; i != e->levels && prev[i]->next[i] == e; i++) {
prev[i]->next[i] = e->next[i];
}
while (head->levels > 0 && head->next[head->levels - 1] == 0) {
head->levels--; // reduce head->levels if level unused
}
}
// ---------------------------------------------------------------------------
// Arena implementation
struct LowLevelAlloc::Arena {
Arena() : mu(SpinLock::LINKER_INITIALIZED) {} // does nothing; for static init
explicit Arena(int) : pagesize(0) {} // set pagesize to zero explicitly
// for non-static init
SpinLock mu; // protects freelist, allocation_count,
// pagesize, roundup, min_size
AllocList freelist; // head of free list; sorted by addr (under mu)
int32 allocation_count; // count of allocated blocks (under mu)
int32 flags; // flags passed to NewArena (ro after init)
size_t pagesize; // ==getpagesize() (init under mu, then ro)
size_t roundup; // lowest power of 2 >= max(16,sizeof (AllocList))
// (init under mu, then ro)
size_t min_size; // smallest allocation block size
// (init under mu, then ro)
PagesAllocator *allocator;
};
// The default arena, which is used when 0 is passed instead of an Arena
// pointer.
static struct LowLevelAlloc::Arena default_arena;
// Non-malloc-hooked arenas: used only to allocate metadata for arenas that
// do not want malloc hook reporting, so that for them there's no malloc hook
// reporting even during arena creation.
static struct LowLevelAlloc::Arena unhooked_arena;
static struct LowLevelAlloc::Arena unhooked_async_sig_safe_arena;
namespace {
class DefaultPagesAllocator : public LowLevelAlloc::PagesAllocator {
public:
virtual ~DefaultPagesAllocator() {}
virtual void *MapPages(int32 flags, size_t size);
virtual void UnMapPages(int32 flags, void *addr, size_t size);
};
}
// magic numbers to identify allocated and unallocated blocks
static const intptr_t kMagicAllocated = 0x4c833e95;
static const intptr_t kMagicUnallocated = ~kMagicAllocated;
namespace {
class SCOPED_LOCKABLE ArenaLock {
public:
explicit ArenaLock(LowLevelAlloc::Arena *arena)
EXCLUSIVE_LOCK_FUNCTION(arena->mu)
: left_(false), mask_valid_(false), arena_(arena) {
if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
// We've decided not to support async-signal-safe arena use until
// there is a demonstrated need. Here's how one could do it, though
// (it would need to be made more portable).
#if 0
sigset_t all;
sigfillset(&all);
this->mask_valid_ =
(pthread_sigmask(SIG_BLOCK, &all, &this->mask_) == 0);
#else
RAW_CHECK(false, "We do not yet support async-signal-safe arena.");
#endif
}
this->arena_->mu.Lock();
}
~ArenaLock() { RAW_CHECK(this->left_, "haven't left Arena region"); }
void Leave() /*UNLOCK_FUNCTION()*/ {
this->arena_->mu.Unlock();
#if 0
if (this->mask_valid_) {
pthread_sigmask(SIG_SETMASK, &this->mask_, 0);
}
#endif
this->left_ = true;
}
private:
bool left_; // whether left region
bool mask_valid_;
#if 0
sigset_t mask_; // old mask of blocked signals
#endif
LowLevelAlloc::Arena *arena_;
DISALLOW_COPY_AND_ASSIGN(ArenaLock);
};
} // anonymous namespace
// create an appropriate magic number for an object at "ptr"
// "magic" should be kMagicAllocated or kMagicUnallocated
inline static intptr_t Magic(intptr_t magic, AllocList::Header *ptr) {
return magic ^ reinterpret_cast<intptr_t>(ptr);
}
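// Example: for a header at address 0x1000, Magic(kMagicAllocated, hdr)
// yields 0x4c833e95 ^ 0x1000. Mixing the address in means a header that
// is copied, stale, or overwritten fails the check even if the bare magic
// constant happens to appear in user data.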
// Initialize the fields of an Arena
static void ArenaInit(LowLevelAlloc::Arena *arena) {
if (arena->pagesize == 0) {
arena->pagesize = getpagesize();
// Round up block sizes to a power of two close to the header size.
arena->roundup = 16;
while (arena->roundup < sizeof (arena->freelist.header)) {
arena->roundup += arena->roundup;
}
// Don't allocate blocks less than twice the roundup size to avoid tiny
// free blocks.
arena->min_size = 2 * arena->roundup;
arena->freelist.header.size = 0;
arena->freelist.header.magic =
Magic(kMagicUnallocated, &arena->freelist.header);
arena->freelist.header.arena = arena;
arena->freelist.levels = 0;
memset(arena->freelist.next, 0, sizeof (arena->freelist.next));
arena->allocation_count = 0;
if (arena == &default_arena) {
// Default arena should be hooked, e.g. for heap-checker to trace
// pointer chains through objects in the default arena.
arena->flags = LowLevelAlloc::kCallMallocHook;
} else if (arena == &unhooked_async_sig_safe_arena) {
arena->flags = LowLevelAlloc::kAsyncSignalSafe;
} else {
arena->flags = 0; // other arenas' flags may be overridden by client,
// but unhooked_arena will have 0 in 'flags'.
}
arena->allocator = LowLevelAlloc::GetDefaultPagesAllocator();
}
}
// L < meta_data_arena->mu
LowLevelAlloc::Arena *LowLevelAlloc::NewArena(int32 flags,
Arena *meta_data_arena) {
return NewArenaWithCustomAlloc(flags, meta_data_arena, NULL);
}
// L < meta_data_arena->mu
LowLevelAlloc::Arena *LowLevelAlloc::NewArenaWithCustomAlloc(int32 flags,
Arena *meta_data_arena,
PagesAllocator *allocator) {
RAW_CHECK(meta_data_arena != 0, "must pass a valid arena");
if (meta_data_arena == &default_arena) {
if ((flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
meta_data_arena = &unhooked_async_sig_safe_arena;
} else if ((flags & LowLevelAlloc::kCallMallocHook) == 0) {
meta_data_arena = &unhooked_arena;
}
}
// Arena(0) uses the constructor for non-static contexts
Arena *result =
new (AllocWithArena(sizeof (*result), meta_data_arena)) Arena(0);
ArenaInit(result);
result->flags = flags;
if (allocator) {
result->allocator = allocator;
}
return result;
}
// L < arena->mu, L < arena->arena->mu
bool LowLevelAlloc::DeleteArena(Arena *arena) {
RAW_CHECK(arena != 0 && arena != &default_arena && arena != &unhooked_arena,
"may not delete default arena");
ArenaLock section(arena);
bool empty = (arena->allocation_count == 0);
section.Leave();
if (empty) {
while (arena->freelist.next[0] != 0) {
AllocList *region = arena->freelist.next[0];
size_t size = region->header.size;
arena->freelist.next[0] = region->next[0];
RAW_CHECK(region->header.magic ==
Magic(kMagicUnallocated, &region->header),
"bad magic number in DeleteArena()");
RAW_CHECK(region->header.arena == arena,
"bad arena pointer in DeleteArena()");
RAW_CHECK(size % arena->pagesize == 0,
"empty arena has non-page-aligned block size");
RAW_CHECK(reinterpret_cast<intptr_t>(region) % arena->pagesize == 0,
"empty arena has non-page-aligned block");
int munmap_result;
if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) == 0) {
munmap_result = munmap(region, size);
} else {
munmap_result = MallocHook::UnhookedMUnmap(region, size);
}
RAW_CHECK(munmap_result == 0,
"LowLevelAlloc::DeleteArena: munmap failed");
}
Free(arena);
}
return empty;
}
// ---------------------------------------------------------------------------
// Return value rounded up to next multiple of align.
// align must be a power of two.
static intptr_t RoundUp(intptr_t addr, intptr_t align) {
return (addr + align - 1) & ~(align - 1);
}
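// Worked example: RoundUp(37, 16) == (37 + 15) & ~15 == 52 & ~15 == 48.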
// Equivalent to "return prev->next[i]" but with sanity checking
// that the freelist is in the correct order, that it
// consists of regions marked "unallocated", and that no two regions
// are adjacent in memory (they should have been coalesced).
// L < arena->mu
static AllocList *Next(int i, AllocList *prev, LowLevelAlloc::Arena *arena) {
RAW_CHECK(i < prev->levels, "too few levels in Next()");
AllocList *next = prev->next[i];
if (next != 0) {
RAW_CHECK(next->header.magic == Magic(kMagicUnallocated, &next->header),
"bad magic number in Next()");
RAW_CHECK(next->header.arena == arena,
"bad arena pointer in Next()");
if (prev != &arena->freelist) {
RAW_CHECK(prev < next, "unordered freelist");
RAW_CHECK(reinterpret_cast<char *>(prev) + prev->header.size <
reinterpret_cast<char *>(next), "malformed freelist");
}
}
return next;
}
// Coalesce list item "a" with its successor if they are adjacent.
static void Coalesce(AllocList *a) {
AllocList *n = a->next[0];
if (n != 0 && reinterpret_cast<char *>(a) + a->header.size ==
reinterpret_cast<char *>(n)) {
LowLevelAlloc::Arena *arena = a->header.arena;
a->header.size += n->header.size;
n->header.magic = 0;
n->header.arena = 0;
AllocList *prev[kMaxLevel];
LLA_SkiplistDelete(&arena->freelist, n, prev);
LLA_SkiplistDelete(&arena->freelist, a, prev);
a->levels = LLA_SkiplistLevels(a->header.size, arena->min_size, true);
LLA_SkiplistInsert(&arena->freelist, a, prev);
}
}
// Adds block at location "v" to the free list
// L >= arena->mu
static void AddToFreelist(void *v, LowLevelAlloc::Arena *arena) {
AllocList *f = reinterpret_cast<AllocList *>(
reinterpret_cast<char *>(v) - sizeof (f->header));
RAW_CHECK(f->header.magic == Magic(kMagicAllocated, &f->header),
"bad magic number in AddToFreelist()");
RAW_CHECK(f->header.arena == arena,
"bad arena pointer in AddToFreelist()");
f->levels = LLA_SkiplistLevels(f->header.size, arena->min_size, true);
AllocList *prev[kMaxLevel];
LLA_SkiplistInsert(&arena->freelist, f, prev);
f->header.magic = Magic(kMagicUnallocated, &f->header);
Coalesce(f); // maybe coalesce with successor
Coalesce(prev[0]); // maybe coalesce with predecessor
}
// Frees storage allocated by LowLevelAlloc::Alloc().
// L < arena->mu
void LowLevelAlloc::Free(void *v) {
if (v != 0) {
AllocList *f = reinterpret_cast<AllocList *>(
reinterpret_cast<char *>(v) - sizeof (f->header));
RAW_CHECK(f->header.magic == Magic(kMagicAllocated, &f->header),
"bad magic number in Free()");
LowLevelAlloc::Arena *arena = f->header.arena;
if ((arena->flags & kCallMallocHook) != 0) {
MallocHook::InvokeDeleteHook(v);
}
ArenaLock section(arena);
AddToFreelist(v, arena);
RAW_CHECK(arena->allocation_count > 0, "nothing in arena to free");
arena->allocation_count--;
section.Leave();
}
}
// allocates and returns a block of size bytes, to be freed with Free()
// L < arena->mu
static void *DoAllocWithArena(size_t request, LowLevelAlloc::Arena *arena) {
void *result = 0;
if (request != 0) {
AllocList *s; // will point to region that satisfies request
ArenaLock section(arena);
ArenaInit(arena);
// round up with header
size_t req_rnd = RoundUp(request + sizeof (s->header), arena->roundup);
for (;;) { // loop until we find a suitable region
// find the minimum levels that a block of this size must have
int i = LLA_SkiplistLevels(req_rnd, arena->min_size, false) - 1;
if (i < arena->freelist.levels) { // potential blocks exist
AllocList *before = &arena->freelist; // predecessor of s
while ((s = Next(i, before, arena)) != 0 && s->header.size < req_rnd) {
before = s;
}
if (s != 0) { // we found a region
break;
}
}
// we unlock before mmap() both because mmap() may call a callback hook,
// and because it may be slow.
arena->mu.Unlock();
// mmap generous 64K chunks to decrease
// the chances/impact of fragmentation:
size_t new_pages_size = RoundUp(req_rnd, arena->pagesize * 16);
void *new_pages = arena->allocator->MapPages(arena->flags, new_pages_size);
arena->mu.Lock();
s = reinterpret_cast<AllocList *>(new_pages);
s->header.size = new_pages_size;
// Pretend the block is allocated; call AddToFreelist() to free it.
s->header.magic = Magic(kMagicAllocated, &s->header);
s->header.arena = arena;
AddToFreelist(&s->levels, arena); // insert new region into free list
}
AllocList *prev[kMaxLevel];
LLA_SkiplistDelete(&arena->freelist, s, prev); // remove from free list
// s points to the first free region that's big enough
if (req_rnd + arena->min_size <= s->header.size) { // big enough to split
AllocList *n = reinterpret_cast<AllocList *>
(req_rnd + reinterpret_cast<char *>(s));
n->header.size = s->header.size - req_rnd;
n->header.magic = Magic(kMagicAllocated, &n->header);
n->header.arena = arena;
s->header.size = req_rnd;
AddToFreelist(&n->levels, arena);
}
s->header.magic = Magic(kMagicAllocated, &s->header);
RAW_CHECK(s->header.arena == arena, "");
arena->allocation_count++;
section.Leave();
result = &s->levels;
}
ANNOTATE_NEW_MEMORY(result, request);
return result;
}
void *LowLevelAlloc::Alloc(size_t request) {
void *result = DoAllocWithArena(request, &default_arena);
if ((default_arena.flags & kCallMallocHook) != 0) {
// this call must be directly in the user-called allocator function
// for MallocHook::GetCallerStackTrace to work properly
MallocHook::InvokeNewHook(result, request);
}
return result;
}
void *LowLevelAlloc::AllocWithArena(size_t request, Arena *arena) {
RAW_CHECK(arena != 0, "must pass a valid arena");
void *result = DoAllocWithArena(request, arena);
if ((arena->flags & kCallMallocHook) != 0) {
// this call must be directly in the user-called allocator function
// for MallocHook::GetCallerStackTrace to work properly
MallocHook::InvokeNewHook(result, request);
}
return result;
}
LowLevelAlloc::Arena *LowLevelAlloc::DefaultArena() {
return &default_arena;
}
static DefaultPagesAllocator *default_pages_allocator;
static union {
char chars[sizeof(DefaultPagesAllocator)];
void *ptr;
} debug_pages_allocator_space;
LowLevelAlloc::PagesAllocator *LowLevelAlloc::GetDefaultPagesAllocator(void) {
if (default_pages_allocator) {
return default_pages_allocator;
}
default_pages_allocator = new (debug_pages_allocator_space.chars) DefaultPagesAllocator();
return default_pages_allocator;
}
void *DefaultPagesAllocator::MapPages(int32 flags, size_t size) {
void *new_pages;
if ((flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
new_pages = MallocHook::UnhookedMMap(0, size,
PROT_WRITE|PROT_READ,
MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
} else {
new_pages = mmap(0, size,
PROT_WRITE|PROT_READ,
MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
}
RAW_CHECK(new_pages != MAP_FAILED, "mmap error");
return new_pages;
}
void DefaultPagesAllocator::UnMapPages(int32 flags, void *region, size_t size) {
int munmap_result;
if ((flags & LowLevelAlloc::kAsyncSignalSafe) == 0) {
munmap_result = munmap(region, size);
} else {
munmap_result = MallocHook::UnhookedMUnmap(region, size);
}
RAW_CHECK(munmap_result == 0,
"DefaultPagesAllocator::UnMapPages: munmap failed");
}

View File

@ -1,120 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2006, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#if !defined(_BASE_LOW_LEVEL_ALLOC_H_)
#define _BASE_LOW_LEVEL_ALLOC_H_
// A simple thread-safe memory allocator that does not depend on
// mutexes or thread-specific data. It is intended to be used
// sparingly, and only when malloc() would introduce an unwanted
// dependency, such as inside the heap-checker.
#include "../config.h"
#include <stddef.h> // for size_t
#include "base/basictypes.h"
class LowLevelAlloc {
public:
class PagesAllocator {
public:
virtual ~PagesAllocator();
virtual void *MapPages(int32 flags, size_t size) = 0;
virtual void UnMapPages(int32 flags, void *addr, size_t size) = 0;
};
static PagesAllocator *GetDefaultPagesAllocator(void);
struct Arena; // an arena from which memory may be allocated
// Returns a pointer to a block of at least "request" bytes
// that have been newly allocated from the specific arena.
// For Alloc(), the DefaultArena() is used.
// Returns 0 if passed request==0.
// Does not return 0 under other circumstances; it crashes if memory
// is not available.
static void *Alloc(size_t request)
ATTRIBUTE_SECTION(malloc_hook);
static void *AllocWithArena(size_t request, Arena *arena)
ATTRIBUTE_SECTION(malloc_hook);
// Deallocates a region of memory that was previously allocated with
// Alloc(). Does nothing if passed 0. "s" must be either 0,
// or must have been returned from a call to Alloc() and not yet passed to
// Free() since that call to Alloc(). The space is returned to the arena
// from which it was allocated.
static void Free(void *s) ATTRIBUTE_SECTION(malloc_hook);
// ATTRIBUTE_SECTION(malloc_hook) for Alloc* and Free
// are to put all callers of MallocHook::Invoke* in this module
// into special section,
// so that MallocHook::GetCallerStackTrace can function accurately.
// Create a new arena.
// The root metadata for the new arena is allocated in the
// meta_data_arena; the DefaultArena() can be passed for meta_data_arena.
// These values may be OR-ed into flags:
enum {
// Report calls to Alloc() and Free() via the MallocHook interface.
// Set in the DefaultArena.
kCallMallocHook = 0x0001,
// Make calls to Alloc() and Free() async-signal-safe. Not set in
// DefaultArena().
kAsyncSignalSafe = 0x0002,
// When used with DefaultArena(), the NewArena() and DeleteArena() calls
// obey the flags given explicitly in the NewArena() call, even if those
// flags differ from the settings in DefaultArena(). So the call
// NewArena(kAsyncSignalSafe, DefaultArena()) is itself async-signal-safe,
// as well as generating an arena that provides async-signal-safe
// Alloc/Free.
};
static Arena *NewArena(int32 flags, Arena *meta_data_arena);
// Note: the pages allocator will never be destroyed and allocated pages
// will never be freed. When allocator is NULL, this is the same as NewArena().
static Arena *NewArenaWithCustomAlloc(int32 flags, Arena *meta_data_arena, PagesAllocator *allocator);
// Destroys an arena allocated by NewArena and returns true,
// provided no allocated blocks remain in the arena.
// If allocated blocks remain in the arena, does nothing and
// returns false.
// It is illegal to attempt to destroy the DefaultArena().
static bool DeleteArena(Arena *arena);
// The default arena that always exists.
static Arena *DefaultArena();
private:
LowLevelAlloc(); // no instances
};
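// A minimal usage sketch of the interface above (flags and sizes are
// arbitrary example values):
//   LowLevelAlloc::Arena *a =
//       LowLevelAlloc::NewArena(0, LowLevelAlloc::DefaultArena());
//   void *p = LowLevelAlloc::AllocWithArena(128, a);
//   LowLevelAlloc::Free(p);                  // returns the block to "a"
//   bool ok = LowLevelAlloc::DeleteArena(a); // true: no blocks remained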
#endif

View File

@ -1,332 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2007, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// ---
// Author: Craig Silverstein.
//
// A simple mutex wrapper, supporting locks and read-write locks.
// You should assume the locks are *not* re-entrant.
//
// To use: you should define the following macros in your configure.ac:
// ACX_PTHREAD
// AC_RWLOCK
// The latter is defined in ../autoconf.
//
// This class is meant to be internal-only and should be wrapped by an
// internal namespace. Before you use this module, please give the
// name of your internal namespace for this module. Or, if you want
// to expose it, you'll want to move it to the Google namespace. We
// cannot put this class in global namespace because there can be some
// problems when we have multiple versions of Mutex in each shared object.
//
// NOTE: TryLock() is broken for NO_THREADS mode, at least in NDEBUG
// mode.
//
// CYGWIN NOTE: Cygwin support for rwlock seems to be buggy:
// http://www.cygwin.com/ml/cygwin/2008-12/msg00017.html
// Because of that, we might as well use windows locks for
// cygwin. They seem to be more reliable than the cygwin pthreads layer.
//
// TRICKY IMPLEMENTATION NOTE:
// This class is designed to be safe to use during
// dynamic-initialization -- that is, by global constructors that are
// run before main() starts. The issue in this case is that
// dynamic-initialization happens in an unpredictable order, and it
// could be that someone else's dynamic initializer could call a
// function that tries to acquire this mutex -- but that all happens
// before this mutex's constructor has run. (This can happen even if
// the mutex and the function that uses the mutex are in the same .cc
// file.) Basically, because Mutex does non-trivial work in its
// constructor, it's not, in the naive implementation, safe to use
// before dynamic initialization has run on it.
//
// The solution used here is to pair the actual mutex primitive with a
// bool that is set to true when the mutex is dynamically initialized.
// (Before that it's false.) Then we modify all mutex routines to
// look at the bool, and not try to lock/unlock until the bool makes
// it to true (which happens after the Mutex constructor has run.)
//
// This works because before main() starts -- particularly, during
// dynamic initialization -- there are no threads, so a) it's ok that
// the mutex operations are a no-op, since we don't need locking then
// anyway; and b) we can be quite confident our bool won't change
// state between a call to Lock() and a call to Unlock() (that would
// require a global constructor in one translation unit to call Lock()
// and another global constructor in another translation unit to call
// Unlock() later, which is pretty perverse).
//
// That said, it's tricky, and can conceivably fail; it's safest to
// avoid trying to acquire a mutex in a global constructor, if you
// can. One way it can fail is that a really smart compiler might
// initialize the bool to true at static-initialization time (too
// early) rather than at dynamic-initialization time. To discourage
// that, we set is_safe_ to true in code (not the constructor
// colon-initializer) and set it to true via a function that always
// evaluates to true, but that the compiler can't know always
// evaluates to true. This should be good enough.
//
// A related issue is code that could try to access the mutex
// after it's been destroyed in the global destructors (because
// the Mutex global destructor runs before some other global
// destructor, that tries to acquire the mutex). The way we
// deal with this is by taking a constructor arg that global
// mutexes should pass in, that causes the destructor to do no
// work. We still depend on the compiler not doing anything
// weird to a Mutex's memory after it is destroyed, but for a
// static global variable, that's pretty safe.
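// The pattern the two notes above are designed to support, sketched with
// hypothetical names:
//   static Mutex module_lock(Mutex::LINKER_INITIALIZED); // global mutex
//   void Touch() { MutexLock l(&module_lock); /* ... */ } // safe pre-main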
#ifndef GOOGLE_MUTEX_H_
#define GOOGLE_MUTEX_H_
#include "../config.h"
#if defined(NO_THREADS)
typedef int MutexType; // to keep a lock-count
#elif defined(_WIN32) || defined(__CYGWIN__) || defined(__CYGWIN32__)
# ifndef WIN32_LEAN_AND_MEAN
# define WIN32_LEAN_AND_MEAN // We only need minimal includes
# endif
// We need Windows NT or later for TryEnterCriticalSection(). If you
// don't need that functionality, you can remove these _WIN32_WINNT
// lines, and change TryLock() to assert(0) or something.
# ifndef _WIN32_WINNT
# define _WIN32_WINNT 0x0400
# endif
# include <windows.h>
typedef CRITICAL_SECTION MutexType;
#elif defined(HAVE_PTHREAD) && defined(HAVE_RWLOCK)
// Needed for pthread_rwlock_*. If it causes problems, you could take it
// out, but then you'd have to unset HAVE_RWLOCK (at least on Linux -- it
// *does* cause problems on FreeBSD and Mac OS X, but isn't needed
// for locking there.)
# ifdef __linux__
# define _XOPEN_SOURCE 500 // may be needed to get the rwlock calls
# endif
# include <pthread.h>
typedef pthread_rwlock_t MutexType;
#elif defined(HAVE_PTHREAD)
# include <pthread.h>
typedef pthread_mutex_t MutexType;
#else
# error Need to implement mutex.h for your architecture, or #define NO_THREADS
#endif
#include <assert.h>
#include <stdlib.h> // for abort()
#define MUTEX_NAMESPACE perftools_mutex_namespace
namespace MUTEX_NAMESPACE {
class Mutex {
public:
// This is used for the single-arg constructor
enum LinkerInitialized { LINKER_INITIALIZED };
// Create a Mutex that is not held by anybody. This constructor is
// typically used for Mutexes allocated on the heap or the stack.
inline Mutex();
// This constructor should be used for global, static Mutex objects.
// It inhibits work being done by the destructor, which makes it
// safer for code that tries to acquire this mutex in its global
// destructor.
inline Mutex(LinkerInitialized);
// Destructor
inline ~Mutex();
inline void Lock(); // Block if needed until free then acquire exclusively
inline void Unlock(); // Release a lock acquired via Lock()
inline bool TryLock(); // If free, Lock() and return true, else return false
// Note that on systems that don't support read-write locks, these may
// be implemented as synonyms to Lock() and Unlock(). So you can use
// these for efficiency, but don't use them anyplace where being able
// to do shared reads is necessary to avoid deadlock.
inline void ReaderLock(); // Block until free or shared then acquire a share
inline void ReaderUnlock(); // Release a read share of this Mutex
inline void WriterLock() { Lock(); } // Acquire an exclusive lock
inline void WriterUnlock() { Unlock(); } // Release a lock from WriterLock()
private:
MutexType mutex_;
// We want to make sure that the compiler sets is_safe_ to true only
// when we tell it to, and never assumes that is_safe_ is
// always true. volatile is the most reliable way to do that.
volatile bool is_safe_;
// This indicates which constructor was called.
bool destroy_;
inline void SetIsSafe() { is_safe_ = true; }
// Catch the error of writing Mutex when intending MutexLock.
Mutex(Mutex* /*ignored*/) {}
// Disallow "evil" constructors
Mutex(const Mutex&);
void operator=(const Mutex&);
};
// Now the implementation of Mutex for various systems
#if defined(NO_THREADS)
// When we don't have threads, we can be either reading or writing,
// but not both. We can have lots of readers at once (in no-threads
// mode, that's most likely to happen in recursive function calls),
// but only one writer. We represent this by having mutex_ be -1 when
// writing and a number > 0 when reading (and 0 when no lock is held).
//
// In debug mode, we assert these invariants, while in non-debug mode
// we do nothing, for efficiency. That's why everything is in an
// assert.
Mutex::Mutex() : mutex_(0) { }
Mutex::Mutex(Mutex::LinkerInitialized) : mutex_(0) { }
Mutex::~Mutex() { assert(mutex_ == 0); }
void Mutex::Lock() { assert(--mutex_ == -1); }
void Mutex::Unlock() { assert(mutex_++ == -1); }
bool Mutex::TryLock() { if (mutex_) return false; Lock(); return true; }
void Mutex::ReaderLock() { assert(++mutex_ > 0); }
void Mutex::ReaderUnlock() { assert(mutex_-- > 0); }
#elif defined(_WIN32) || defined(__CYGWIN__) || defined(__CYGWIN32__)
Mutex::Mutex() : destroy_(true) {
InitializeCriticalSection(&mutex_);
SetIsSafe();
}
Mutex::Mutex(LinkerInitialized) : destroy_(false) {
InitializeCriticalSection(&mutex_);
SetIsSafe();
}
Mutex::~Mutex() { if (destroy_) DeleteCriticalSection(&mutex_); }
void Mutex::Lock() { if (is_safe_) EnterCriticalSection(&mutex_); }
void Mutex::Unlock() { if (is_safe_) LeaveCriticalSection(&mutex_); }
bool Mutex::TryLock() { return is_safe_ ?
TryEnterCriticalSection(&mutex_) != 0 : true; }
void Mutex::ReaderLock() { Lock(); } // we don't have read-write locks
void Mutex::ReaderUnlock() { Unlock(); }
#elif defined(HAVE_PTHREAD) && defined(HAVE_RWLOCK)
#define SAFE_PTHREAD(fncall) do { /* run fncall if is_safe_ is true */ \
if (is_safe_ && fncall(&mutex_) != 0) abort(); \
} while (0)
Mutex::Mutex() : destroy_(true) {
SetIsSafe();
if (is_safe_ && pthread_rwlock_init(&mutex_, NULL) != 0) abort();
}
Mutex::Mutex(Mutex::LinkerInitialized) : destroy_(false) {
SetIsSafe();
if (is_safe_ && pthread_rwlock_init(&mutex_, NULL) != 0) abort();
}
Mutex::~Mutex() { if (destroy_) SAFE_PTHREAD(pthread_rwlock_destroy); }
void Mutex::Lock() { SAFE_PTHREAD(pthread_rwlock_wrlock); }
void Mutex::Unlock() { SAFE_PTHREAD(pthread_rwlock_unlock); }
bool Mutex::TryLock() { return is_safe_ ?
pthread_rwlock_trywrlock(&mutex_) == 0 : true; }
void Mutex::ReaderLock() { SAFE_PTHREAD(pthread_rwlock_rdlock); }
void Mutex::ReaderUnlock() { SAFE_PTHREAD(pthread_rwlock_unlock); }
#undef SAFE_PTHREAD
#elif defined(HAVE_PTHREAD)
#define SAFE_PTHREAD(fncall) do { /* run fncall if is_safe_ is true */ \
if (is_safe_ && fncall(&mutex_) != 0) abort(); \
} while (0)
Mutex::Mutex() : destroy_(true) {
SetIsSafe();
if (is_safe_ && pthread_mutex_init(&mutex_, NULL) != 0) abort();
}
Mutex::Mutex(Mutex::LinkerInitialized) : destroy_(false) {
SetIsSafe();
if (is_safe_ && pthread_mutex_init(&mutex_, NULL) != 0) abort();
}
Mutex::~Mutex() { if (destroy_) SAFE_PTHREAD(pthread_mutex_destroy); }
void Mutex::Lock() { SAFE_PTHREAD(pthread_mutex_lock); }
void Mutex::Unlock() { SAFE_PTHREAD(pthread_mutex_unlock); }
bool Mutex::TryLock() { return is_safe_ ?
pthread_mutex_trylock(&mutex_) == 0 : true; }
void Mutex::ReaderLock() { Lock(); }
void Mutex::ReaderUnlock() { Unlock(); }
#undef SAFE_PTHREAD
#endif
// --------------------------------------------------------------------------
// Some helper classes
// MutexLock(mu) acquires mu when constructed and releases it when destroyed.
class MutexLock {
public:
explicit MutexLock(Mutex *mu) : mu_(mu) { mu_->Lock(); }
~MutexLock() { mu_->Unlock(); }
private:
Mutex * const mu_;
// Disallow "evil" constructors
MutexLock(const MutexLock&);
void operator=(const MutexLock&);
};
// ReaderMutexLock and WriterMutexLock do the same, for rwlocks
class ReaderMutexLock {
public:
explicit ReaderMutexLock(Mutex *mu) : mu_(mu) { mu_->ReaderLock(); }
~ReaderMutexLock() { mu_->ReaderUnlock(); }
private:
Mutex * const mu_;
// Disallow "evil" constructors
ReaderMutexLock(const ReaderMutexLock&);
void operator=(const ReaderMutexLock&);
};
class WriterMutexLock {
public:
explicit WriterMutexLock(Mutex *mu) : mu_(mu) { mu_->WriterLock(); }
~WriterMutexLock() { mu_->WriterUnlock(); }
private:
Mutex * const mu_;
// Disallow "evil" constructors
WriterMutexLock(const WriterMutexLock&);
void operator=(const WriterMutexLock&);
};
// Catch bug where variable name is omitted, e.g. MutexLock (&mu);
#define MutexLock(x) COMPILE_ASSERT(0, mutex_lock_decl_missing_var_name)
#define ReaderMutexLock(x) COMPILE_ASSERT(0, rmutex_lock_decl_missing_var_name)
#define WriterMutexLock(x) COMPILE_ASSERT(0, wmutex_lock_decl_missing_var_name)
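// Example of the bug these macros catch, with a hypothetical mutex "mu":
//   MutexLock (&mu);   // error caught at compile time: the unnamed
//                      // temporary would release the lock immediately
//   MutexLock l(&mu);  // correct: lock held until end of scope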
} // namespace MUTEX_NAMESPACE
using namespace MUTEX_NAMESPACE;
#undef MUTEX_NAMESPACE
#endif /* GOOGLE_MUTEX_H_ */

View File

@ -1,129 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2006, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ---
* Author: Sanjay Ghemawat
*/
#include "../config.h"
#include "base/spinlock.h"
#include "base/spinlock_internal.h"
#include "base/sysinfo.h" /* for GetSystemCPUsCount() */
// NOTE on the Lock-state values:
//
// kSpinLockFree represents the unlocked state
// kSpinLockHeld represents the locked state with no waiters
// kSpinLockSleeper represents the locked state with waiters
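//
// Transition sketch implied by the values above:
//   Lock() fast path:  kSpinLockFree -> kSpinLockHeld (one CAS)
//   contended locker:  kSpinLockHeld -> kSpinLockSleeper (marks a waiter)
//   Unlock():          * -> kSpinLockFree; if the old value was
//                      kSpinLockSleeper, SlowUnlock() wakes one waiter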
static int adaptive_spin_count = 0;
const base::LinkerInitialized SpinLock::LINKER_INITIALIZED =
base::LINKER_INITIALIZED;
namespace {
struct SpinLock_InitHelper {
SpinLock_InitHelper() {
// On multi-cpu machines, spin for longer before yielding
// the processor or sleeping. Reduces idle time significantly.
if (GetSystemCPUsCount() > 1) {
adaptive_spin_count = 1000;
}
}
};
// Hook into global constructor execution:
// We do not do adaptive spinning before that,
// but nothing lock-intensive should be going on at that time.
static SpinLock_InitHelper init_helper;
inline void SpinlockPause(void) {
#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
__asm__ __volatile__("rep; nop" : : );
#endif
}
} // unnamed namespace
// Monitor the lock to see if its value changes within some time
// period (adaptive_spin_count loop iterations). The last value read
// from the lock is returned from the method.
Atomic32 SpinLock::SpinLoop() {
int c = adaptive_spin_count;
while (base::subtle::NoBarrier_Load(&lockword_) != kSpinLockFree && --c > 0) {
SpinlockPause();
}
return base::subtle::Acquire_CompareAndSwap(&lockword_, kSpinLockFree,
kSpinLockSleeper);
}
void SpinLock::SlowLock() {
Atomic32 lock_value = SpinLoop();
int lock_wait_call_count = 0;
while (lock_value != kSpinLockFree) {
// If the lock is currently held, but not marked as having a sleeper, mark
// it as having a sleeper.
if (lock_value == kSpinLockHeld) {
// Here, just "mark" that the thread is going to sleep. Don't store the
// lock wait time in the lock as that will cause the current lock
// owner to think it experienced contention.
lock_value = base::subtle::Acquire_CompareAndSwap(&lockword_,
kSpinLockHeld,
kSpinLockSleeper);
if (lock_value == kSpinLockHeld) {
// Successfully transitioned to kSpinLockSleeper. Pass
// kSpinLockSleeper to the SpinLockDelay routine to properly indicate
// the last lock_value observed.
lock_value = kSpinLockSleeper;
} else if (lock_value == kSpinLockFree) {
// Lock is free again, so try and acquire it before sleeping. The
// new lock state will be the number of cycles this thread waited if
// this thread obtains the lock.
lock_value = base::subtle::Acquire_CompareAndSwap(&lockword_,
kSpinLockFree,
kSpinLockSleeper);
continue; // skip the delay at the end of the loop
}
}
// Wait for an OS specific delay.
base::internal::SpinLockDelay(&lockword_, lock_value,
++lock_wait_call_count);
// Spin again after returning from the wait routine to give this thread
// some chance of obtaining the lock.
lock_value = SpinLoop();
}
}
void SpinLock::SlowUnlock() {
// wake waiter if necessary
base::internal::SpinLockWake(&lockword_, false);
}

View File

@ -1,143 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2006, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ---
* Author: Sanjay Ghemawat
*/
// SpinLock is async signal safe.
// If used within a signal handler, all lock holders
// should block the signal even outside the signal handler.
#ifndef BASE_SPINLOCK_H_
#define BASE_SPINLOCK_H_
#include "../config.h"
#include "base/atomicops.h"
#include "base/basictypes.h"
#include "base/dynamic_annotations.h"
#include "base/thread_annotations.h"
class LOCKABLE SpinLock {
public:
SpinLock() : lockword_(kSpinLockFree) { }
// Special constructor for use with static SpinLock objects. E.g.,
//
// static SpinLock lock(base::LINKER_INITIALIZED);
//
// When initialized using this constructor, we depend on the fact
// that the linker has already initialized the memory appropriately.
// A SpinLock constructed like this can be freely used from global
// initializers without worrying about the order in which global
// initializers run.
explicit SpinLock(base::LinkerInitialized /*x*/) {
// Does nothing; lockword_ is already initialized
}
// Acquire this SpinLock.
// TODO(csilvers): uncomment the annotation when we figure out how to
// support this macro with 0 args (see thread_annotations.h)
inline void Lock() /*EXCLUSIVE_LOCK_FUNCTION()*/ {
if (base::subtle::Acquire_CompareAndSwap(&lockword_, kSpinLockFree,
kSpinLockHeld) != kSpinLockFree) {
SlowLock();
}
ANNOTATE_RWLOCK_ACQUIRED(this, 1);
}
// Try to acquire this SpinLock without blocking and return true if the
// acquisition was successful. If the lock was not acquired, false is
// returned. If this SpinLock is free at the time of the call, TryLock
// will return true with high probability.
inline bool TryLock() EXCLUSIVE_TRYLOCK_FUNCTION(true) {
bool res =
(base::subtle::Acquire_CompareAndSwap(&lockword_, kSpinLockFree,
kSpinLockHeld) == kSpinLockFree);
if (res) {
ANNOTATE_RWLOCK_ACQUIRED(this, 1);
}
return res;
}
// Release this SpinLock, which must be held by the calling thread.
// TODO(csilvers): uncomment the annotation when we figure out how to
// support this macro with 0 args (see thread_annotations.h)
inline void Unlock() /*UNLOCK_FUNCTION()*/ {
ANNOTATE_RWLOCK_RELEASED(this, 1);
uint64 prev_value = static_cast<uint64>(
base::subtle::Release_AtomicExchange(&lockword_, kSpinLockFree));
if (prev_value != kSpinLockHeld) {
// Speed the wakeup of any waiter.
SlowUnlock();
}
}
// Determine if the lock is held. When the lock is held by the invoking
// thread, true will always be returned. Intended to be used as
// CHECK(lock.IsHeld()).
inline bool IsHeld() const {
return base::subtle::NoBarrier_Load(&lockword_) != kSpinLockFree;
}
static const base::LinkerInitialized LINKER_INITIALIZED; // backwards compat
private:
enum { kSpinLockFree = 0 };
enum { kSpinLockHeld = 1 };
enum { kSpinLockSleeper = 2 };
volatile Atomic32 lockword_;
void SlowLock();
void SlowUnlock();
Atomic32 SpinLoop();
DISALLOW_COPY_AND_ASSIGN(SpinLock);
};
// Corresponding locker object that arranges to acquire a spinlock for
// the duration of a C++ scope.
class SCOPED_LOCKABLE SpinLockHolder {
private:
SpinLock* lock_;
public:
inline explicit SpinLockHolder(SpinLock* l) EXCLUSIVE_LOCK_FUNCTION(l)
: lock_(l) {
l->Lock();
}
// TODO(csilvers): uncomment the annotation when we figure out how to
// support this macro with 0 args (see thread_annotations.h)
inline ~SpinLockHolder() /*UNLOCK_FUNCTION()*/ { lock_->Unlock(); }
};
// Catch bug where variable name is omitted, e.g. SpinLockHolder (&lock);
#define SpinLockHolder(x) COMPILE_ASSERT(0, spin_lock_decl_missing_var_name)
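// A minimal usage sketch (hypothetical function; the static-initialization
// form is the one recommended in the constructor comment above):
//   static SpinLock lock(base::LINKER_INITIALIZED);
//   void f() { SpinLockHolder h(&lock); /* critical section */ }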
#endif // BASE_SPINLOCK_H_

View File

@ -1,102 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2010, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// The OS-specific header included below must provide two calls:
// base::internal::SpinLockDelay() and base::internal::SpinLockWake().
// See spinlock_internal.h for the spec of SpinLockWake().
// void SpinLockDelay(volatile Atomic32 *w, int32 value, int loop)
// SpinLockDelay() generates an appropriate spin delay on iteration "loop" of a
// spin loop on location *w, whose previously observed value was "value".
// SpinLockDelay() may do nothing, may yield the CPU, may sleep a clock tick,
// or may wait for a delay that can be truncated by a call to SpinLockWake(w).
// In all cases, it must return in bounded time even if SpinLockWake() is not
// called.
#include "base/spinlock_internal.h"
// forward declaration for use by spinlock_*-inl.h
namespace base { namespace internal { static int SuggestedDelayNS(int loop); }}
#if defined(_WIN32)
#include "base/spinlock_win32-inl.h"
#elif defined(__linux__)
#include "base/spinlock_linux-inl.h"
#else
#include "base/spinlock_posix-inl.h"
#endif
namespace base {
namespace internal {
// Return a suggested delay in nanoseconds for iteration number "loop"
static int SuggestedDelayNS(int loop) {
// Weak pseudo-random number generator to get some spread between threads
// when many are spinning.
#ifdef BASE_HAS_ATOMIC64
static base::subtle::Atomic64 rand;
uint64 r = base::subtle::NoBarrier_Load(&rand);
r = 0x5deece66dLL * r + 0xb; // numbers from nrand48()
base::subtle::NoBarrier_Store(&rand, r);
r <<= 16; // 48-bit random number now in top 48-bits.
if (loop < 0 || loop > 32) { // limit loop to 0..32
loop = 32;
}
// loop>>3 cannot exceed 4 because loop cannot exceed 32.
// Select top 20..24 bits of lower 48 bits,
// giving approximately 0ms to 16ms.
// Mean is exponential in loop for first 32 iterations, then 8ms.
// The futex path multiplies this by 16, since we expect explicit wakeups
// almost always on that path.
return r >> (44 - (loop >> 3));
#else
static Atomic32 rand;
uint32 r = base::subtle::NoBarrier_Load(&rand);
r = 0x343fd * r + 0x269ec3; // numbers from MSVC++
base::subtle::NoBarrier_Store(&rand, r);
r <<= 1; // 31-bit random number now in top 31-bits.
if (loop < 0 || loop > 32) { // limit loop to 0..32
loop = 32;
}
// loop>>3 cannot exceed 4 because loop cannot exceed 32.
// Select top 20..24 bits of lower 31 bits,
// giving approximately 0ms to 16ms.
// Mean is exponential in loop for first 32 iterations, then 8ms.
// The futex path multiplies this by 16, since we expect explicit wakeups
// almost always on that path.
return r >> (12 - (loop >> 3));
#endif
}
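// Worked example for the 64-bit branch above: at loop == 32, loop >> 3 == 4,
// so the shift is 40 and the top 24 bits of the 48-bit random value survive,
// i.e. delays up to ~16.7ms in nanoseconds; at loop == 0 the shift is 44 and
// only 20 bits survive (up to ~1ms).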
} // namespace internal
} // namespace base

View File

@ -1,51 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2010, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ---
* This file is an internal part of spinlock.cc and once.cc.
* It may not be used directly by code outside of //base.
*/
#ifndef BASE_SPINLOCK_INTERNAL_H_
#define BASE_SPINLOCK_INTERNAL_H_
#include "../config.h"
#include "base/basictypes.h"
#include "base/atomicops.h"
namespace base {
namespace internal {
void SpinLockWake(volatile Atomic32 *w, bool all);
void SpinLockDelay(volatile Atomic32 *w, int32 value, int loop);
} // namespace internal
} // namespace base
#endif

View File

@ -1,101 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2009, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ---
* This file is a Linux-specific part of spinlock_internal.cc
*/
#include <errno.h>
#include <sched.h>
#include <time.h>
#include <limits.h>
#include "base/linux_syscall_support.h"
#define FUTEX_WAIT 0
#define FUTEX_WAKE 1
#define FUTEX_PRIVATE_FLAG 128
static bool have_futex;
static int futex_private_flag = FUTEX_PRIVATE_FLAG;
namespace {
static struct InitModule {
InitModule() {
int x = 0;
// futexes are ints, so we can use them only when
// that's the same size as the lockword_ in SpinLock.
have_futex = (sizeof (Atomic32) == sizeof (int) &&
sys_futex(&x, FUTEX_WAKE, 1, NULL, NULL, 0) >= 0);
if (have_futex &&
sys_futex(&x, FUTEX_WAKE | futex_private_flag, 1, NULL, NULL, 0) < 0) {
futex_private_flag = 0;
}
}
} init_module;
} // anonymous namespace
namespace base {
namespace internal {
void SpinLockDelay(volatile Atomic32 *w, int32 value, int loop) {
if (loop != 0) {
int save_errno = errno;
struct timespec tm;
tm.tv_sec = 0;
if (have_futex) {
tm.tv_nsec = base::internal::SuggestedDelayNS(loop);
} else {
tm.tv_nsec = 2000001; // above 2ms so linux 2.4 doesn't spin
}
if (have_futex) {
tm.tv_nsec *= 16; // increase the delay; we expect explicit wakeups
sys_futex(reinterpret_cast<int *>(const_cast<Atomic32 *>(w)),
FUTEX_WAIT | futex_private_flag,
value, reinterpret_cast<struct kernel_timespec *>(&tm),
NULL, 0);
} else {
nanosleep(&tm, NULL);
}
errno = save_errno;
}
}
void SpinLockWake(volatile Atomic32 *w, bool all) {
if (have_futex) {
sys_futex(reinterpret_cast<int *>(const_cast<Atomic32 *>(w)),
FUTEX_WAKE | futex_private_flag, all? INT_MAX : 1,
NULL, NULL, 0);
}
}
} // namespace internal
} // namespace base
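The sys_futex() used above comes from base/linux_syscall_support.h; the same wait/wake pair can be written against the raw futex(2) system call. A minimal sketch, not part of this codebase:

#include <climits>       // for INT_MAX
#include <linux/futex.h> // for FUTEX_WAIT, FUTEX_WAKE, FUTEX_PRIVATE_FLAG
#include <sys/syscall.h> // for SYS_futex
#include <time.h>        // for struct timespec
#include <unistd.h>      // for syscall()

// Block while *addr still holds expected_value (or until timeout/wakeup).
static long futex_wait(int *addr, int expected_value,
                       const struct timespec *timeout) {
  return syscall(SYS_futex, addr, FUTEX_WAIT | FUTEX_PRIVATE_FLAG,
                 expected_value, timeout, NULL, 0);
}

// Wake one waiter, or all of them.
static long futex_wake(int *addr, bool all) {
  return syscall(SYS_futex, addr, FUTEX_WAKE | FUTEX_PRIVATE_FLAG,
                 all ? INT_MAX : 1, NULL, NULL, 0);
}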


@ -1,63 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2009, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ---
* This file is a Posix-specific part of spinlock_internal.cc
*/
#include "../config.h"
#include <errno.h>
#ifdef HAVE_SCHED_H
#include <sched.h> /* For sched_yield() */
#endif
#include <time.h> /* For nanosleep() */
namespace base {
namespace internal {
void SpinLockDelay(volatile Atomic32 *w, int32 value, int loop) {
int save_errno = errno;
if (loop == 0) {
} else if (loop == 1) {
sched_yield();
} else {
struct timespec tm;
tm.tv_sec = 0;
tm.tv_nsec = base::internal::SuggestedDelayNS(loop);
nanosleep(&tm, NULL);
}
errno = save_errno;
}
void SpinLockWake(volatile Atomic32 *w, bool all) {
}
} // namespace internal
} // namespace base


@ -1,54 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2009, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ---
* This file is a Win32-specific part of spinlock_internal.cc
*/
#include <windows.h>
namespace base {
namespace internal {
void SpinLockDelay(volatile Atomic32 *w, int32 value, int loop) {
if (loop == 0) {
} else if (loop == 1) {
Sleep(0);
} else {
Sleep(base::internal::SuggestedDelayNS(loop) / 1000000);
}
}
void SpinLockWake(volatile Atomic32 *w, bool all) {
}
} // namespace internal
} // namespace base


@ -1,98 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2006, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ---
* Author: Maxim Lifantsev
*/
#ifndef BASE_STL_ALLOCATOR_H_
#define BASE_STL_ALLOCATOR_H_
#include "../config.h"
#include <stddef.h> // for ptrdiff_t
#include <limits>
#include "base/logging.h"
// Generic allocator class for STL objects
// that uses a given type-less allocator Alloc, which must provide:
// static void* Alloc::Allocate(size_t size);
// static void Alloc::Free(void* ptr, size_t size);
//
// STL_Allocator<T, MyAlloc> provides the same thread-safety
// guarantees as MyAlloc.
//
// Usage example:
// set<T, less<T>, STL_Allocator<T, MyAlloc> > my_set;
// CAVEAT: Parts of the code below are probably specific
// to the STL version(s) we are using.
// The code is simply lifted from what std::allocator<> provides.
template <typename T, class Alloc>
class STL_Allocator {
public:
typedef size_t size_type;
typedef ptrdiff_t difference_type;
typedef T* pointer;
typedef const T* const_pointer;
typedef T& reference;
typedef const T& const_reference;
typedef T value_type;
template <class T1> struct rebind {
typedef STL_Allocator<T1, Alloc> other;
};
STL_Allocator() { }
STL_Allocator(const STL_Allocator&) { }
template <class T1> STL_Allocator(const STL_Allocator<T1, Alloc>&) { }
~STL_Allocator() { }
pointer address(reference x) const { return &x; }
const_pointer address(const_reference x) const { return &x; }
pointer allocate(size_type n, const void* = 0) {
RAW_DCHECK((n * sizeof(T)) / sizeof(T) == n, "n is too big to allocate");
return static_cast<T*>(Alloc::Allocate(n * sizeof(T)));
}
void deallocate(pointer p, size_type n) { Alloc::Free(p, n * sizeof(T)); }
size_type max_size() const { return size_t(-1) / sizeof(T); }
void construct(pointer p, const T& val) { ::new(p) T(val); }
void construct(pointer p) { ::new(p) T(); }
void destroy(pointer p) { p->~T(); }
// There's no state, so these allocators are always equal
bool operator==(const STL_Allocator&) const { return true; }
};
#endif // BASE_STL_ALLOCATOR_H_
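A hedged usage sketch for the allocator above, expanding the header's own example; MallocBackend is a hypothetical type-less allocator backed by malloc/free, not something defined in this tree:

#include <cstdlib>
#include <functional>
#include <set>
#include "base/stl_allocator.h"

// Hypothetical backend satisfying the Alloc contract documented above.
struct MallocBackend {
  static void* Allocate(size_t size) { return malloc(size); }
  static void Free(void* ptr, size_t /*size*/) { free(ptr); }
};

// The usage pattern from the header comment, spelled out.
std::set<int, std::less<int>, STL_Allocator<int, MallocBackend> > my_set;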


@ -1,860 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2006, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "../config.h"
#if (defined(_WIN32) || defined(__MINGW32__)) && !defined(__CYGWIN__) && !defined(__CYGWIN32)
# define PLATFORM_WINDOWS 1
#endif
#include <ctype.h> // for isspace()
#include <stdlib.h> // for getenv()
#include <stdio.h> // for snprintf(), sscanf()
#include <string.h> // for memmove(), memchr(), etc.
#include <fcntl.h> // for open()
#include <errno.h> // for errno
#ifdef HAVE_UNISTD_H
#include <unistd.h> // for read()
#endif
#if defined __MACH__ // Mac OS X, almost certainly
#include <mach-o/dyld.h> // for iterating over dll's in ProcMapsIter
#include <mach-o/loader.h> // for iterating over dll's in ProcMapsIter
#include <sys/types.h>
#include <sys/sysctl.h> // how we figure out numcpu's on OS X
#elif defined __FreeBSD__
#include <sys/sysctl.h>
#elif defined __sun__ // Solaris
#include <procfs.h> // for, e.g., prmap_t
#elif defined(PLATFORM_WINDOWS)
#include <process.h> // for getpid() (actually, _getpid())
#include <shlwapi.h> // for SHGetValueA()
#include <tlhelp32.h> // for Module32First()
#endif
#include "base/sysinfo.h"
#include "base/commandlineflags.h"
#include "base/dynamic_annotations.h" // for RunningOnValgrind
#include "base/logging.h"
#ifdef PLATFORM_WINDOWS
#ifdef MODULEENTRY32
// In a change from the usual W-A pattern, there is no A variant of
// MODULEENTRY32. Tlhelp32.h #defines the W variant, but not the A.
// In unicode mode, tlhelp32.h #defines MODULEENTRY32 to be
// MODULEENTRY32W. These #undefs are the only way I see to get back
// access to the original, ascii struct (and related functions).
#undef MODULEENTRY32
#undef Module32First
#undef Module32Next
#undef PMODULEENTRY32
#undef LPMODULEENTRY32
#endif /* MODULEENTRY32 */
// MinGW doesn't seem to define this, perhaps some windowsen don't either.
#ifndef TH32CS_SNAPMODULE32
#define TH32CS_SNAPMODULE32 0
#endif /* TH32CS_SNAPMODULE32 */
#endif /* PLATFORM_WINDOWS */
// Re-run fn until it doesn't cause EINTR.
#define NO_INTR(fn) do {} while ((fn) < 0 && errno == EINTR)
// open/read/close can set errno, which may be illegal at this
// time, so prefer making the syscalls directly if we can.
#ifdef HAVE_SYS_SYSCALL_H
# include <sys/syscall.h>
#endif
#ifdef SYS_open // solaris 11, at least sometimes, only defines SYS_openat
# define safeopen(filename, mode) syscall(SYS_open, filename, mode)
#else
# define safeopen(filename, mode) open(filename, mode)
#endif
#ifdef SYS_read
# define saferead(fd, buffer, size) syscall(SYS_read, fd, buffer, size)
#else
# define saferead(fd, buffer, size) read(fd, buffer, size)
#endif
#ifdef SYS_close
# define safeclose(fd) syscall(SYS_close, fd)
#else
# define safeclose(fd) close(fd)
#endif
// ----------------------------------------------------------------------
// GetenvBeforeMain()
// GetUniquePathFromEnv()
// Some non-trivial getenv-related functions.
// ----------------------------------------------------------------------
// It's not safe to call getenv() in the malloc hooks, because they
// might be called extremely early, before libc is done setting up
// correctly. In particular, the thread library may not be done
// setting up errno. So instead, we use the built-in __environ array
// if it exists, and otherwise read /proc/self/environ directly, using
// system calls to read the file, and thus avoid setting errno.
// /proc/self/environ has a limit of how much data it exports (around
// 8K), so it's not an ideal solution.
const char* GetenvBeforeMain(const char* name) {
#if defined(HAVE___ENVIRON) // if we have it, it's declared in unistd.h
if (__environ) { // can exist but be NULL, if statically linked
const int namelen = strlen(name);
for (char** p = __environ; *p; p++) {
if (strlen(*p) < namelen) {
continue;
}
if (!memcmp(*p, name, namelen) && (*p)[namelen] == '=') // it's a match
return *p + namelen+1; // point after =
}
return NULL;
}
#endif
#if defined(PLATFORM_WINDOWS)
// TODO(mbelshe) - repeated calls to this function will overwrite the
// contents of the static buffer.
static char envvar_buf[1024]; // enough to hold any envvar we care about
if (!GetEnvironmentVariableA(name, envvar_buf, sizeof(envvar_buf)-1))
return NULL;
return envvar_buf;
#endif
// static is ok because this function should only be called before
// main(), when we're single-threaded.
static char envbuf[16<<10];
if (*envbuf == '\0') { // haven't read the environ yet
int fd = safeopen("/proc/self/environ", O_RDONLY);
// The -2 below guarantees the last two bytes of the buffer will be \0\0
if (fd == -1 || // unable to open the file, fall back onto libc
saferead(fd, envbuf, sizeof(envbuf) - 2) < 0) { // error reading file
RAW_VLOG(1, "Unable to open /proc/self/environ, falling back "
"on getenv(\"%s\"), which may not work", name);
if (fd != -1) safeclose(fd);
return getenv(name);
}
safeclose(fd);
}
const int namelen = strlen(name);
const char* p = envbuf;
while (*p != '\0') { // will happen at the \0\0 that terminates the buffer
// proc file has the format NAME=value\0NAME=value\0NAME=value\0...
const char* endp = (char*)memchr(p, '\0', sizeof(envbuf) - (p - envbuf));
if (endp == NULL) // this entry isn't NUL terminated
return NULL;
else if (!memcmp(p, name, namelen) && p[namelen] == '=') // it's a match
return p + namelen+1; // point after =
p = endp + 1;
}
return NULL; // env var never found
}
extern "C" {
const char* TCMallocGetenvSafe(const char* name) {
return GetenvBeforeMain(name);
}
}
// This takes as an argument an environment-variable name (like
// CPUPROFILE) whose value is supposed to be a file-path, and sets
// path to that path, and returns true. If the env var doesn't exist,
// or is the empty string, leave path unchanged and returns false.
// The reason this is non-trivial is that this function handles munged
// pathnames. Here's why:
//
// If we're a child process of the 'main' process, we can't just use
// getenv("CPUPROFILE") -- the parent process will be using that path.
// Instead we append our pid to the pathname. How do we tell if we're a
// child process? Ideally we'd set an environment variable that all
// our children would inherit. But -- and this is seemingly a bug in
// gcc -- if you do a setenv() in a shared library in a global
// constructor, the environment setting is lost by the time main() is
// called. The only safe thing we can do in such a situation is to
// modify the existing envvar. So we do a hack: in the parent, we set
// the high bit of the 1st char of CPUPROFILE. In the child, we
// notice the high bit is set and append the pid(). This works
// assuming cpuprofile filenames don't normally have the high bit set
// in their first character! If that assumption is violated, we'll
// still get a profile, but one with an unexpected name.
// TODO(csilvers): set an envvar instead when we can do it reliably.
bool GetUniquePathFromEnv(const char* env_name, char* path) {
char* envval = getenv(env_name);
if (envval == NULL || *envval == '\0')
return false;
if (envval[0] & 128) { // high bit is set
snprintf(path, PATH_MAX, "%c%s_%u", // add pid and clear high bit
envval[0] & 127, envval+1, (unsigned int)(getpid()));
} else {
snprintf(path, PATH_MAX, "%s", envval);
envval[0] |= 128; // set high bit for kids to see
}
return true;
}
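// Worked example of the high-bit scheme above (illustrative values):
// suppose CPUPROFILE=/tmp/prof and a child later runs with pid 1234.
//   - Parent, first call: the high bit of '/' (0x2f) is clear, so path is
//     set to "/tmp/prof" and the env value's first byte becomes 0xaf.
//   - Child, later call: the high bit is set, so the byte is masked back
//     to '/' and the pid is appended: path becomes "/tmp/prof_1234".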
void SleepForMilliseconds(int milliseconds) {
#ifdef PLATFORM_WINDOWS
_sleep(milliseconds); // Windows's _sleep takes milliseconds argument
#else
// Sleep for a few milliseconds
struct timespec sleep_time;
sleep_time.tv_sec = milliseconds / 1000;
sleep_time.tv_nsec = (milliseconds % 1000) * 1000000;
while (nanosleep(&sleep_time, &sleep_time) != 0 && errno == EINTR)
; // Ignore signals and wait for the full interval to elapse.
#endif
}
int GetSystemCPUsCount()
{
#if defined(PLATFORM_WINDOWS)
// Get the number of processors.
SYSTEM_INFO info;
GetSystemInfo(&info);
return info.dwNumberOfProcessors;
#else
long rv = sysconf(_SC_NPROCESSORS_ONLN);
if (rv < 0) {
return 1;
}
return static_cast<int>(rv);
#endif
}
// ----------------------------------------------------------------------
#if defined __linux__ || defined __FreeBSD__ || defined __sun__ || defined __CYGWIN__ || defined __CYGWIN32__
static void ConstructFilename(const char* spec, pid_t pid,
char* buf, int buf_size) {
CHECK_LT(snprintf(buf, buf_size,
spec,
static_cast<int>(pid ? pid : getpid())), buf_size);
}
#endif
// A templatized helper function instantiated for Mach (OS X) only.
// It can handle finding info for both 32 bits and 64 bits.
// Returns true if it successfully handled the hdr, false otherwise.
#ifdef __MACH__ // Mac OS X, almost certainly
template<uint32_t kMagic, uint32_t kLCSegment,
typename MachHeader, typename SegmentCommand>
static bool NextExtMachHelper(const mach_header* hdr,
int current_image, int current_load_cmd,
uint64 *start, uint64 *end, char **flags,
uint64 *offset, int64 *inode, char **filename,
uint64 *file_mapping, uint64 *file_pages,
uint64 *anon_mapping, uint64 *anon_pages,
dev_t *dev) {
static char kDefaultPerms[5] = "r-xp";
if (hdr->magic != kMagic)
return false;
const char* lc = (const char *)hdr + sizeof(MachHeader);
// TODO(csilvers): make this not-quadratic (increment and hold state)
for (int j = 0; j < current_load_cmd; j++) // advance to *our* load_cmd
lc += ((const load_command *)lc)->cmdsize;
if (((const load_command *)lc)->cmd == kLCSegment) {
const intptr_t dlloff = _dyld_get_image_vmaddr_slide(current_image);
const SegmentCommand* sc = (const SegmentCommand *)lc;
if (start) *start = sc->vmaddr + dlloff;
if (end) *end = sc->vmaddr + sc->vmsize + dlloff;
if (flags) *flags = kDefaultPerms; // can we do better?
if (offset) *offset = sc->fileoff;
if (inode) *inode = 0;
if (filename)
*filename = const_cast<char*>(_dyld_get_image_name(current_image));
if (file_mapping) *file_mapping = 0;
if (file_pages) *file_pages = 0; // could we use sc->filesize?
if (anon_mapping) *anon_mapping = 0;
if (anon_pages) *anon_pages = 0;
if (dev) *dev = 0;
return true;
}
return false;
}
#endif
// Finds |c| in |text|, and assigns '\0' at the found position.
// The original character at the modified position should be |c|.
// A pointer to the modified position is stored in |endptr|.
// |endptr| should not be NULL.
static bool ExtractUntilChar(char *text, int c, char **endptr) {
CHECK_NE(text, NULL);
CHECK_NE(endptr, NULL);
char *found;
found = strchr(text, c);
if (found == NULL) {
*endptr = NULL;
return false;
}
*endptr = found;
*found = '\0';
return true;
}
// Increments |*text_pointer| while it points at a whitespace character.
// This follows sscanf's whitespace handling.
static void SkipWhileWhitespace(char **text_pointer, int c) {
if (isspace(c)) {
while (isspace(**text_pointer) && isspace(*((*text_pointer) + 1))) {
++(*text_pointer);
}
}
}
template<class T>
static T StringToInteger(char *text, char **endptr, int base) {
assert(false);
return T();
}
template<>
int StringToInteger<int>(char *text, char **endptr, int base) {
return strtol(text, endptr, base);
}
template<>
int64 StringToInteger<int64>(char *text, char **endptr, int base) {
return strtoll(text, endptr, base);
}
template<>
uint64 StringToInteger<uint64>(char *text, char **endptr, int base) {
return strtoull(text, endptr, base);
}
template<typename T>
static T StringToIntegerUntilChar(
char *text, int base, int c, char **endptr_result) {
CHECK_NE(endptr_result, NULL);
*endptr_result = NULL;
char *endptr_extract;
if (!ExtractUntilChar(text, c, &endptr_extract))
return 0;
T result;
char *endptr_strto;
result = StringToInteger<T>(text, &endptr_strto, base);
*endptr_extract = c;
if (endptr_extract != endptr_strto)
return 0;
*endptr_result = endptr_extract;
SkipWhileWhitespace(endptr_result, c);
return result;
}
static char *CopyStringUntilChar(
char *text, unsigned out_len, int c, char *out) {
char *endptr;
if (!ExtractUntilChar(text, c, &endptr))
return NULL;
strncpy(out, text, out_len);
out[out_len-1] = '\0';
*endptr = c;
SkipWhileWhitespace(&endptr, c);
return endptr;
}
template<typename T>
static bool StringToIntegerUntilCharWithCheck(
T *outptr, char *text, int base, int c, char **endptr) {
*outptr = StringToIntegerUntilChar<T>(*endptr, base, c, endptr);
if (*endptr == NULL || **endptr == '\0') return false;
++(*endptr);
return true;
}
static bool ParseProcMapsLine(char *text, uint64 *start, uint64 *end,
char *flags, uint64 *offset,
int *major, int *minor, int64 *inode,
unsigned *filename_offset) {
#if defined(__linux__)
/*
* It's similar to:
* sscanf(text, "%"SCNx64"-%"SCNx64" %4s %"SCNx64" %x:%x %"SCNd64" %n",
* start, end, flags, offset, major, minor, inode, filename_offset)
*/
char *endptr = text;
if (endptr == NULL || *endptr == '\0') return false;
if (!StringToIntegerUntilCharWithCheck(start, endptr, 16, '-', &endptr))
return false;
if (!StringToIntegerUntilCharWithCheck(end, endptr, 16, ' ', &endptr))
return false;
endptr = CopyStringUntilChar(endptr, 5, ' ', flags);
if (endptr == NULL || *endptr == '\0') return false;
++endptr;
if (!StringToIntegerUntilCharWithCheck(offset, endptr, 16, ' ', &endptr))
return false;
if (!StringToIntegerUntilCharWithCheck(major, endptr, 16, ':', &endptr))
return false;
if (!StringToIntegerUntilCharWithCheck(minor, endptr, 16, ' ', &endptr))
return false;
if (!StringToIntegerUntilCharWithCheck(inode, endptr, 10, ' ', &endptr))
return false;
*filename_offset = (endptr - text);
return true;
#else
return false;
#endif
}
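// Worked example for the Linux branch above (values are illustrative).
// Given the canonical maps line
//   08048000-0804c000 r-xp 00000000 03:01 3793678    /bin/cat
// ParseProcMapsLine() yields start=0x08048000, end=0x0804c000,
// flags="r-xp", offset=0, major=0x03, minor=0x01, inode=3793678, and
// filename_offset pointing at the "/bin/cat" field.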
ProcMapsIterator::ProcMapsIterator(pid_t pid) {
Init(pid, NULL, false);
}
ProcMapsIterator::ProcMapsIterator(pid_t pid, Buffer *buffer) {
Init(pid, buffer, false);
}
ProcMapsIterator::ProcMapsIterator(pid_t pid, Buffer *buffer,
bool use_maps_backing) {
Init(pid, buffer, use_maps_backing);
}
void ProcMapsIterator::Init(pid_t pid, Buffer *buffer,
bool use_maps_backing) {
pid_ = pid;
using_maps_backing_ = use_maps_backing;
dynamic_buffer_ = NULL;
if (!buffer) {
// If the user didn't pass in any buffer storage, allocate it
// now. This is the normal case; the signal handler passes in a
// static buffer.
buffer = dynamic_buffer_ = new Buffer;
} else {
dynamic_buffer_ = NULL;
}
ibuf_ = buffer->buf_;
stext_ = etext_ = nextline_ = ibuf_;
ebuf_ = ibuf_ + Buffer::kBufSize - 1;
nextline_ = ibuf_;
#if defined(__linux__) || defined(__CYGWIN__) || defined(__CYGWIN32__)
if (use_maps_backing) { // don't bother with clever "self" stuff in this case
ConstructFilename("/proc/%d/maps_backing", pid, ibuf_, Buffer::kBufSize);
} else if (pid == 0) {
// We have to kludge a bit to deal with the args ConstructFilename
// expects. The 1 is never used -- it's only important that it's not 0.
ConstructFilename("/proc/self/maps", 1, ibuf_, Buffer::kBufSize);
} else {
ConstructFilename("/proc/%d/maps", pid, ibuf_, Buffer::kBufSize);
}
// No error logging since this can be called from the crash dump
// handler at awkward moments. Users should call Valid() before
// using.
NO_INTR(fd_ = open(ibuf_, O_RDONLY));
#elif defined(__FreeBSD__)
// We don't support maps_backing on freebsd
if (pid == 0) {
ConstructFilename("/proc/curproc/map", 1, ibuf_, Buffer::kBufSize);
} else {
ConstructFilename("/proc/%d/map", pid, ibuf_, Buffer::kBufSize);
}
NO_INTR(fd_ = open(ibuf_, O_RDONLY));
#elif defined(__sun__)
if (pid == 0) {
ConstructFilename("/proc/self/map", 1, ibuf_, Buffer::kBufSize);
} else {
ConstructFilename("/proc/%d/map", pid, ibuf_, Buffer::kBufSize);
}
NO_INTR(fd_ = open(ibuf_, O_RDONLY));
#elif defined(__MACH__)
current_image_ = _dyld_image_count(); // count down from the top
current_load_cmd_ = -1;
#elif defined(PLATFORM_WINDOWS)
snapshot_ = CreateToolhelp32Snapshot(TH32CS_SNAPMODULE |
TH32CS_SNAPMODULE32,
GetCurrentProcessId());
memset(&module_, 0, sizeof(module_));
#else
fd_ = -1; // so Valid() is always false
#endif
}
ProcMapsIterator::~ProcMapsIterator() {
#if defined(PLATFORM_WINDOWS)
if (snapshot_ != INVALID_HANDLE_VALUE) CloseHandle(snapshot_);
#elif defined(__MACH__)
// no cleanup necessary!
#else
if (fd_ >= 0) NO_INTR(close(fd_));
#endif
delete dynamic_buffer_;
}
bool ProcMapsIterator::Valid() const {
#if defined(PLATFORM_WINDOWS)
return snapshot_ != INVALID_HANDLE_VALUE;
#elif defined(__MACH__)
return 1;
#else
return fd_ != -1;
#endif
}
bool ProcMapsIterator::Next(uint64 *start, uint64 *end, char **flags,
uint64 *offset, int64 *inode, char **filename) {
return NextExt(start, end, flags, offset, inode, filename, NULL, NULL,
NULL, NULL, NULL);
}
// This has too many arguments. It should really be building
// a map object and returning it. The problem is that this is called
// when the memory allocator state is undefined, hence the arguments.
bool ProcMapsIterator::NextExt(uint64 *start, uint64 *end, char **flags,
uint64 *offset, int64 *inode, char **filename,
uint64 *file_mapping, uint64 *file_pages,
uint64 *anon_mapping, uint64 *anon_pages,
dev_t *dev) {
#if defined(__linux__) || defined(__FreeBSD__) || defined(__CYGWIN__) || defined(__CYGWIN32__)
do {
// Advance to the start of the next line
stext_ = nextline_;
// See if we have a complete line in the buffer already
nextline_ = static_cast<char *>(memchr (stext_, '\n', etext_ - stext_));
if (!nextline_) {
// Shift/fill the buffer so we do have a line
int count = etext_ - stext_;
// Move the current text to the start of the buffer
memmove(ibuf_, stext_, count);
stext_ = ibuf_;
etext_ = ibuf_ + count;
int nread = 0; // fill up buffer with text
while (etext_ < ebuf_) {
NO_INTR(nread = read(fd_, etext_, ebuf_ - etext_));
if (nread > 0)
etext_ += nread;
else
break;
}
// Zero out remaining characters in buffer at EOF to avoid returning
// garbage from subsequent calls.
if (etext_ != ebuf_ && nread == 0) {
memset(etext_, 0, ebuf_ - etext_);
}
*etext_ = '\n'; // sentinel; safe because ibuf extends 1 char beyond ebuf
nextline_ = static_cast<char *>(memchr (stext_, '\n', etext_ + 1 - stext_));
}
*nextline_ = 0; // turn newline into nul
nextline_ += ((nextline_ < etext_)? 1 : 0); // skip nul if not end of text
// stext_ now points at a nul-terminated line
uint64 tmpstart, tmpend, tmpoffset;
int64 tmpinode;
int major, minor;
unsigned filename_offset = 0;
#if defined(__linux__)
// for now, assume all linuxes have the same format
if (!ParseProcMapsLine(
stext_,
start ? start : &tmpstart,
end ? end : &tmpend,
flags_,
offset ? offset : &tmpoffset,
&major, &minor,
inode ? inode : &tmpinode, &filename_offset)) continue;
#elif defined(__CYGWIN__) || defined(__CYGWIN32__)
// cygwin is like linux, except the third field is the "entry point"
// rather than the offset (see format_process_maps at
// http://cygwin.com/cgi-bin/cvsweb.cgi/src/winsup/cygwin/fhandler_process.cc?rev=1.89&content-type=text/x-cvsweb-markup&cvsroot=src
// Offset is always 0 on cygwin: cygwin implements an mmap
// by loading the whole file and then calling NtMapViewOfSection.
// Cygwin also seems to set its flags kinda randomly; use windows default.
char tmpflags[5];
if (offset)
*offset = 0;
strcpy(flags_, "r-xp");
if (sscanf(stext_, "%llx-%llx %4s %llx %x:%x %lld %n",
start ? start : &tmpstart,
end ? end : &tmpend,
tmpflags,
&tmpoffset,
&major, &minor,
inode ? inode : &tmpinode, &filename_offset) != 7) continue;
#elif defined(__FreeBSD__)
// For the format, see http://www.freebsd.org/cgi/cvsweb.cgi/src/sys/fs/procfs/procfs_map.c?rev=1.31&content-type=text/x-cvsweb-markup
tmpstart = tmpend = tmpoffset = 0;
tmpinode = 0;
major = minor = 0; // can't get this info in freebsd
if (inode)
*inode = 0; // nor this
if (offset)
*offset = 0; // seems like this should be in there, but maybe not
// start end resident privateresident obj(?) prot refcnt shadowcnt
// flags copy_on_write needs_copy type filename:
// 0x8048000 0x804a000 2 0 0xc104ce70 r-x 1 0 0x0 COW NC vnode /bin/cat
if (sscanf(stext_, "0x%" SCNx64 " 0x%" SCNx64 " %*d %*d %*p %3s %*d %*d 0x%*x %*s %*s %*s %n",
start ? start : &tmpstart,
end ? end : &tmpend,
flags_,
&filename_offset) != 3) continue;
#endif
// Depending on the Linux kernel being used, there may or may not be a space
// after the inode if there is no filename. sscanf will in such situations
// nondeterministically either fill in filename_offset or not (the results
// differ on multiple calls in the same run even with identical arguments).
// We don't want to wander off somewhere beyond the end of the string.
size_t stext_length = strlen(stext_);
if (filename_offset == 0 || filename_offset > stext_length)
filename_offset = stext_length;
// We found an entry
if (flags) *flags = flags_;
if (filename) *filename = stext_ + filename_offset;
if (dev) *dev = minor | (major << 8);
if (using_maps_backing_) {
// Extract and parse physical page backing info.
char *backing_ptr = stext_ + filename_offset +
strlen(stext_+filename_offset);
// find the second '('
int paren_count = 0;
while (--backing_ptr > stext_) {
if (*backing_ptr == '(') {
++paren_count;
if (paren_count >= 2) {
uint64 tmp_file_mapping;
uint64 tmp_file_pages;
uint64 tmp_anon_mapping;
uint64 tmp_anon_pages;
sscanf(backing_ptr+1, "F %" SCNx64 " %" SCNd64 ") (A %" SCNx64 " %" SCNd64 ")",
file_mapping ? file_mapping : &tmp_file_mapping,
file_pages ? file_pages : &tmp_file_pages,
anon_mapping ? anon_mapping : &tmp_anon_mapping,
anon_pages ? anon_pages : &tmp_anon_pages);
// null terminate the file name (there is a space
// before the first (.
backing_ptr[-1] = 0;
break;
}
}
}
}
return true;
} while (etext_ > ibuf_);
#elif defined(__sun__)
// This is based on MA_READ == 4, MA_WRITE == 2, MA_EXEC == 1
static char kPerms[8][4] = { "---", "--x", "-w-", "-wx",
"r--", "r-x", "rw-", "rwx" };
COMPILE_ASSERT(MA_READ == 4, solaris_ma_read_must_equal_4);
COMPILE_ASSERT(MA_WRITE == 2, solaris_ma_write_must_equal_2);
COMPILE_ASSERT(MA_EXEC == 1, solaris_ma_exec_must_equal_1);
Buffer object_path;
int nread = 0; // fill up buffer with text
NO_INTR(nread = read(fd_, ibuf_, sizeof(prmap_t)));
if (nread == sizeof(prmap_t)) {
long inode_from_mapname = 0;
prmap_t* mapinfo = reinterpret_cast<prmap_t*>(ibuf_);
// Best-effort attempt to get the inode from the filename. I think the
// two middle ints are major and minor device numbers, but I'm not sure.
sscanf(mapinfo->pr_mapname, "ufs.%*d.%*d.%ld", &inode_from_mapname);
if (pid_ == 0) {
CHECK_LT(snprintf(object_path.buf_, Buffer::kBufSize,
"/proc/self/path/%s", mapinfo->pr_mapname),
Buffer::kBufSize);
} else {
CHECK_LT(snprintf(object_path.buf_, Buffer::kBufSize,
"/proc/%d/path/%s",
static_cast<int>(pid_), mapinfo->pr_mapname),
Buffer::kBufSize);
}
ssize_t len = readlink(object_path.buf_, current_filename_, PATH_MAX);
CHECK_LT(len, PATH_MAX);
if (len < 0)
len = 0;
current_filename_[len] = '\0';
if (start) *start = mapinfo->pr_vaddr;
if (end) *end = mapinfo->pr_vaddr + mapinfo->pr_size;
if (flags) *flags = kPerms[mapinfo->pr_mflags & 7];
if (offset) *offset = mapinfo->pr_offset;
if (inode) *inode = inode_from_mapname;
if (filename) *filename = current_filename_;
if (file_mapping) *file_mapping = 0;
if (file_pages) *file_pages = 0;
if (anon_mapping) *anon_mapping = 0;
if (anon_pages) *anon_pages = 0;
if (dev) *dev = 0;
return true;
}
#elif defined(__MACH__)
// We return a separate entry for each segment in the DLL. (TODO(csilvers):
// can we do better?) A DLL ("image") has load-commands, some of which
// talk about segment boundaries.
// cf image_for_address from http://svn.digium.com/view/asterisk/team/oej/minivoicemail/dlfcn.c?revision=53912
for (; current_image_ >= 0; current_image_--) {
const mach_header* hdr = _dyld_get_image_header(current_image_);
if (!hdr) continue;
if (current_load_cmd_ < 0) // set up for this image
current_load_cmd_ = hdr->ncmds; // again, go from the top down
// We start with the next load command (we've already looked at this one).
for (current_load_cmd_--; current_load_cmd_ >= 0; current_load_cmd_--) {
#ifdef MH_MAGIC_64
if (NextExtMachHelper<MH_MAGIC_64, LC_SEGMENT_64,
struct mach_header_64, struct segment_command_64>(
hdr, current_image_, current_load_cmd_,
start, end, flags, offset, inode, filename,
file_mapping, file_pages, anon_mapping,
anon_pages, dev)) {
return true;
}
#endif
if (NextExtMachHelper<MH_MAGIC, LC_SEGMENT,
struct mach_header, struct segment_command>(
hdr, current_image_, current_load_cmd_,
start, end, flags, offset, inode, filename,
file_mapping, file_pages, anon_mapping,
anon_pages, dev)) {
return true;
}
}
// If we get here, no more load_cmd's in this image talk about
// segments. Go on to the next image.
}
#elif defined(PLATFORM_WINDOWS)
static char kDefaultPerms[5] = "r-xp";
BOOL ok;
if (module_.dwSize == 0) { // only possible before first call
module_.dwSize = sizeof(module_);
ok = Module32First(snapshot_, &module_);
} else {
ok = Module32Next(snapshot_, &module_);
}
if (ok) {
uint64 base_addr = reinterpret_cast<DWORD_PTR>(module_.modBaseAddr);
if (start) *start = base_addr;
if (end) *end = base_addr + module_.modBaseSize;
if (flags) *flags = kDefaultPerms;
if (offset) *offset = 0;
if (inode) *inode = 0;
if (filename) *filename = module_.szExePath;
if (file_mapping) *file_mapping = 0;
if (file_pages) *file_pages = 0;
if (anon_mapping) *anon_mapping = 0;
if (anon_pages) *anon_pages = 0;
if (dev) *dev = 0;
return true;
}
#endif
// We didn't find anything
return false;
}
int ProcMapsIterator::FormatLine(char* buffer, int bufsize,
uint64 start, uint64 end, const char *flags,
uint64 offset, int64 inode,
const char *filename, dev_t dev) {
// We assume 'flags' looks like 'rwxp' or 'rwx'.
char r = (flags && flags[0] == 'r') ? 'r' : '-';
char w = (flags && flags[0] && flags[1] == 'w') ? 'w' : '-';
char x = (flags && flags[0] && flags[1] && flags[2] == 'x') ? 'x' : '-';
// p always seems set on linux, so we set the default to 'p', not '-'
char p = (flags && flags[0] && flags[1] && flags[2] && flags[3] != 'p')
? '-' : 'p';
const int rc = snprintf(buffer, bufsize,
"%08" PRIx64 "-%08" PRIx64 " %c%c%c%c %08" PRIx64 " %02x:%02x %-11" PRId64 " %s\n",
start, end, r,w,x,p, offset,
static_cast<int>(dev/256), static_cast<int>(dev%256),
inode, filename);
return (rc < 0 || rc >= bufsize) ? 0 : rc;
}
namespace tcmalloc {
// Helper to add the list of mapped shared libraries to a profile.
// Fill formatted "/proc/self/maps" contents into buffer 'buf' of size 'size'
// and return the actual size occupied in 'buf'. We set *wrote_all to true
// if we successfully wrote all proc lines to buf, false otherwise.
// We do not provision for 0-terminating 'buf'.
int FillProcSelfMaps(char buf[], int size, bool* wrote_all) {
ProcMapsIterator::Buffer iterbuf;
ProcMapsIterator it(0, &iterbuf); // 0 means "current pid"
uint64 start, end, offset;
int64 inode;
char *flags, *filename;
int bytes_written = 0;
*wrote_all = true;
while (it.Next(&start, &end, &flags, &offset, &inode, &filename)) {
const int line_length = it.FormatLine(buf + bytes_written,
size - bytes_written,
start, end, flags, offset,
inode, filename, 0);
if (line_length == 0)
*wrote_all = false; // failed to write this line out
else
bytes_written += line_length;
}
return bytes_written;
}
// Dump the same data as FillProcSelfMaps reads to fd.
// It seems easier to repeat parts of FillProcSelfMaps here than to
// reuse it via a call.
void DumpProcSelfMaps(RawFD fd) {
ProcMapsIterator::Buffer iterbuf;
ProcMapsIterator it(0, &iterbuf); // 0 means "current pid"
uint64 start, end, offset;
int64 inode;
char *flags, *filename;
ProcMapsIterator::Buffer linebuf;
while (it.Next(&start, &end, &flags, &offset, &inode, &filename)) {
int written = it.FormatLine(linebuf.buf_, sizeof(linebuf.buf_),
start, end, flags, offset, inode, filename,
0);
RawWrite(fd, linebuf.buf_, written);
}
}
} // namespace tcmalloc
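A hedged usage sketch for the iterator implemented above: walk the current process's mappings and print each one in the canonical format via FormatLine(). Illustrative only; PrintSelfMaps is a hypothetical name:

#include <cstdio>
#include "base/sysinfo.h"

static void PrintSelfMaps() {
  ProcMapsIterator::Buffer iterbuf;
  ProcMapsIterator it(0, &iterbuf);  // 0 means "current pid"
  if (!it.Valid()) return;
  uint64 start, end, offset;
  int64 inode;
  char *flags, *filename;
  char line[ProcMapsIterator::Buffer::kBufSize];
  while (it.Next(&start, &end, &flags, &offset, &inode, &filename)) {
    int n = ProcMapsIterator::FormatLine(line, sizeof(line), start, end,
                                         flags, offset, inode, filename,
                                         0 /* no dev_t from Next() */);
    if (n > 0) fwrite(line, 1, n, stdout);
  }
}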


@ -1,232 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2006, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// All functions here are thread-hostile due to file caching unless
// commented otherwise.
#ifndef _SYSINFO_H_
#define _SYSINFO_H_
#include "../config.h"
#include <time.h>
#if (defined(_WIN32) || defined(__MINGW32__)) && (!defined(__CYGWIN__) && !defined(__CYGWIN32__))
#include <windows.h> // for DWORD
#include <tlhelp32.h> // for CreateToolhelp32Snapshot
#endif
#ifdef HAVE_UNISTD_H
#include <unistd.h> // for pid_t
#endif
#include <stddef.h> // for size_t
#include <limits.h> // for PATH_MAX
#include "base/basictypes.h"
#include "base/logging.h" // for RawFD
// This getenv function is safe to call before the C runtime is initialized.
// On Windows, it utilizes GetEnvironmentVariable() and on unix it uses
// /proc/self/environ instead of calling getenv(). It's intended to be used in
// routines that run before main(), when the state required for getenv() may
// not be set up yet. In particular, errno isn't set up until relatively late
// (after the pthreads library has a chance to make it threadsafe), and
// getenv() doesn't work until then.
// On some platforms, this call will utilize the same static buffer for
// repeated GetenvBeforeMain() calls. Callers should not expect pointers from
// this routine to be long lived.
// Note that on unix, /proc only has the environment at the time the
// application was started, so this routine ignores setenv() calls/etc. Also
// note it only reads the first 16K of the environment.
extern const char* GetenvBeforeMain(const char* name);
// This takes as an argument an environment-variable name (like
// CPUPROFILE) whose value is supposed to be a file-path, and sets
// path to that path, and returns true. Non-trivial for surprising
// reasons, as documented in sysinfo.cc. path must have space for PATH_MAX bytes.
extern bool GetUniquePathFromEnv(const char* env_name, char* path);
extern int GetSystemCPUsCount();
void SleepForMilliseconds(int milliseconds);
// Return true if we're running POSIX (e.g., NPTL on Linux) threads,
// as opposed to a non-POSIX thread library. The thing that we care
// about is whether a thread's pid is the same as the thread that
// spawned it. If so, this function returns true.
// Thread-safe.
// Note: We consider false negatives to be OK.
bool HasPosixThreads();
#ifndef SWIG // SWIG doesn't like struct Buffer and variable arguments.
// A ProcMapsIterator abstracts access to /proc/maps for a given
// process. Needs to be stack-allocatable and avoid using stdio/malloc
// so it can be used in the google stack dumper, heap-profiler, etc.
//
// On Windows and Mac OS X, this iterator iterates *only* over DLLs
// mapped into this process space. For Linux, FreeBSD, and Solaris,
// it iterates over *all* mapped memory regions, including anonymous
// mmaps. For other O/Ss, it is unlikely to work at all, and Valid()
// will always return false. Also note: this routine only works on
// FreeBSD if procfs is mounted: make sure this is in your /etc/fstab:
// proc /proc procfs rw 0 0
class ProcMapsIterator {
public:
struct Buffer {
#ifdef __FreeBSD__
// FreeBSD requires us to read all of the maps file at once, so
// we have to make a buffer that's "always" big enough
static const size_t kBufSize = 102400;
#else // a one-line buffer is good enough
static const size_t kBufSize = PATH_MAX + 1024;
#endif
char buf_[kBufSize];
};
// Create a new iterator for the specified pid. pid can be 0 for "self".
explicit ProcMapsIterator(pid_t pid);
// Create an iterator with specified storage (for use in signal
// handler). "buffer" should point to a ProcMapsIterator::Buffer
// buffer can be NULL in which case a bufer will be allocated.
ProcMapsIterator(pid_t pid, Buffer *buffer);
// Iterate through maps_backing instead of maps if use_maps_backing
// is true. Otherwise the same as above. buffer can be NULL and
// it will allocate a buffer itself.
ProcMapsIterator(pid_t pid, Buffer *buffer,
bool use_maps_backing);
// Returns true if the iterator successfully initialized.
bool Valid() const;
// Returns a pointer to the most recently parsed line. Only valid
// after Next() returns true, and until the iterator is destroyed or
// Next() is called again. This may give strange results on non-Linux
// systems. Prefer FormatLine() if that may be a concern.
const char *CurrentLine() const { return stext_; }
// Writes the "canonical" form of the /proc/xxx/maps info for a single
// line to the passed-in buffer. Returns the number of bytes written,
// or 0 if it was not able to write the complete line. (To guarantee
// success, buffer should have size at least Buffer::kBufSize.)
// Takes as arguments values set via a call to Next(). The
// "canonical" form of the line (taken from linux's /proc/xxx/maps):
// <start_addr(hex)>-<end_addr(hex)> <perms(rwxp)> <offset(hex)>
// <major_dev(hex)>:<minor_dev(hex)> <inode> <filename>
// e.g.
// 08048000-0804c000 r-xp 00000000 03:01 3793678 /bin/cat
// If you don't have the dev_t (dev), feel free to pass in 0.
// (Next() doesn't return a dev_t, though NextExt does.)
//
// Note: if filename and flags were obtained via a call to Next(),
// then the output of this function is only valid if Next() returned
// true, and only until the iterator is destroyed or Next() is
// called again. (Since filename, at least, points into CurrentLine.)
static int FormatLine(char* buffer, int bufsize,
uint64 start, uint64 end, const char *flags,
uint64 offset, int64 inode, const char *filename,
dev_t dev);
// Find the next entry in /proc/maps; return true if found or false
// if at the end of the file.
//
// Any of the result pointers can be NULL if you're not interested
// in those values.
//
// If "flags" and "filename" are passed, they end up pointing to
// storage within the ProcMapsIterator that is valid only until the
// iterator is destroyed or Next() is called again. The caller may
// modify the contents of these strings (up as far as the first NUL,
// and only until the subsequent call to Next()) if desired.
// The offsets are all uint64 in order to handle the case of a
// 32-bit process running on a 64-bit kernel
//
// IMPORTANT NOTE: see top-of-class notes for details about what
// mapped regions Next() iterates over, depending on O/S.
// TODO(csilvers): make flags and filename const.
bool Next(uint64 *start, uint64 *end, char **flags,
uint64 *offset, int64 *inode, char **filename);
bool NextExt(uint64 *start, uint64 *end, char **flags,
uint64 *offset, int64 *inode, char **filename,
uint64 *file_mapping, uint64 *file_pages,
uint64 *anon_mapping, uint64 *anon_pages,
dev_t *dev);
~ProcMapsIterator();
private:
void Init(pid_t pid, Buffer *buffer, bool use_maps_backing);
char *ibuf_; // input buffer
char *stext_; // start of text
char *etext_; // end of text
char *nextline_; // start of next line
char *ebuf_; // end of buffer (1 char for a nul)
#if (defined(_WIN32) || defined(__MINGW32__)) && (!defined(__CYGWIN__) && !defined(__CYGWIN32__))
HANDLE snapshot_; // filehandle on dll info
// In a change from the usual W-A pattern, there is no A variant of
// MODULEENTRY32. Tlhelp32.h #defines the W variant, but not the A.
// We want the original A variants, and this #undef is the only
// way I see to get them. Redefining it when we're done prevents us
// from affecting other .cc files.
# ifdef MODULEENTRY32 // Alias of W
# undef MODULEENTRY32
MODULEENTRY32 module_; // info about current dll (and dll iterator)
# define MODULEENTRY32 MODULEENTRY32W
# else // It's the ascii, the one we want.
MODULEENTRY32 module_; // info about current dll (and dll iterator)
# endif
#elif defined(__MACH__)
int current_image_; // dll's are called "images" in macos parlance
int current_load_cmd_; // the segment of this dll we're examining
#elif defined(__sun__) // Solaris
int fd_;
char current_filename_[PATH_MAX];
#else
int fd_; // filehandle on /proc/*/maps
#endif
pid_t pid_;
char flags_[10];
Buffer* dynamic_buffer_; // dynamically-allocated Buffer
bool using_maps_backing_; // true if we are looking at maps_backing instead of maps.
};
#endif /* #ifndef SWIG */
// Helper routines
namespace tcmalloc {
int FillProcSelfMaps(char buf[], int size, bool* wrote_all);
void DumpProcSelfMaps(RawFD fd);
}
#endif /* #ifndef _SYSINFO_H_ */


@ -1,134 +0,0 @@
// Copyright (c) 2008, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
// Author: Le-Chun Wu
//
// This header file contains the macro definitions for thread safety
// annotations that allow the developers to document the locking policies
// of their multi-threaded code. The annotations can also help program
// analysis tools to identify potential thread safety issues.
//
// The annotations are implemented using GCC's "attributes" extension.
// Using the macros defined here instead of the raw GCC attributes allows
// for portability and future compatibility.
//
// This functionality is not yet fully implemented in perftools,
// but may be one day.
#ifndef BASE_THREAD_ANNOTATIONS_H_
#define BASE_THREAD_ANNOTATIONS_H_
#if defined(__GNUC__) \
&& (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 4)) \
&& defined(__SUPPORT_TS_ANNOTATION__) && (!defined(SWIG))
#define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x))
#else
#define THREAD_ANNOTATION_ATTRIBUTE__(x) // no-op
#endif
// Document if a shared variable/field needs to be protected by a lock.
// GUARDED_BY allows the user to specify a particular lock that should be
// held when accessing the annotated variable, while GUARDED_VAR only
// indicates a shared variable should be guarded (by any lock). GUARDED_VAR
// is primarily used when the client cannot express the name of the lock.
#define GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(guarded_by(x))
#define GUARDED_VAR THREAD_ANNOTATION_ATTRIBUTE__(guarded)
// Document if the memory location pointed to by a pointer should be guarded
// by a lock when dereferencing the pointer. Similar to GUARDED_VAR,
// PT_GUARDED_VAR is primarily used when the client cannot express the name
// of the lock. Note that a pointer variable to a shared memory location
// could itself be a shared variable. For example, if a shared global pointer
// q, which is guarded by mu1, points to a shared memory location that is
// guarded by mu2, q should be annotated as follows:
// int *q GUARDED_BY(mu1) PT_GUARDED_BY(mu2);
#define PT_GUARDED_BY(x) \
THREAD_ANNOTATION_ATTRIBUTE__(point_to_guarded_by(x))
#define PT_GUARDED_VAR \
THREAD_ANNOTATION_ATTRIBUTE__(point_to_guarded)
// Document the acquisition order between locks that can be held
// simultaneously by a thread. For any two locks that need to be annotated
// to establish an acquisition order, only one of them needs the annotation.
// (i.e. you don't have to annotate both locks with both ACQUIRED_AFTER
// and ACQUIRED_BEFORE.)
#define ACQUIRED_AFTER(x) \
THREAD_ANNOTATION_ATTRIBUTE__(acquired_after(x))
#define ACQUIRED_BEFORE(x) \
THREAD_ANNOTATION_ATTRIBUTE__(acquired_before(x))
// The following three annotations document the lock requirements for
// functions/methods.
// Document if a function expects certain locks to be held before it is called
#define EXCLUSIVE_LOCKS_REQUIRED(x) \
THREAD_ANNOTATION_ATTRIBUTE__(exclusive_locks_required(x))
#define SHARED_LOCKS_REQUIRED(x) \
THREAD_ANNOTATION_ATTRIBUTE__(shared_locks_required(x))
// Document the locks acquired in the body of the function. These locks
// cannot be held when calling this function (as google3's Mutex locks are
// non-reentrant).
#define LOCKS_EXCLUDED(x) \
THREAD_ANNOTATION_ATTRIBUTE__(locks_excluded(x))
// Document the lock the annotated function returns without acquiring it.
#define LOCK_RETURNED(x) THREAD_ANNOTATION_ATTRIBUTE__(lock_returned(x))
// Document if a class/type is a lockable type (such as the Mutex class).
#define LOCKABLE THREAD_ANNOTATION_ATTRIBUTE__(lockable)
// Document if a class is a scoped lockable type (such as the MutexLock class).
#define SCOPED_LOCKABLE THREAD_ANNOTATION_ATTRIBUTE__(scoped_lockable)
// The following annotations specify lock and unlock primitives.
#define EXCLUSIVE_LOCK_FUNCTION(x) \
THREAD_ANNOTATION_ATTRIBUTE__(exclusive_lock(x))
#define SHARED_LOCK_FUNCTION(x) \
THREAD_ANNOTATION_ATTRIBUTE__(shared_lock(x))
#define EXCLUSIVE_TRYLOCK_FUNCTION(x) \
THREAD_ANNOTATION_ATTRIBUTE__(exclusive_trylock(x))
#define SHARED_TRYLOCK_FUNCTION(x) \
THREAD_ANNOTATION_ATTRIBUTE__(shared_trylock(x))
#define UNLOCK_FUNCTION(x) \
THREAD_ANNOTATION_ATTRIBUTE__(unlock(x))
// An escape hatch for thread safety analysis to ignore the annotated function.
#define NO_THREAD_SAFETY_ANALYSIS \
THREAD_ANNOTATION_ATTRIBUTE__(no_thread_safety_analysis)
#endif // BASE_THREAD_ANNOTATIONS_H_
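A hedged usage sketch for the annotations above; Mutex here stands for any lockable type, and all names are hypothetical:

#include "base/thread_annotations.h"

class LOCKABLE Mutex { /* any lockable type */ };

class Counter {
 public:
  // Caller must already hold mu_.
  void IncrementLocked() EXCLUSIVE_LOCKS_REQUIRED(mu_) { ++value_; }
 private:
  Mutex mu_ ACQUIRED_BEFORE(flush_mu_);
  Mutex flush_mu_;
  int value_ GUARDED_BY(mu_);
};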


@ -1,83 +0,0 @@
/* Copyright (c) 2005-2007, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ---
* Author: Markus Gutschke
*/
#include "config.h"
#include "base/thread_lister.h"
#include <stdio.h> /* needed for NULL on some powerpc platforms (?!) */
#include <sys/types.h>
#include <unistd.h> /* for getpid */
#ifdef HAVE_SYS_PRCTL
# include <sys/prctl.h>
#endif
#include "base/linuxthreads.h"
/* Include other thread listers here that define THREADS macro
* only when they can provide a good implementation.
*/
#ifndef THREADS
/* Default trivial thread lister for single-threaded applications,
* or if the multi-threading code has not been ported yet.
*/
int TCMalloc_ListAllProcessThreads(void *parameter,
ListAllProcessThreadsCallBack callback, ...) {
int rc;
va_list ap;
pid_t pid;
#ifdef HAVE_SYS_PRCTL
int dumpable = prctl(PR_GET_DUMPABLE, 0);
if (!dumpable)
prctl(PR_SET_DUMPABLE, 1);
#endif
va_start(ap, callback);
pid = getpid();
rc = callback(parameter, 1, &pid, ap);
va_end(ap);
#ifdef HAVE_SYS_PRCTL
if (!dumpable)
prctl(PR_SET_DUMPABLE, 0);
#endif
return rc;
}
int TCMalloc_ResumeAllProcessThreads(int num_threads, pid_t *thread_pids) {
return 1;
}
#endif /* ifndef THREADS */


@ -1,83 +0,0 @@
/* -*- Mode: c; c-basic-offset: 2; indent-tabs-mode: nil -*- */
/* Copyright (c) 2005-2007, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ---
* Author: Markus Gutschke
*/
#ifndef _THREAD_LISTER_H
#define _THREAD_LISTER_H
#include <stdarg.h>
#include <sys/types.h>
#ifdef __cplusplus
extern "C" {
#endif
typedef int (*ListAllProcessThreadsCallBack)(void *parameter,
int num_threads,
pid_t *thread_pids,
va_list ap);
/* This function gets the list of all Linux threads of the current process
 * and passes them to the 'callback' along with the 'parameter' pointer; by
 * the time the callback is invoked, all the threads have been paused via
 * PTRACE_ATTACH.
 * The callback is executed from a separate thread which shares only the
 * address space, the filesystem, and the filehandles with the caller. Most
 * notably, it does not share the same pid and ppid; and if it terminates,
 * the rest of the application is still there. 'callback' is expected to
 * call, or arrange for a call to, TCMalloc_ResumeAllProcessThreads. This
 * happens automatically if the thread raises a synchronous signal (e.g.
 * SIGSEGV); asynchronous
* signals are blocked. If the 'callback' decides to unblock them, it must
* ensure that they cannot terminate the application, or that
* TCMalloc_ResumeAllProcessThreads will get called.
* It is an error for the 'callback' to make any library calls that could
* acquire locks. Most notably, this means that most system calls have to
* avoid going through libc. Also, this means that it is not legal to call
* exit() or abort().
* We return -1 on error and the return value of 'callback' on success.
*/
int TCMalloc_ListAllProcessThreads(void *parameter,
ListAllProcessThreadsCallBack callback, ...);
/* This function resumes the list of all linux threads that
* TCMalloc_ListAllProcessThreads pauses before giving to its
* callback. The function returns non-zero if at least one thread was
* suspended and has now been resumed.
*/
int TCMalloc_ResumeAllProcessThreads(int num_threads, pid_t *thread_pids);
#ifdef __cplusplus
}
#endif
#endif /* _THREAD_LISTER_H */
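As a usage illustration for this API, a hedged sketch of a callback that records how many threads were paused and then resumes them, as the contract above requires; the include path and the whole program are assumptions for demonstration only.

/* Hypothetical usage sketch, not part of the original sources. */
#include <stdio.h>
#include "base/thread_lister.h"

static int CountThreads(void *parameter, int num_threads,
                        pid_t *thread_pids, va_list ap) {
  /* All threads are paused here; avoid any library call that takes locks. */
  *(int *)parameter = num_threads;
  /* Per the contract above, the callback arranges the resume itself. */
  return TCMalloc_ResumeAllProcessThreads(num_threads, thread_pids);
}

int main(void) {
  int count = 0;
  if (TCMalloc_ListAllProcessThreads(&count, CountThreads) < 0)
    fprintf(stderr, "listing threads failed\n");
  else
    printf("threads paused and resumed: %d\n", count);
  return 0;
}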

View File

@ -1,143 +0,0 @@
// Copyright (c) 2008, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
// Author: Paul Pluzhnikov
//
// Allow dynamic symbol lookup in the kernel VDSO page.
//
// VDSOSupport -- a class representing kernel VDSO (if present).
//
#include "base/vdso_support.h"
#ifdef HAVE_VDSO_SUPPORT // defined in vdso_support.h
#include <fcntl.h>
#include <stddef.h> // for ptrdiff_t
#include "base/atomicops.h" // for MemoryBarrier
#include "base/linux_syscall_support.h"
#include "base/logging.h"
#include "base/dynamic_annotations.h"
#include "base/basictypes.h" // for COMPILE_ASSERT
using base::subtle::MemoryBarrier;
#ifndef AT_SYSINFO_EHDR
#define AT_SYSINFO_EHDR 33
#endif
namespace base {
const void *VDSOSupport::vdso_base_ = ElfMemImage::kInvalidBase;
VDSOSupport::VDSOSupport()
// If vdso_base_ is still set to kInvalidBase, we got here
// before VDSOSupport::Init has been called. Call it now.
: image_(vdso_base_ == ElfMemImage::kInvalidBase ? Init() : vdso_base_) {
}
// NOTE: we can't use GoogleOnceInit() below, because we can be
// called by tcmalloc, and none of the *once* stuff may be functional yet.
//
// In addition, we hope that the VDSOSupportHelper constructor
// causes this code to run before there are any threads, and before
// InitGoogle() has executed any chroot or setuid calls.
//
// Finally, even if there is a race here, it is harmless, because
// the operation should be idempotent.
const void *VDSOSupport::Init() {
if (vdso_base_ == ElfMemImage::kInvalidBase) {
    // Valgrind zaps AT_SYSINFO_EHDR and friends from the auxv[]
    // on the stack, so glibc behaves as if the VDSO were not present.
    // But going directly to the kernel via /proc/self/auxv below bypasses
    // Valgrind's zapping. So we check for Valgrind separately.
if (RunningOnValgrind()) {
vdso_base_ = NULL;
return NULL;
}
int fd = open("/proc/self/auxv", O_RDONLY);
if (fd == -1) {
// Kernel too old to have a VDSO.
vdso_base_ = NULL;
return NULL;
}
ElfW(auxv_t) aux;
while (read(fd, &aux, sizeof(aux)) == sizeof(aux)) {
if (aux.a_type == AT_SYSINFO_EHDR) {
COMPILE_ASSERT(sizeof(vdso_base_) == sizeof(aux.a_un.a_val),
unexpected_sizeof_pointer_NE_sizeof_a_val);
vdso_base_ = reinterpret_cast<void *>(aux.a_un.a_val);
break;
}
}
close(fd);
if (vdso_base_ == ElfMemImage::kInvalidBase) {
// Didn't find AT_SYSINFO_EHDR in auxv[].
vdso_base_ = NULL;
}
}
return vdso_base_;
}
const void *VDSOSupport::SetBase(const void *base) {
CHECK(base != ElfMemImage::kInvalidBase);
const void *old_base = vdso_base_;
vdso_base_ = base;
image_.Init(base);
return old_base;
}
bool VDSOSupport::LookupSymbol(const char *name,
const char *version,
int type,
SymbolInfo *info) const {
return image_.LookupSymbol(name, version, type, info);
}
bool VDSOSupport::LookupSymbolByAddress(const void *address,
SymbolInfo *info_out) const {
return image_.LookupSymbolByAddress(address, info_out);
}
// We need to make sure VDSOSupport::Init() is called before main() runs,
// since main() might do something like setuid or chroot. If VDSOSupport
// is used in any global constructor, this will happen, since
// VDSOSupport's constructor calls Init. But if not, we need to
// ensure it here, with a global constructor of our own. This
// is an allowed exception to the normal rule against non-trivial
// global constructors.
static class VDSOInitHelper {
public:
VDSOInitHelper() { VDSOSupport::Init(); }
} vdso_init_helper;
}
#endif // HAVE_VDSO_SUPPORT
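The auxv scan in Init() is easy to try in isolation; below is a hedged standalone sketch of the same technique. The use of glibc's <link.h> for ElfW() is an assumption; on modern glibc a single getauxval(AT_SYSINFO_EHDR) call achieves the same thing.

// Standalone sketch of the auxv scan performed by Init() (illustrative only).
#include <elf.h>     // for AT_SYSINFO_EHDR
#include <link.h>    // for the ElfW() macro
#include <fcntl.h>   // for open, O_RDONLY
#include <unistd.h>  // for read, close
#include <cstdio>

int main() {
  const void *vdso = nullptr;
  int fd = open("/proc/self/auxv", O_RDONLY);
  if (fd == -1) return 1;  // kernel too old to have a VDSO
  ElfW(auxv_t) aux;
  while (read(fd, &aux, sizeof(aux)) == sizeof(aux)) {
    if (aux.a_type == AT_SYSINFO_EHDR) {  // vma of the VDSO ELF header
      vdso = reinterpret_cast<const void *>(aux.a_un.a_val);
      break;
    }
  }
  close(fd);
  std::printf("vdso base: %p\n", vdso);
  return 0;
}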

View File

@ -1,132 +0,0 @@
// Copyright (c) 2008, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
// Author: Paul Pluzhnikov
//
// Allow dynamic symbol lookup in the kernel VDSO page.
//
// VDSO stands for "Virtual Dynamic Shared Object" -- a page of
// executable code, which looks like a shared library, but doesn't
// necessarily exist anywhere on disk, and which gets mmap()ed into
// every process by kernels which support VDSO, such as 2.6.x for 32-bit
// executables, and 2.6.24 and above for 64-bit executables.
//
// More details could be found here:
// http://www.trilithium.com/johan/2005/08/linux-gate/
//
// VDSOSupport -- a class representing kernel VDSO (if present).
//
// Example usage:
// VDSOSupport vdso;
// VDSOSupport::SymbolInfo info;
// typedef long (*FN)(unsigned *, void *, void *);
// FN fn = NULL;
// if (vdso.LookupSymbol("__vdso_getcpu", "LINUX_2.6", STT_FUNC, &info)) {
// fn = reinterpret_cast<FN>(info.address);
// }
#ifndef BASE_VDSO_SUPPORT_H_
#define BASE_VDSO_SUPPORT_H_
#include "../config.h"
#include "base/basictypes.h"
#include "base/elf_mem_image.h"
#ifdef HAVE_ELF_MEM_IMAGE
#define HAVE_VDSO_SUPPORT 1
#include <stdlib.h> // for NULL
namespace base {
// NOTE: this class may be used from within tcmalloc, and cannot
// use any memory allocation routines.
class VDSOSupport {
public:
VDSOSupport();
typedef ElfMemImage::SymbolInfo SymbolInfo;
typedef ElfMemImage::SymbolIterator SymbolIterator;
// Answers whether we have a vdso at all.
bool IsPresent() const { return image_.IsPresent(); }
  // Allows iteration over all VDSO symbols.
SymbolIterator begin() const { return image_.begin(); }
SymbolIterator end() const { return image_.end(); }
// Look up versioned dynamic symbol in the kernel VDSO.
// Returns false if VDSO is not present, or doesn't contain given
// symbol/version/type combination.
// If info_out != NULL, additional details are filled in.
bool LookupSymbol(const char *name, const char *version,
int symbol_type, SymbolInfo *info_out) const;
// Find info about symbol (if any) which overlaps given address.
// Returns true if symbol was found; false if VDSO isn't present
// or doesn't have a symbol overlapping given address.
// If info_out != NULL, additional details are filled in.
bool LookupSymbolByAddress(const void *address, SymbolInfo *info_out) const;
// Used only for testing. Replace real VDSO base with a mock.
  // Returns the previous value of vdso_base_. After you are done testing,
  // call SetBase() with the previous value to restore the original state.
const void *SetBase(const void *s);
// Computes vdso_base_ and returns it. Should be called as early as
// possible; before any thread creation, chroot or setuid.
static const void *Init();
private:
// image_ represents VDSO ELF image in memory.
// image_.ehdr_ == NULL implies there is no VDSO.
ElfMemImage image_;
// Cached value of auxv AT_SYSINFO_EHDR, computed once.
// This is a tri-state:
// kInvalidBase => value hasn't been determined yet.
// 0 => there is no VDSO.
// else => vma of VDSO Elf{32,64}_Ehdr.
//
// When testing with mock VDSO, low bit is set.
// The low bit is always available because vdso_base_ is
// page-aligned.
static const void *vdso_base_;
DISALLOW_COPY_AND_ASSIGN(VDSOSupport);
};
} // namespace base
#endif // HAVE_ELF_MEM_IMAGE
#endif // BASE_VDSO_SUPPORT_H_
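Expanding the header's own example into a hedged, compilable sketch: the symbol name __vdso_getcpu, its "LINUX_2.6" version, and the long(unsigned*, unsigned*, void*) signature are x86-64 Linux conventions assumed here, not guarantees made by this header.

// Hypothetical usage sketch, following the example in the comment above.
#include <elf.h>  // for STT_FUNC
#include <cstdio>
#include "base/vdso_support.h"

int main() {
  base::VDSOSupport vdso;
  if (!vdso.IsPresent()) return 1;
  base::VDSOSupport::SymbolInfo info;
  typedef long (*FN)(unsigned *cpu, unsigned *node, void *unused);
  FN fn = nullptr;
  if (vdso.LookupSymbol("__vdso_getcpu", "LINUX_2.6", STT_FUNC, &info))
    fn = reinterpret_cast<FN>(const_cast<void *>(info.address));
  unsigned cpu = 0;
  if (fn != nullptr && fn(&cpu, nullptr, nullptr) == 0)
    std::printf("running on cpu %u\n", cpu);
  return 0;
}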

View File

@ -1,387 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2008, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
// Author: Sanjay Ghemawat <opensource@google.com>
#include "config.h"
#include <algorithm>
#include "central_freelist.h"
#include "internal_logging.h" // for ASSERT, MESSAGE
#include "linked_list.h" // for SLL_Next, SLL_Push, etc
#include "page_heap.h" // for PageHeap
#include "static_vars.h" // for Static
using std::min;
using std::max;
namespace tcmalloc {
void CentralFreeList::Init(size_t cl) {
size_class_ = cl;
tcmalloc::DLL_Init(&empty_);
tcmalloc::DLL_Init(&nonempty_);
num_spans_ = 0;
counter_ = 0;
max_cache_size_ = kMaxNumTransferEntries;
#ifdef TCMALLOC_SMALL_BUT_SLOW
// Disable the transfer cache for the small footprint case.
cache_size_ = 0;
#else
cache_size_ = 16;
#endif
if (cl > 0) {
// Limit the maximum size of the cache based on the size class. If this
// is not done, large size class objects will consume a lot of memory if
// they just sit in the transfer cache.
int32_t bytes = Static::sizemap()->ByteSizeForClass(cl);
int32_t objs_to_move = Static::sizemap()->num_objects_to_move(cl);
ASSERT(objs_to_move > 0 && bytes > 0);
// Limit each size class cache to at most 1MB of objects or one entry,
// whichever is greater. Total transfer cache memory used across all
// size classes then can't be greater than approximately
// 1MB * kMaxNumTransferEntries.
// min and max are in parens to avoid macro-expansion on windows.
max_cache_size_ = (min)(max_cache_size_,
(max)(1, (1024 * 1024) / (bytes * objs_to_move)));
cache_size_ = (min)(cache_size_, max_cache_size_);
}
used_slots_ = 0;
ASSERT(cache_size_ <= max_cache_size_);
}
void CentralFreeList::ReleaseListToSpans(void* start) {
while (start) {
void *next = SLL_Next(start);
ReleaseToSpans(start);
start = next;
}
}
// MapObjectToSpan should logically be part of ReleaseToSpans. But
// this triggers an optimization bug in gcc 4.5.0. Moving to a
// separate function, and making sure that function isn't inlined,
// seems to work around the problem. The compiler bug is expected to be
// fixed in gcc 4.5.1.
static
#if __GNUC__ == 4 && __GNUC_MINOR__ == 5 && __GNUC_PATCHLEVEL__ == 0
__attribute__ ((noinline))
#endif
Span* MapObjectToSpan(void* object) {
const PageID p = reinterpret_cast<uintptr_t>(object) >> kPageShift;
Span* span = Static::pageheap()->GetDescriptor(p);
return span;
}
void CentralFreeList::ReleaseToSpans(void* object) {
Span* span = MapObjectToSpan(object);
ASSERT(span != NULL);
ASSERT(span->refcount > 0);
// If span is empty, move it to non-empty list
if (span->objects == NULL) {
tcmalloc::DLL_Remove(span);
tcmalloc::DLL_Prepend(&nonempty_, span);
Event(span, 'N', 0);
}
// The following check is expensive, so it is disabled by default
if (false) {
// Check that object does not occur in list
int got = 0;
for (void* p = span->objects; p != NULL; p = *((void**) p)) {
ASSERT(p != object);
got++;
}
ASSERT(got + span->refcount ==
(span->length<<kPageShift) /
Static::sizemap()->ByteSizeForClass(span->sizeclass));
}
counter_++;
span->refcount--;
if (span->refcount == 0) {
Event(span, '#', 0);
counter_ -= ((span->length<<kPageShift) /
Static::sizemap()->ByteSizeForClass(span->sizeclass));
tcmalloc::DLL_Remove(span);
--num_spans_;
// Release central list lock while operating on pageheap
lock_.Unlock();
{
SpinLockHolder h(Static::pageheap_lock());
Static::pageheap()->Delete(span);
}
lock_.Lock();
} else {
*(reinterpret_cast<void**>(object)) = span->objects;
span->objects = object;
}
}
bool CentralFreeList::EvictRandomSizeClass(
int locked_size_class, bool force) {
static int race_counter = 0;
int t = race_counter++; // Updated without a lock, but who cares.
if (t >= kNumClasses) {
while (t >= kNumClasses) {
t -= kNumClasses;
}
race_counter = t;
}
ASSERT(t >= 0);
ASSERT(t < kNumClasses);
if (t == locked_size_class) return false;
return Static::central_cache()[t].ShrinkCache(locked_size_class, force);
}
bool CentralFreeList::MakeCacheSpace() {
// Is there room in the cache?
if (used_slots_ < cache_size_) return true;
  // Can we expand this cache?
if (cache_size_ == max_cache_size_) return false;
// Ok, we'll try to grab an entry from some other size class.
if (EvictRandomSizeClass(size_class_, false) ||
EvictRandomSizeClass(size_class_, true)) {
// Succeeded in evicting, we're going to make our cache larger.
// However, we may have dropped and re-acquired the lock in
// EvictRandomSizeClass (via ShrinkCache and the LockInverter), so the
// cache_size may have changed. Therefore, check and verify that it is
// still OK to increase the cache_size.
if (cache_size_ < max_cache_size_) {
cache_size_++;
return true;
}
}
return false;
}
namespace {
class LockInverter {
private:
SpinLock *held_, *temp_;
public:
inline explicit LockInverter(SpinLock* held, SpinLock *temp)
: held_(held), temp_(temp) { held_->Unlock(); temp_->Lock(); }
inline ~LockInverter() { temp_->Unlock(); held_->Lock(); }
};
}
// This function is marked as NO_THREAD_SAFETY_ANALYSIS because it uses
// LockInverter to release one lock and acquire another in scoped-lock
// style, which our current annotation/analysis does not support.
bool CentralFreeList::ShrinkCache(int locked_size_class, bool force)
NO_THREAD_SAFETY_ANALYSIS {
// Start with a quick check without taking a lock.
if (cache_size_ == 0) return false;
// We don't evict from a full cache unless we are 'forcing'.
if (force == false && used_slots_ == cache_size_) return false;
// Grab lock, but first release the other lock held by this thread. We use
// the lock inverter to ensure that we never hold two size class locks
// concurrently. That can create a deadlock because there is no well
// defined nesting order.
LockInverter li(&Static::central_cache()[locked_size_class].lock_, &lock_);
ASSERT(used_slots_ <= cache_size_);
ASSERT(0 <= cache_size_);
if (cache_size_ == 0) return false;
if (used_slots_ == cache_size_) {
if (force == false) return false;
// ReleaseListToSpans releases the lock, so we have to make all the
// updates to the central list before calling it.
cache_size_--;
used_slots_--;
ReleaseListToSpans(tc_slots_[used_slots_].head);
return true;
}
cache_size_--;
return true;
}
void CentralFreeList::InsertRange(void *start, void *end, int N) {
SpinLockHolder h(&lock_);
if (N == Static::sizemap()->num_objects_to_move(size_class_) &&
MakeCacheSpace()) {
int slot = used_slots_++;
    ASSERT(slot >= 0);
ASSERT(slot < max_cache_size_);
TCEntry *entry = &tc_slots_[slot];
entry->head = start;
entry->tail = end;
return;
}
ReleaseListToSpans(start);
}
int CentralFreeList::RemoveRange(void **start, void **end, int N) {
ASSERT(N > 0);
lock_.Lock();
if (N == Static::sizemap()->num_objects_to_move(size_class_) &&
used_slots_ > 0) {
int slot = --used_slots_;
ASSERT(slot >= 0);
TCEntry *entry = &tc_slots_[slot];
*start = entry->head;
*end = entry->tail;
lock_.Unlock();
return N;
}
int result = 0;
*start = NULL;
*end = NULL;
// TODO: Prefetch multiple TCEntries?
result = FetchFromOneSpansSafe(N, start, end);
if (result != 0) {
while (result < N) {
int n;
void* head = NULL;
void* tail = NULL;
n = FetchFromOneSpans(N - result, &head, &tail);
if (!n) break;
result += n;
SLL_PushRange(start, head, tail);
}
}
lock_.Unlock();
return result;
}
int CentralFreeList::FetchFromOneSpansSafe(int N, void **start, void **end) {
int result = FetchFromOneSpans(N, start, end);
if (!result) {
Populate();
result = FetchFromOneSpans(N, start, end);
}
return result;
}
int CentralFreeList::FetchFromOneSpans(int N, void **start, void **end) {
if (tcmalloc::DLL_IsEmpty(&nonempty_)) return 0;
Span* span = nonempty_.next;
ASSERT(span->objects != NULL);
int result = 0;
void *prev, *curr;
curr = span->objects;
do {
prev = curr;
curr = *(reinterpret_cast<void**>(curr));
} while (++result < N && curr != NULL);
if (curr == NULL) {
// Move to empty list
tcmalloc::DLL_Remove(span);
tcmalloc::DLL_Prepend(&empty_, span);
Event(span, 'E', 0);
}
*start = span->objects;
*end = prev;
span->objects = curr;
SLL_SetNext(*end, NULL);
span->refcount += result;
counter_ -= result;
return result;
}
// Fetch memory from the system and add to the central cache freelist.
void CentralFreeList::Populate() {
// Release central list lock while operating on pageheap
lock_.Unlock();
const size_t npages = Static::sizemap()->class_to_pages(size_class_);
Span* span;
{
SpinLockHolder h(Static::pageheap_lock());
span = Static::pageheap()->New(npages);
if (span) Static::pageheap()->RegisterSizeClass(span, size_class_);
}
if (span == NULL) {
Log(kLog, __FILE__, __LINE__,
"tcmalloc: allocation failed", npages << kPageShift);
lock_.Lock();
return;
}
ASSERT(span->length == npages);
// Cache sizeclass info eagerly. Locking is not necessary.
// (Instead of being eager, we could just replace any stale info
// about this span, but that seems to be no better in practice.)
for (int i = 0; i < npages; i++) {
Static::pageheap()->CacheSizeClass(span->start + i, size_class_);
}
// Split the block into pieces and add to the free-list
// TODO: coloring of objects to avoid cache conflicts?
void** tail = &span->objects;
char* ptr = reinterpret_cast<char*>(span->start << kPageShift);
char* limit = ptr + (npages << kPageShift);
const size_t size = Static::sizemap()->ByteSizeForClass(size_class_);
int num = 0;
while (ptr + size <= limit) {
*tail = ptr;
tail = reinterpret_cast<void**>(ptr);
ptr += size;
num++;
}
ASSERT(ptr <= limit);
*tail = NULL;
span->refcount = 0; // No sub-object in use yet
// Add span to list of non-empty spans
lock_.Lock();
tcmalloc::DLL_Prepend(&nonempty_, span);
++num_spans_;
counter_ += num;
}
int CentralFreeList::tc_length() {
SpinLockHolder h(&lock_);
return used_slots_ * Static::sizemap()->num_objects_to_move(size_class_);
}
size_t CentralFreeList::OverheadBytes() {
SpinLockHolder h(&lock_);
if (size_class_ == 0) { // 0 holds the 0-sized allocations
return 0;
}
const size_t pages_per_span = Static::sizemap()->class_to_pages(size_class_);
const size_t object_size = Static::sizemap()->class_to_size(size_class_);
ASSERT(object_size > 0);
const size_t overhead_per_span = (pages_per_span * kPageSize) % object_size;
return num_spans_ * overhead_per_span;
}
} // namespace tcmalloc
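To make the transfer-cache cap in Init() concrete, here is a small hedged sketch that reproduces the max_cache_size_ arithmetic for one size class; the byte size and objects-to-move values below are made-up inputs, not real SizeMap outputs.

// Worked example of the cap computed in CentralFreeList::Init().
#include <algorithm>
#include <cstdio>

int main() {
  const int kMaxNumTransferEntries = 64;  // non-small-footprint default
  int bytes = 8192;        // hypothetical ByteSizeForClass(cl)
  int objs_to_move = 32;   // hypothetical num_objects_to_move(cl)
  // Each slot caches a chain of objs_to_move objects, so cap the total
  // cached memory per size class at roughly 1MB, but keep at least one slot.
  // min and max are in parens for the same Windows-macro reason as above.
  int max_cache_size = (std::min)(kMaxNumTransferEntries,
      (std::max)(1, (1024 * 1024) / (bytes * objs_to_move)));
  std::printf("max_cache_size = %d slots\n", max_cache_size);  // prints 4
  return 0;
}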

View File

@ -1,211 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2008, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
// Author: Sanjay Ghemawat <opensource@google.com>
#ifndef TCMALLOC_CENTRAL_FREELIST_H_
#define TCMALLOC_CENTRAL_FREELIST_H_
#include "config.h"
#include <stddef.h> // for size_t
#ifdef HAVE_STDINT_H
#include <stdint.h> // for int32_t
#endif
#include "base/spinlock.h"
#include "base/thread_annotations.h"
#include "common.h"
#include "span.h"
namespace tcmalloc {
// Data kept per size-class in central cache.
class CentralFreeList {
public:
// A CentralFreeList may be used before its constructor runs.
// So we prevent lock_'s constructor from doing anything to the
// lock_ state.
CentralFreeList() : lock_(base::LINKER_INITIALIZED) { }
void Init(size_t cl);
// These methods all do internal locking.
// Insert the specified range into the central freelist. N is the number of
// elements in the range. RemoveRange() is the opposite operation.
void InsertRange(void *start, void *end, int N);
// Returns the actual number of fetched elements and sets *start and *end.
int RemoveRange(void **start, void **end, int N);
// Returns the number of free objects in cache.
int length() {
SpinLockHolder h(&lock_);
return counter_;
}
// Returns the number of free objects in the transfer cache.
int tc_length();
// Returns the memory overhead (internal fragmentation) attributable
// to the freelist. This is memory lost when the size of elements
// in a freelist doesn't exactly divide the page-size (an 8192-byte
// page full of 5-byte objects would have 2 bytes memory overhead).
size_t OverheadBytes();
// Lock/Unlock the internal SpinLock. Used on the pthread_atfork call
// to set the lock in a consistent state before the fork.
void Lock() {
lock_.Lock();
}
void Unlock() {
lock_.Unlock();
}
private:
// TransferCache is used to cache transfers of
// sizemap.num_objects_to_move(size_class) back and forth between
// thread caches and the central cache for a given size class.
struct TCEntry {
void *head; // Head of chain of objects.
void *tail; // Tail of chain of objects.
};
// A central cache freelist can have anywhere from 0 to kMaxNumTransferEntries
// slots to put linked list chains into.
#ifdef TCMALLOC_SMALL_BUT_SLOW
// For the small memory model, the transfer cache is not used.
static const int kMaxNumTransferEntries = 0;
#else
  // Starting point for the maximum number of entries in the transfer cache.
  // The actual maximum for a given size class may be lower than this
  // maximum value.
static const int kMaxNumTransferEntries = 64;
#endif
// REQUIRES: lock_ is held
// Remove object from cache and return.
// Return NULL if no free entries in cache.
int FetchFromOneSpans(int N, void **start, void **end) EXCLUSIVE_LOCKS_REQUIRED(lock_);
// REQUIRES: lock_ is held
// Remove object from cache and return. Fetches
// from pageheap if cache is empty. Only returns
// NULL on allocation failure.
int FetchFromOneSpansSafe(int N, void **start, void **end) EXCLUSIVE_LOCKS_REQUIRED(lock_);
// REQUIRES: lock_ is held
// Release a linked list of objects to spans.
// May temporarily release lock_.
void ReleaseListToSpans(void *start) EXCLUSIVE_LOCKS_REQUIRED(lock_);
// REQUIRES: lock_ is held
// Release an object to spans.
// May temporarily release lock_.
void ReleaseToSpans(void* object) EXCLUSIVE_LOCKS_REQUIRED(lock_);
// REQUIRES: lock_ is held
// Populate cache by fetching from the page heap.
// May temporarily release lock_.
void Populate() EXCLUSIVE_LOCKS_REQUIRED(lock_);
// REQUIRES: lock is held.
// Tries to make room for a TCEntry. If the cache is full it will try to
// expand it at the cost of some other cache size. Return false if there is
// no space.
bool MakeCacheSpace() EXCLUSIVE_LOCKS_REQUIRED(lock_);
// REQUIRES: lock_ for locked_size_class is held.
  // Picks a "random" size class to steal a TCEntry slot from. In reality it
// just iterates over the sizeclasses but does so without taking a lock.
// Returns true on success.
// May temporarily lock a "random" size class.
static bool EvictRandomSizeClass(int locked_size_class, bool force);
// REQUIRES: lock_ is *not* held.
  // Tries to shrink the cache. If force is true, it will release objects to
  // spans if that allows the cache to shrink. Returns false if it failed to
  // shrink the cache. Decrements cache_size_ on success.
// May temporarily take lock_. If it takes lock_, the locked_size_class
// lock is released to keep the thread from holding two size class locks
// concurrently which could lead to a deadlock.
bool ShrinkCache(int locked_size_class, bool force) LOCKS_EXCLUDED(lock_);
  // This lock protects all the data members. used_slots_ and cache_size_
  // may be looked at without holding the lock.
SpinLock lock_;
// We keep linked lists of empty and non-empty spans.
size_t size_class_; // My size class
Span empty_; // Dummy header for list of empty spans
Span nonempty_; // Dummy header for list of non-empty spans
size_t num_spans_; // Number of spans in empty_ plus nonempty_
size_t counter_; // Number of free objects in cache entry
// Here we reserve space for TCEntry cache slots. Space is preallocated
// for the largest possible number of entries that any one size class may
// accumulate. Not all size classes are allowed to accumulate
// kMaxNumTransferEntries, so there is some wasted space for those size
// classes.
TCEntry tc_slots_[kMaxNumTransferEntries];
// Number of currently used cached entries in tc_slots_. This variable is
// updated under a lock but can be read without one.
int32_t used_slots_;
// The current number of slots for this size class. This is an
// adaptive value that is increased if there is lots of traffic
// on a given size class.
int32_t cache_size_;
// Maximum size of the cache for a given size class.
int32_t max_cache_size_;
};
// Pads each CentralFreeList object to a multiple of 64 bytes. Since some
// compilers (such as MSVC) don't like it when the padding is 0, I use
// template specialization to remove the padding entirely when
// sizeof(CentralFreeList) is a multiple of 64.
template<int kFreeListSizeMod64>
class CentralFreeListPaddedTo : public CentralFreeList {
private:
char pad_[64 - kFreeListSizeMod64];
};
template<>
class CentralFreeListPaddedTo<0> : public CentralFreeList {
};
class CentralFreeListPadded : public CentralFreeListPaddedTo<
sizeof(CentralFreeList) % 64> {
};
} // namespace tcmalloc
#endif // TCMALLOC_CENTRAL_FREELIST_H_
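The CentralFreeListPaddedTo specialization trick generalizes to any type; the short hedged sketch below shows the same pattern with a made-up Payload type, so the zero-padding corner case is visible in isolation.

// Sketch of the pad-to-a-64-byte-multiple specialization pattern.
template <typename T, int kSizeMod64>
struct PaddedTo : T {
  char pad_[64 - kSizeMod64];  // rounds the object up to a 64-byte multiple
};
template <typename T>
struct PaddedTo<T, 0> : T {};  // already a multiple of 64: no pad_ member

struct Payload { char data[40]; };  // made-up example type
typedef PaddedTo<Payload, sizeof(Payload) % 64> PaddedPayload;
static_assert(sizeof(PaddedPayload) % 64 == 0, "padded to cache lines");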

View File

@ -1,275 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2008, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
// Author: Sanjay Ghemawat <opensource@google.com>
#include <stdlib.h> // for getenv and strtol
#include "config.h"
#include "common.h"
#include "system-alloc.h"
#include "base/spinlock.h"
#include "getenv_safe.h" // TCMallocGetenvSafe
namespace tcmalloc {
// Define the maximum number of objects per size class to transfer between
// thread and central caches.
static int32 FLAGS_tcmalloc_transfer_num_objects;
static const int32 kDefaultTransferNumObjects = 512;
// The init function explicitly initializes the variable from the environment
// variable, avoiding C++ global construction that might defer initialization
// until after a malloc/new call.
static inline void InitTCMallocTransferNumObjects()
{
  if (UNLIKELY(FLAGS_tcmalloc_transfer_num_objects == 0)) {
    const char *envval = TCMallocGetenvSafe("TCMALLOC_TRANSFER_NUM_OBJ");
    FLAGS_tcmalloc_transfer_num_objects = !envval ? kDefaultTransferNumObjects :
      strtol(envval, NULL, 10);
  }
}
// Note: the following only works for "n"s that fit in 32-bits, but
// that is fine since we only use it for small sizes.
static inline int LgFloor(size_t n) {
int log = 0;
for (int i = 4; i >= 0; --i) {
int shift = (1 << i);
size_t x = n >> shift;
if (x != 0) {
n = x;
log += shift;
}
}
ASSERT(n == 1);
return log;
}
int AlignmentForSize(size_t size) {
int alignment = kAlignment;
if (size > kMaxSize) {
// Cap alignment at kPageSize for large sizes.
alignment = kPageSize;
} else if (size >= 128) {
// Space wasted due to alignment is at most 1/8, i.e., 12.5%.
alignment = (1 << LgFloor(size)) / 8;
} else if (size >= kMinAlign) {
// We need an alignment of at least 16 bytes to satisfy
// requirements for some SSE types.
alignment = kMinAlign;
}
// Maximum alignment allowed is page size alignment.
if (alignment > kPageSize) {
alignment = kPageSize;
}
CHECK_CONDITION(size < kMinAlign || alignment >= kMinAlign);
CHECK_CONDITION((alignment & (alignment - 1)) == 0);
return alignment;
}
int SizeMap::NumMoveSize(size_t size) {
if (size == 0) return 0;
// Use approx 64k transfers between thread and central caches.
int num = static_cast<int>(64.0 * 1024.0 / size);
if (num < 2) num = 2;
// Avoid bringing too many objects into small object free lists.
// If this value is too large:
// - We waste memory with extra objects sitting in the thread caches.
// - The central freelist holds its lock for too long while
// building a linked list of objects, slowing down the allocations
// of other threads.
// If this value is too small:
// - We go to the central freelist too often and we have to acquire
// its lock each time.
// This value strikes a balance between the constraints above.
if (num > FLAGS_tcmalloc_transfer_num_objects)
num = FLAGS_tcmalloc_transfer_num_objects;
return num;
}
// Initialize the mapping arrays
void SizeMap::Init() {
InitTCMallocTransferNumObjects();
// Do some sanity checking on add_amount[]/shift_amount[]/class_array[]
if (ClassIndex(0) != 0) {
Log(kCrash, __FILE__, __LINE__,
"Invalid class index for size 0", ClassIndex(0));
}
if (ClassIndex(kMaxSize) >= sizeof(class_array_)) {
Log(kCrash, __FILE__, __LINE__,
"Invalid class index for kMaxSize", ClassIndex(kMaxSize));
}
// Compute the size classes we want to use
int sc = 1; // Next size class to assign
int alignment = kAlignment;
CHECK_CONDITION(kAlignment <= kMinAlign);
for (size_t size = kAlignment; size <= kMaxSize; size += alignment) {
alignment = AlignmentForSize(size);
CHECK_CONDITION((size % alignment) == 0);
int blocks_to_move = NumMoveSize(size) / 4;
size_t psize = 0;
do {
psize += kPageSize;
// Allocate enough pages so leftover is less than 1/8 of total.
// This bounds wasted space to at most 12.5%.
while ((psize % size) > (psize >> 3)) {
psize += kPageSize;
}
// Continue to add pages until there are at least as many objects in
// the span as are needed when moving objects from the central
// freelists and spans to the thread caches.
} while ((psize / size) < (blocks_to_move));
const size_t my_pages = psize >> kPageShift;
if (sc > 1 && my_pages == class_to_pages_[sc-1]) {
// See if we can merge this into the previous class without
// increasing the fragmentation of the previous class.
const size_t my_objects = (my_pages << kPageShift) / size;
const size_t prev_objects = (class_to_pages_[sc-1] << kPageShift)
/ class_to_size_[sc-1];
if (my_objects == prev_objects) {
// Adjust last class to include this size
class_to_size_[sc-1] = size;
continue;
}
}
// Add new class
class_to_pages_[sc] = my_pages;
class_to_size_[sc] = size;
sc++;
}
if (sc != kNumClasses) {
Log(kCrash, __FILE__, __LINE__,
"wrong number of size classes: (found vs. expected )", sc, kNumClasses);
}
// Initialize the mapping arrays
int next_size = 0;
for (int c = 1; c < kNumClasses; c++) {
const int max_size_in_class = class_to_size_[c];
for (int s = next_size; s <= max_size_in_class; s += kAlignment) {
class_array_[ClassIndex(s)] = c;
}
next_size = max_size_in_class + kAlignment;
}
// Double-check sizes just to be safe
for (size_t size = 0; size <= kMaxSize;) {
const int sc = SizeClass(size);
if (sc <= 0 || sc >= kNumClasses) {
Log(kCrash, __FILE__, __LINE__,
"Bad size class (class, size)", sc, size);
}
if (sc > 1 && size <= class_to_size_[sc-1]) {
Log(kCrash, __FILE__, __LINE__,
"Allocating unnecessarily large class (class, size)", sc, size);
}
const size_t s = class_to_size_[sc];
if (size > s || s == 0) {
Log(kCrash, __FILE__, __LINE__,
"Bad (class, size, requested)", sc, s, size);
}
if (size <= kMaxSmallSize) {
size += 8;
} else {
size += 128;
}
}
// Initialize the num_objects_to_move array.
for (size_t cl = 1; cl < kNumClasses; ++cl) {
num_objects_to_move_[cl] = NumMoveSize(ByteSizeForClass(cl));
}
}
// Metadata allocator -- keeps stats about how many bytes are allocated.
static uint64_t metadata_system_bytes_ = 0;
static const size_t kMetadataAllocChunkSize = 8*1024*1024;
// As ThreadCache objects are allocated with MetaDataAlloc, and also
// CACHELINE_ALIGNED, we must use the same alignment as TCMalloc_SystemAlloc.
static const size_t kMetadataAllignment = sizeof(MemoryAligner);
static char *metadata_chunk_alloc_;
static size_t metadata_chunk_avail_;
static SpinLock metadata_alloc_lock(SpinLock::LINKER_INITIALIZED);
void* MetaDataAlloc(size_t bytes) {
if (bytes >= kMetadataAllocChunkSize) {
void *rv = TCMalloc_SystemAlloc(bytes,
NULL, kMetadataAllignment);
if (rv != NULL) {
metadata_system_bytes_ += bytes;
}
return rv;
}
SpinLockHolder h(&metadata_alloc_lock);
  // The following works by negating the address, taken as an integer, and
  // keeping the low log2(kMetadataAllignment) bits: the negated value plus
  // the original value is 0, which is what we want modulo
  // kMetadataAllignment. Note that we negate before masking the higher bits
  // off; otherwise we would have to mask them off after negation anyway.
intptr_t alignment = -reinterpret_cast<intptr_t>(metadata_chunk_alloc_) & (kMetadataAllignment-1);
if (metadata_chunk_avail_ < bytes + alignment) {
size_t real_size;
void *ptr = TCMalloc_SystemAlloc(kMetadataAllocChunkSize,
&real_size, kMetadataAllignment);
if (ptr == NULL) {
return NULL;
}
metadata_chunk_alloc_ = static_cast<char *>(ptr);
metadata_chunk_avail_ = real_size;
alignment = 0;
}
void *rv = static_cast<void *>(metadata_chunk_alloc_ + alignment);
bytes += alignment;
metadata_chunk_alloc_ += bytes;
metadata_chunk_avail_ -= bytes;
metadata_system_bytes_ += bytes;
return rv;
}
uint64_t metadata_system_bytes() { return metadata_system_bytes_; }
} // namespace tcmalloc
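The negate-and-mask step in MetaDataAlloc() is a standard alignment trick worth seeing in isolation; a hedged sketch with made-up address and alignment values follows.

// Standalone demo of the negate-and-mask alignment computation.
#include <cstdint>
#include <cstdio>

int main() {
  const uintptr_t kAlignment = 8;  // must be a power of two
  uintptr_t addr = 0x1003;         // made-up unaligned address
  // (0 - addr) & (kAlignment - 1) is the number of padding bytes needed so
  // that addr + padding is 0 modulo kAlignment.
  uintptr_t padding = (0 - addr) & (kAlignment - 1);
  std::printf("pad %u bytes -> 0x%x\n",
              unsigned(padding), unsigned(addr + padding));  // pad 5 -> 0x1008
  return 0;
}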

View File

@ -1,295 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2008, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
// Author: Sanjay Ghemawat <opensource@google.com>
//
// Common definitions for tcmalloc code.
#ifndef TCMALLOC_COMMON_H_
#define TCMALLOC_COMMON_H_
#include "config.h"
#include <stddef.h> // for size_t
#ifdef HAVE_STDINT_H
#include <stdint.h> // for uintptr_t, uint64_t
#endif
#include "internal_logging.h" // for ASSERT, etc
#include "base/basictypes.h" // for LIKELY, etc
#ifdef HAVE_BUILTIN_EXPECT
#define LIKELY(x) __builtin_expect(!!(x), 1)
#define UNLIKELY(x) __builtin_expect(!!(x), 0)
#else
#define LIKELY(x) (x)
#define UNLIKELY(x) (x)
#endif
// Type that can hold a page number
typedef uintptr_t PageID;
// Type that can hold the length of a run of pages
typedef uintptr_t Length;
//-------------------------------------------------------------------
// Configuration
//-------------------------------------------------------------------
#if defined(TCMALLOC_ALIGN_8BYTES)
// Unless we are forced to use 8-byte alignment, we use an alignment of
// at least 16 bytes to satisfy requirements for some SSE types.
// Keep in mind that with 16-byte alignment the space wasted due to
// alignment can reach 25% (e.g. a malloc of 24 bytes gets 32 bytes).
static const size_t kMinAlign = 8;
// Number of size classes created up to size 128.
static const size_t kBaseClasses = 16;
#else
static const size_t kMinAlign = 16;
static const size_t kBaseClasses = 9;
#endif
// Using large pages speeds up execution at the cost of larger memory use.
// Deallocation may speed up by a factor as the page map gets 8x smaller, so
// lookups in the page map result in fewer L2 cache misses, which translates to
// speedup for application/platform combinations with high L2 cache pressure.
// As the number of size classes increases with large pages, we increase
// the thread cache allowance to avoid passing more free ranges to and from
// central lists. Also, larger pages are less likely to get freed.
// These two factors cause a bounded increase in memory use.
#if defined(TCMALLOC_32K_PAGES)
static const size_t kPageShift = 15;
static const size_t kNumClasses = kBaseClasses + 69;
#elif defined(TCMALLOC_64K_PAGES)
static const size_t kPageShift = 16;
static const size_t kNumClasses = kBaseClasses + 73;
#else
static const size_t kPageShift = 13;
static const size_t kNumClasses = kBaseClasses + 79;
#endif
static const size_t kMaxThreadCacheSize = 4 << 20;
static const size_t kPageSize = 1 << kPageShift;
static const size_t kMaxSize = 256 * 1024;
static const size_t kAlignment = 8;
static const size_t kLargeSizeClass = 0;
// For all span-lengths < kMaxPages we keep an exact-size list.
static const size_t kMaxPages = 1 << (20 - kPageShift);
// Default bound on the total amount of thread caches.
#ifdef TCMALLOC_SMALL_BUT_SLOW
// Make the overall thread cache no bigger than that of a single thread
// for the small memory footprint case.
static const size_t kDefaultOverallThreadCacheSize = kMaxThreadCacheSize;
#else
static const size_t kDefaultOverallThreadCacheSize = 8u * kMaxThreadCacheSize;
#endif
// Lower bound on the per-thread cache sizes
static const size_t kMinThreadCacheSize = kMaxSize * 2;
// The number of bytes one ThreadCache will steal from another when
// the first ThreadCache is forced to Scavenge(), delaying the
// next call to Scavenge for this thread.
static const size_t kStealAmount = 1 << 16;
// The number of times that a deallocation can cause a freelist to
// go over its max_length() before shrinking max_length().
static const int kMaxOverages = 3;
// Maximum length we allow a per-thread free-list to have before we
// move objects from it into the corresponding central free-list. We
// want this big to avoid locking the central free-list too often. It
// should not hurt to make this list somewhat big because the
// scavenging code will shrink it down when its contents are not in use.
static const int kMaxDynamicFreeListLength = 8192;
static const Length kMaxValidPages = (~static_cast<Length>(0)) >> kPageShift;
#if defined __x86_64__
// All current and planned x86_64 processors only look at the lower 48 bits
// in virtual to physical address translation. The top 16 are thus unused.
// TODO(rus): Under what operating systems can we increase it safely to 17?
// This lets us use smaller page maps. On first allocation, a 36-bit page map
// uses only 96 KB instead of the 4.5 MB used by a 52-bit page map.
static const int kAddressBits = (sizeof(void*) < 8 ? (8 * sizeof(void*)) : 48);
#else
static const int kAddressBits = 8 * sizeof(void*);
#endif
namespace tcmalloc {
// Convert byte size into pages. This won't overflow, but may return
// an unreasonably large value if bytes is huge enough.
inline Length pages(size_t bytes) {
return (bytes >> kPageShift) +
((bytes & (kPageSize - 1)) > 0 ? 1 : 0);
}
// For larger allocation sizes, we use larger memory alignments to
// reduce the number of size classes.
int AlignmentForSize(size_t size);
// Size-class information + mapping
class SizeMap {
private:
// Number of objects to move between a per-thread list and a central
// list in one shot. We want this to be not too small so we can
// amortize the lock overhead for accessing the central list. Making
// it too big may temporarily cause unnecessary memory wastage in the
// per-thread free list until the scavenger cleans up the list.
int num_objects_to_move_[kNumClasses];
//-------------------------------------------------------------------
// Mapping from size to size_class and vice versa
//-------------------------------------------------------------------
// Sizes <= 1024 have an alignment >= 8. So for such sizes we have an
// array indexed by ceil(size/8). Sizes > 1024 have an alignment >= 128.
// So for these larger sizes we have an array indexed by ceil(size/128).
//
// We flatten both logical arrays into one physical array and use
// arithmetic to compute an appropriate index. The constants used by
// ClassIndex() were selected to make the flattening work.
//
// Examples:
// Size Expression Index
// -------------------------------------------------------
// 0 (0 + 7) / 8 0
// 1 (1 + 7) / 8 1
// ...
// 1024 (1024 + 7) / 8 128
// 1025 (1025 + 127 + (120<<7)) / 128 129
// ...
// 32768 (32768 + 127 + (120<<7)) / 128 376
static const int kMaxSmallSize = 1024;
static const size_t kClassArraySize =
((kMaxSize + 127 + (120 << 7)) >> 7) + 1;
unsigned char class_array_[kClassArraySize];
static inline size_t SmallSizeClass(size_t s) {
return (static_cast<uint32_t>(s) + 7) >> 3;
}
static inline size_t LargeSizeClass(size_t s) {
return (static_cast<uint32_t>(s) + 127 + (120 << 7)) >> 7;
}
// Compute index of the class_array[] entry for a given size
static inline size_t ClassIndex(size_t s) {
// Use unsigned arithmetic to avoid unnecessary sign extensions.
ASSERT(0 <= s);
ASSERT(s <= kMaxSize);
if (LIKELY(s <= kMaxSmallSize)) {
return SmallSizeClass(s);
} else {
return LargeSizeClass(s);
}
}
int NumMoveSize(size_t size);
// Mapping from size class to max size storable in that class
size_t class_to_size_[kNumClasses];
// Mapping from size class to number of pages to allocate at a time
size_t class_to_pages_[kNumClasses];
public:
// Constructor should do nothing since we rely on explicit Init()
// call, which may or may not be called before the constructor runs.
SizeMap() { }
// Initialize the mapping arrays
void Init();
inline int SizeClass(size_t size) {
return class_array_[ClassIndex(size)];
}
inline bool MaybeSizeClass(size_t size, size_t *size_class) {
size_t class_idx;
if (LIKELY(size <= kMaxSmallSize)) {
class_idx = SmallSizeClass(size);
} else if (size <= kMaxSize) {
class_idx = LargeSizeClass(size);
} else {
return false;
}
*size_class = class_array_[class_idx];
return true;
}
// Get the byte-size for a specified class
inline size_t ByteSizeForClass(size_t cl) {
return class_to_size_[cl];
}
// Mapping from size class to max size storable in that class
inline size_t class_to_size(size_t cl) {
return class_to_size_[cl];
}
// Mapping from size class to number of pages to allocate at a time
inline size_t class_to_pages(size_t cl) {
return class_to_pages_[cl];
}
// Number of objects to move between a per-thread list and a central
// list in one shot. We want this to be not too small so we can
// amortize the lock overhead for accessing the central list. Making
// it too big may temporarily cause unnecessary memory wastage in the
// per-thread free list until the scavenger cleans up the list.
inline int num_objects_to_move(size_t cl) {
return num_objects_to_move_[cl];
}
};
// Allocates "bytes" worth of memory and returns it. Increments
// metadata_system_bytes appropriately. May return NULL if allocation
// fails. Requires pageheap_lock is held.
void* MetaDataAlloc(size_t bytes);
// Returns the total number of bytes allocated from the system.
// Requires pageheap_lock is held.
uint64_t metadata_system_bytes();
// size/depth are made the same size as a pointer so that some generic
// code below can conveniently cast them back and forth to void*.
static const int kMaxStackDepth = 31;
struct StackTrace {
uintptr_t size; // Size of object
uintptr_t depth; // Number of PC values stored in array below
void* stack[kMaxStackDepth];
};
} // namespace tcmalloc
#endif // TCMALLOC_COMMON_H_
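The flattened class_array_ indexing can be sanity-checked against the table in the comment above; the hedged sketch below re-implements the two index formulas and asserts the documented example values.

// Checks the ClassIndex() flattening against the comment's example table.
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdio>

static size_t SmallIdx(size_t s) { return (static_cast<uint32_t>(s) + 7) >> 3; }
static size_t LargeIdx(size_t s) {
  return (static_cast<uint32_t>(s) + 127 + (120 << 7)) >> 7;
}

int main() {
  assert(SmallIdx(0) == 0);
  assert(SmallIdx(1) == 1);
  assert(SmallIdx(1024) == 128);
  assert(LargeIdx(1025) == 129);  // the large range continues right after 128
  assert(LargeIdx(32768) == 376);
  std::printf("index table matches the header comment\n");
  return 0;
}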

View File

@ -1,323 +0,0 @@
/* src/config.h. Generated from config.h.in by configure. */
/* src/config.h.in. Generated from configure.ac by autoheader. */
#ifndef GPERFTOOLS_CONFIG_H_
#define GPERFTOOLS_CONFIG_H_
/* Build runtime detection for sized delete */
/* #undef ENABLE_DYNAMIC_SIZED_DELETE */
/* Build sized deletion operators */
/* #undef ENABLE_SIZED_DELETE */
/* Define to 1 if compiler supports __builtin_expect */
#if _MSC_VER
#define HAVE_BUILTIN_EXPECT 0
#else
#define HAVE_BUILTIN_EXPECT 1
#endif
/* Define to 1 if compiler supports __builtin_stack_pointer */
/* #undef HAVE_BUILTIN_STACK_POINTER */
/* Define to 1 if you have the <conflict-signal.h> header file. */
/* #undef HAVE_CONFLICT_SIGNAL_H */
/* Define to 1 if you have the <cygwin/signal.h> header file. */
/* #undef HAVE_CYGWIN_SIGNAL_H */
/* Define to 1 if you have the declaration of `backtrace', and to 0 if you
don't. */
/* #undef HAVE_DECL_BACKTRACE */
/* Define to 1 if you have the declaration of `cfree', and to 0 if you don't.
*/
#define HAVE_DECL_CFREE 1
/* Define to 1 if you have the declaration of `memalign', and to 0 if you
don't. */
#define HAVE_DECL_MEMALIGN 1
/* Define to 1 if you have the declaration of `nanosleep', and to 0 if you
don't. */
/* #undef HAVE_DECL_NANOSLEEP */
/* Define to 1 if you have the declaration of `posix_memalign', and to 0 if
you don't. */
#define HAVE_DECL_POSIX_MEMALIGN 1
/* Define to 1 if you have the declaration of `pvalloc', and to 0 if you
don't. */
#define HAVE_DECL_PVALLOC 1
/* Define to 1 if you have the declaration of `sleep', and to 0 if you don't.
*/
/* #undef HAVE_DECL_SLEEP */
/* Define to 1 if you have the declaration of `uname', and to 0 if you don't.
*/
#define HAVE_DECL_UNAME 1
/* Define to 1 if you have the declaration of `valloc', and to 0 if you don't.
*/
#define HAVE_DECL_VALLOC 1
/* Define to 1 if you have the <dlfcn.h> header file. */
#define HAVE_DLFCN_H 1
/* Define to 1 if the system has the type `Elf32_Versym'. */
#define HAVE_ELF32_VERSYM 1
/* Define to 1 if you have the <execinfo.h> header file. */
#define HAVE_EXECINFO_H 1
/* Define to 1 if you have the <fcntl.h> header file. */
#define HAVE_FCNTL_H 1
/* Define to 1 if you have the <features.h> header file. */
#if !defined(__APPLE__) && !defined(__FreeBSD__)
#define HAVE_FEATURES_H 1
#endif
/* Define to 1 if you have the `fork' function. */
#define HAVE_FORK 1
/* Define to 1 if you have the `geteuid' function. */
#define HAVE_GETEUID 1
/* Define to 1 if you have the `getpagesize' function. */
#define HAVE_GETPAGESIZE 1
/* Define to 1 if you have the <glob.h> header file. */
#define HAVE_GLOB_H 1
/* Define to 1 if you have the <grp.h> header file. */
#define HAVE_GRP_H 1
/* Define to 1 if you have the <inttypes.h> header file. */
#define HAVE_INTTYPES_H 1
/* Define to 1 if you have the <libunwind.h> header file. */
//#define HAVE_LIBUNWIND_H 1
/* Define to 1 if you have the <linux/ptrace.h> header file. */
#if !defined(__APPLE__) && !defined(__FreeBSD__)
#define HAVE_LINUX_PTRACE_H 1
#endif
/* Define if this is Linux that has SIGEV_THREAD_ID */
#define HAVE_LINUX_SIGEV_THREAD_ID 1
/* Define to 1 if you have the <malloc.h> header file. */
#if !defined(__FreeBSD__)
#define HAVE_MALLOC_H 1
#endif
/* Define to 1 if you have the <memory.h> header file. */
#define HAVE_MEMORY_H 1
/* Define to 1 if you have a working `mmap' system call. */
#define HAVE_MMAP 1
/* define if the compiler implements namespaces */
#define HAVE_NAMESPACES 1
/* Define to 1 if you have the <poll.h> header file. */
#define HAVE_POLL_H 1
/* define if libc has program_invocation_name */
#if !defined(__APPLE__) && !defined(__FreeBSD__)
#define HAVE_PROGRAM_INVOCATION_NAME 1
#endif
/* Define if you have POSIX threads libraries and header files. */
#define HAVE_PTHREAD 1
/* defined to 1 if pthread symbols are exposed even without including
   pthread.h */
/* #undef HAVE_PTHREAD_DESPITE_ASKING_FOR */
/* Define to 1 if you have the <pwd.h> header file. */
#define HAVE_PWD_H 1
/* Define to 1 if you have the `sbrk' function. */
#define HAVE_SBRK 1
/* Define to 1 if you have the <sched.h> header file. */
#define HAVE_SCHED_H 1
/* Define to 1 if you have the <stdint.h> header file. */
#define HAVE_STDINT_H 1
/* Define to 1 if you have the <stdlib.h> header file. */
#define HAVE_STDLIB_H 1
/* Define to 1 if you have the <strings.h> header file. */
#define HAVE_STRINGS_H 1
/* Define to 1 if you have the <string.h> header file. */
#define HAVE_STRING_H 1
/* Define to 1 if the system has the type `struct mallinfo'. */
//#if !defined(__APPLE__) && !defined(__FreeBSD__)
#if !defined(__APPLE__)
#define HAVE_STRUCT_MALLINFO 1
#endif
/* Define to 1 if you have the <sys/cdefs.h> header file. */
#define HAVE_SYS_CDEFS_H 1
/* Define to 1 if you have the <sys/param.h> header file. */
#define HAVE_SYS_PARAM_H 1
/* Define to 1 if you have the <sys/prctl.h> header file. */
#define HAVE_SYS_PRCTL_H 1
/* Define to 1 if you have the <sys/resource.h> header file. */
#define HAVE_SYS_RESOURCE_H 1
/* Define to 1 if you have the <sys/socket.h> header file. */
#define HAVE_SYS_SOCKET_H 1
/* Define to 1 if you have the <sys/stat.h> header file. */
#define HAVE_SYS_STAT_H 1
/* Define to 1 if you have the <sys/syscall.h> header file. */
#define HAVE_SYS_SYSCALL_H 1
/* Define to 1 if you have the <sys/types.h> header file. */
#define HAVE_SYS_TYPES_H 1
/* Define to 1 if you have the <sys/ucontext.h> header file. */
/* #undef HAVE_SYS_UCONTEXT_H */
/* Define to 1 if you have the <sys/wait.h> header file. */
#define HAVE_SYS_WAIT_H 1
/* Define to 1 if compiler supports __thread */
#define HAVE_TLS 1
/* Define to 1 if you have the <ucontext.h> header file. */
/* #undef HAVE_UCONTEXT_H */
/* Define to 1 if you have the <unistd.h> header file. */
#define HAVE_UNISTD_H 1
/* Whether <unwind.h> contains _Unwind_Backtrace */
#define HAVE_UNWIND_BACKTRACE 1
/* Define to 1 if you have the <unwind.h> header file. */
#define HAVE_UNWIND_H 1
/* Define to 1 if you have the <valgrind.h> header file. */
/* #undef HAVE_VALGRIND_H */
/* define if your compiler has __attribute__ */
#define HAVE___ATTRIBUTE__ 1
/* Define to 1 if compiler supports __environ */
#if !defined(__APPLE__) && !defined(__FreeBSD__)
#define HAVE___ENVIRON 1
#endif
/* Define to 1 if the system has the type `__int64'. */
/* #undef HAVE___INT64 */
/* prefix where we look for installed files */
#define INSTALL_PREFIX "/usr/local"
/* Define to 1 if int32_t is equivalent to intptr_t */
/* #undef INT32_EQUALS_INTPTR */
/* Define to the sub-directory in which libtool stores uninstalled libraries.
*/
#define LT_OBJDIR ".libs/"
/* Name of package */
#define PACKAGE "gperftools"
/* Define to the address where bug reports for this package should be sent. */
#define PACKAGE_BUGREPORT "gperftools@googlegroups.com"
/* Define to the full name of this package. */
#define PACKAGE_NAME "gperftools"
/* Define to the full name and version of this package. */
#define PACKAGE_STRING "gperftools 2.5"
/* Define to the one symbol short name of this package. */
#define PACKAGE_TARNAME "gperftools"
/* Define to the home page for this package. */
#define PACKAGE_URL ""
/* Define to the version of this package. */
#define PACKAGE_VERSION "2.5"
/* How to access the PC from a struct ucontext */
/* #undef PC_FROM_UCONTEXT */
/* Always the empty-string on non-windows systems. On windows, should be
"__declspec(dllexport)". This way, when we compile the dll, we export our
functions/classes. It's safe to define this here because config.h is only
used internally, to compile the DLL, and every DLL source file #includes
"config.h" before anything else. */
#define PERFTOOLS_DLL_DECL /**/
/* printf format code for printing a size_t and ssize_t */
#define PRIdS "ld"
/* printf format code for printing a size_t and ssize_t */
#define PRIuS "lu"
/* printf format code for printing a size_t and ssize_t */
#define PRIxS "lx"
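/* Illustrative usage (not from the original config.h): these codes are
   spliced into printf-style format strings, e.g.
     printf("allocated %" PRIuS " bytes (delta %" PRIdS ")\n",
            (size_t)n, (ssize_t)delta);
   where 'n' and 'delta' are example variables. */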
/* Mark the systems where we know it's bad if pthreads runs too
early before main (before threads are initialized, presumably). */
#ifdef __FreeBSD__
#define PTHREADS_CRASHES_IF_RUN_TOO_EARLY 1
#endif
/* Define to necessary symbol if this constant uses a non-standard name on
your system. */
/* #undef PTHREAD_CREATE_JOINABLE */
/* Define to 1 if you have the ANSI C header files. */
#define STDC_HEADERS 1
/* the namespace where STL code like vector<> is defined */
#define STL_NAMESPACE std
/* Define 32K of internal pages size for tcmalloc */
/* #undef TCMALLOC_32K_PAGES */
/* Define 64K of internal pages size for tcmalloc */
/* #undef TCMALLOC_64K_PAGES */
/* Define 8 bytes of allocation alignment for tcmalloc */
/* #undef TCMALLOC_ALIGN_8BYTES */
/* Version number of package */
#define VERSION "2.5"
/* C99 says: define this to get the PRI... macros from stdint.h */
#ifndef __STDC_FORMAT_MACROS
# define __STDC_FORMAT_MACROS 1
#endif
/* Define to `__inline__' or `__inline' if that's what the C compiler
calls it, or to nothing if 'inline' is not supported under any name. */
#ifndef __cplusplus
/* #undef inline */
#endif
#ifdef __MINGW32__
#include "windows/mingw.h"
#endif
#endif /* #ifndef GPERFTOOLS_CONFIG_H_ */

File diff suppressed because it is too large

View File

@ -1,63 +0,0 @@
/* -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
* Copyright (c) 2014, gperftools Contributors
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef GETENV_SAFE_H
#define GETENV_SAFE_H
#ifdef __cplusplus
extern "C" {
#endif
/*
* This getenv function is safe to call before the C runtime is initialized.
* On Windows, it utilizes GetEnvironmentVariable() and on unix it uses
 * /proc/self/environ instead of calling getenv(). It's intended to be used in
* routines that run before main(), when the state required for getenv() may
* not be set up yet. In particular, errno isn't set up until relatively late
* (after the pthreads library has a chance to make it threadsafe), and
* getenv() doesn't work until then.
 * On some platforms, this call will utilize the same static buffer for
* repeated GetenvBeforeMain() calls. Callers should not expect pointers from
* this routine to be long lived.
* Note that on unix, /proc only has the environment at the time the
* application was started, so this routine ignores setenv() calls/etc. Also
* note it only reads the first 16K of the environment.
*
 * NOTE: this is a version of GetenvBeforeMain that's usable from
 * C. The implementation is in sysinfo.cc.
*/
const char* TCMallocGetenvSafe(const char* name);
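/*
 * Illustrative sketch (not part of the original header): an early-running
 * routine might read a tuning knob like this; the variable names below are
 * examples.
 *
 *   const char* sample = TCMallocGetenvSafe("TCMALLOC_SAMPLE_PARAMETER");
 *   if (sample != NULL) {
 *     // parse 'sample' here, without relying on getenv()/errno being ready
 *   }
 */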
#ifdef __cplusplus
}
#endif
#endif

View File

@ -1,192 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2005, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
// Author: Craig Silverstein
//
// This is an internal header file used by profiler.cc. It defines
// the single (inline) function GetPC. GetPC is used in a signal
// handler to figure out the instruction that was being executed when
// the signal-handler was triggered.
//
// To get this, we use the ucontext_t argument to the signal-handler
// callback, which holds the full context of what was going on when
// the signal triggered. How to get from a ucontext_t to a Program
// Counter is OS-dependent.
#ifndef BASE_GETPC_H_
#define BASE_GETPC_H_
#include "config.h"
// On many linux systems, we may need _GNU_SOURCE to get access to
// the defined constants that define the register we want to see (eg
// REG_EIP). Note this #define must come first!
#define _GNU_SOURCE 1
// If #define _GNU_SOURCE causes problems, this might work instead.
// It will cause problems for FreeBSD, though, because it turns off
// the needed __BSD_VISIBLE.
//#define _XOPEN_SOURCE 500
#include <string.h> // for memcmp
#if defined(HAVE_SYS_UCONTEXT_H)
#include <sys/ucontext.h>
#elif defined(HAVE_UCONTEXT_H)
#include <ucontext.h> // for ucontext_t (and also mcontext_t)
#elif defined(HAVE_CYGWIN_SIGNAL_H)
#include <cygwin/signal.h>
typedef ucontext ucontext_t;
#endif
// Take the example where function Foo() calls function Bar(). For
// many architectures, Bar() is responsible for setting up and tearing
// down its own stack frame. In that case, it's possible for the
// interrupt to happen when execution is in Bar(), but the stack frame
// is not properly set up (either before it's done being set up, or
// after it's been torn down but before Bar() returns). In those
// cases, the stack trace cannot see the caller function anymore.
//
// GetPC can try to identify this situation, on architectures where it
// might occur, and unwind the current function call in that case to
// avoid false edges in the profile graph (that is, edges that appear
// to show a call skipping over a function). To do this, we hard-code
// in the asm instructions we might see when setting up or tearing
// down a stack frame.
//
// This is difficult to get right: the instructions depend on the
// processor, the compiler ABI, and even the optimization level. This
// is a best effort patch -- if we fail to detect such a situation, or
// mess up the PC, nothing happens; the returned PC is not used for
// any further processing.
struct CallUnrollInfo {
// Offset from (e)ip register where this instruction sequence
// should be matched. Interpreted as bytes. Offset 0 is the next
// instruction to execute. Be extra careful with negative offsets in
// architectures with variable instruction length (like x86) - it is
// not as easy as taking an offset to step one instruction back!
int pc_offset;
// The actual instruction bytes. Feel free to make it larger if you
// need a longer sequence.
unsigned char ins[16];
// How many bytes to match from ins array?
int ins_size;
// The offset from the stack pointer (e)sp where to look for the
// call return address. Interpreted as bytes.
int return_sp_offset;
};
// The dereferences needed to get the PC from a struct ucontext were
// determined at configure time, and stored in the macro
// PC_FROM_UCONTEXT in config.h. The only thing we need to do here,
// then, is to do the magic call-unrolling for systems that support it.
// -- Special case 1: linux x86, for which we have CallUnrollInfo
#if defined(__linux) && defined(__i386) && defined(__GNUC__)
static const CallUnrollInfo callunrollinfo[] = {
// Entry to a function: push %ebp; mov %esp,%ebp
// Top-of-stack contains the caller IP.
{ 0,
{0x55, 0x89, 0xe5}, 3,
0
},
// Entry to a function, second instruction: push %ebp; mov %esp,%ebp
// Top-of-stack contains the old frame, caller IP is +4.
{ -1,
{0x55, 0x89, 0xe5}, 3,
4
},
// Return from a function: RET.
// Top-of-stack contains the caller IP.
{ 0,
{0xc3}, 1,
0
}
};
inline void* GetPC(const ucontext_t& signal_ucontext) {
// See comment above struct CallUnrollInfo. Only try instruction
// flow matching if both eip and esp look reasonable.
const int eip = signal_ucontext.uc_mcontext.gregs[REG_EIP];
const int esp = signal_ucontext.uc_mcontext.gregs[REG_ESP];
if ((eip & 0xffff0000) != 0 && (~eip & 0xffff0000) != 0 &&
(esp & 0xffff0000) != 0) {
char* eip_char = reinterpret_cast<char*>(eip);
for (int i = 0; i < sizeof(callunrollinfo)/sizeof(*callunrollinfo); ++i) {
if (!memcmp(eip_char + callunrollinfo[i].pc_offset,
callunrollinfo[i].ins, callunrollinfo[i].ins_size)) {
// We have a match.
void **retaddr = (void**)(esp + callunrollinfo[i].return_sp_offset);
return *retaddr;
}
}
}
return (void*)eip;
}
// Special case #2: Windows, which has to do something totally different.
#elif defined(_WIN32) || defined(__CYGWIN__) || defined(__CYGWIN32__) || defined(__MINGW32__)
// If this is ever implemented, probably the way to do it is to have
// profiler.cc use a high-precision timer via timeSetEvent:
// http://msdn2.microsoft.com/en-us/library/ms712713.aspx
// We'd use it in mode TIME_CALLBACK_FUNCTION/TIME_PERIODIC.
// The callback function would be something like prof_handler, but
// alas the arguments are different: no ucontext_t! I don't know
// how we'd get the PC (using StackWalk64?)
// http://msdn2.microsoft.com/en-us/library/ms680650.aspx
#include "base/logging.h" // for RAW_LOG
#ifndef HAVE_CYGWIN_SIGNAL_H
typedef int ucontext_t;
#endif
inline void* GetPC(const struct ucontext_t& signal_ucontext) {
RAW_LOG(ERROR, "GetPC is not yet implemented on Windows\n");
return NULL;
}
// Normal cases. If this doesn't compile, it's probably because
// PC_FROM_UCONTEXT is the empty string. You need to figure out
// the right value for your system, and add it to the list in
// configure.ac (or set it manually in your config.h).
#else
inline void* GetPC(const ucontext_t& signal_ucontext) {
#if defined(__s390__) && !defined(__s390x__)
// Mask out the AMODE31 bit from the PC recorded in the context.
return (void*)((unsigned long)signal_ucontext.PC_FROM_UCONTEXT & 0x7fffffffUL);
#else
return (void*)signal_ucontext.PC_FROM_UCONTEXT; // defined in config.h
#endif
}
#endif
#endif // BASE_GETPC_H_
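// Illustrative sketch (not from the original file): a profiling signal
// handler installed with SA_SIGINFO receives a ucontext_t* as its third
// argument and can recover the interrupted PC via GetPC. RecordSample is a
// hypothetical bookkeeping routine, not part of this header.
//
//   static void prof_handler(int sig, siginfo_t* info, void* signal_ucontext) {
//     void* pc = GetPC(*reinterpret_cast<ucontext_t*>(signal_ucontext));
//     RecordSample(pc);
//   }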

View File

@ -1,93 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2005, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
// All Rights Reserved.
//
// Author: Maxim Lifantsev
//
// A file to ensure that components of the heap leak checker run before
// all global object constructors and after all global object
// destructors.
//
// This file must be the last library any binary links against.
// Otherwise, the heap checker may not be able to run early enough to
// catalog all the global objects in your program. If this happens,
// and later in the program you allocate memory and have one of these
// "uncataloged" global objects point to it, the heap checker will
// consider that allocation to be a leak, even though it's not (since
// the allocated object is reachable from global data and hence "live").
#include <stdlib.h> // for abort()
#include <gperftools/malloc_extension.h>
// A dummy variable to refer to from heap-checker.cc. This is to make
// sure this file is not optimized out by the linker.
bool heap_leak_checker_bcad_variable;
extern void HeapLeakChecker_AfterDestructors(); // in heap-checker.cc
// A helper class to ensure that some components of heap leak checking
// can happen before construction and after destruction
// of all global/static objects.
class HeapLeakCheckerGlobalPrePost {
public:
HeapLeakCheckerGlobalPrePost() {
if (count_ == 0) {
// The 'new int' will ensure that we have run an initial malloc
// hook, which will set up the heap checker via
// MallocHook_InitAtFirstAllocation_HeapLeakChecker. See malloc_hook.cc.
// This is done in this roundabout fashion in order to avoid self-deadlock
// if we directly called HeapLeakChecker_BeforeConstructors here.
delete new int;
// This needs to be called before the first allocation of an STL
// object, but after libc is done setting up threads (because it
// calls setenv, which requires a thread-aware errno). By
// putting it here, we hope it's the first bit of code executed
// after the libc global-constructor code.
MallocExtension::Initialize();
}
++count_;
}
~HeapLeakCheckerGlobalPrePost() {
if (count_ <= 0) abort();
--count_;
if (count_ == 0) HeapLeakChecker_AfterDestructors();
}
private:
// Counter of constructions/destructions of objects of this class
// (just in case there are more than one of them).
static int count_;
};
int HeapLeakCheckerGlobalPrePost::count_ = 0;
// The early-construction/late-destruction global object.
static const HeapLeakCheckerGlobalPrePost heap_leak_checker_global_pre_post;

File diff suppressed because it is too large

View File

@ -1,78 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2013, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// This file defines structs to accumulate memory allocation and deallocation
// counts. These structs are commonly used for malloc (in HeapProfileTable)
// and mmap (in MemoryRegionMap).
// A bucket is a data structure for heap profiling that stores a pair of a stack
// trace and counts of (de)allocation. Buckets are stored in a hash table
// which is declared as "HeapProfileBucket**".
//
// A hash value is computed from a stack trace. Collision in the hash table
// is resolved by separate chaining with linked lists. The links in the list
// are implemented with the member "HeapProfileBucket* next".
//
// A structure of a hash table HeapProfileBucket** bucket_table would be like:
// bucket_table[0] => NULL
// bucket_table[1] => HeapProfileBucket() => HeapProfileBucket() => NULL
// ...
// bucket_table[i] => HeapProfileBucket() => NULL
// ...
// bucket_table[n] => HeapProfileBucket() => NULL
#ifndef HEAP_PROFILE_STATS_H_
#define HEAP_PROFILE_STATS_H_
struct HeapProfileStats {
// Returns true if the two HeapProfileStats are semantically equal.
bool Equivalent(const HeapProfileStats& other) const {
return allocs - frees == other.allocs - other.frees &&
alloc_size - free_size == other.alloc_size - other.free_size;
}
int32 allocs; // Number of allocation calls.
int32 frees; // Number of free calls.
int64 alloc_size; // Total size of all allocated objects so far.
int64 free_size; // Total size of all freed objects so far.
};
// Allocation and deallocation statistics per each stack trace.
struct HeapProfileBucket : public HeapProfileStats {
// Longest stack trace we record.
static const int kMaxStackDepth = 32;
uintptr_t hash; // Hash value of the stack trace.
int depth; // Depth of stack trace.
const void** stack; // Stack trace.
HeapProfileBucket* next; // Next entry in hash-table.
};
#endif // HEAP_PROFILE_STATS_H_
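// Illustrative lookup over the chained table described above (editorial
// sketch, not from the original header; hash_of_stack() and kTableSize are
// example names):
//
//   const uintptr_t h = hash_of_stack(stack, depth);  // example helper
//   HeapProfileBucket* b = bucket_table[h % kTableSize];
//   while (b != NULL && !(b->hash == h && b->depth == depth &&
//                         std::equal(stack, stack + depth, b->stack)))
//     b = b->next;  // follow the separate chain until the trace matches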

View File

@ -1,631 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2006, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
// Author: Sanjay Ghemawat
// Maxim Lifantsev (refactoring)
//
#include <config.h>
#ifdef HAVE_UNISTD_H
#include <unistd.h> // for write()
#endif
#include <fcntl.h> // for open()
#ifdef HAVE_GLOB_H
#include <glob.h>
#ifndef GLOB_NOMATCH // true on some old cygwins
# define GLOB_NOMATCH 0
#endif
#endif
#ifdef HAVE_INTTYPES_H
#include <inttypes.h> // for PRIxPTR
#endif
#ifdef HAVE_POLL_H
#include <poll.h>
#endif
#include <errno.h>
#include <stdarg.h>
#include <string>
#include <map>
#include <algorithm> // for sort(), equal(), and copy()
#include "heap-profile-table.h"
#include "base/logging.h"
#include "raw_printer.h"
#include "symbolize.h"
#include <gperftools/stacktrace.h>
#include <gperftools/malloc_hook.h>
#include "memory_region_map.h"
#include "base/commandlineflags.h"
#include "base/logging.h" // for the RawFD I/O commands
#include "base/sysinfo.h"
using std::sort;
using std::equal;
using std::copy;
using std::string;
using std::map;
using tcmalloc::FillProcSelfMaps; // from sysinfo.h
using tcmalloc::DumpProcSelfMaps; // from sysinfo.h
//----------------------------------------------------------------------
DEFINE_bool(cleanup_old_heap_profiles,
EnvToBool("HEAP_PROFILE_CLEANUP", true),
"At initialization time, delete old heap profiles.");
DEFINE_int32(heap_check_max_leaks,
EnvToInt("HEAP_CHECK_MAX_LEAKS", 20),
"The maximum number of leak reports to print.");
//----------------------------------------------------------------------
// header of the dumped heap profile
static const char kProfileHeader[] = "heap profile: ";
static const char kProcSelfMapsHeader[] = "\nMAPPED_LIBRARIES:\n";
//----------------------------------------------------------------------
const char HeapProfileTable::kFileExt[] = ".heap";
//----------------------------------------------------------------------
static const int kHashTableSize = 179999; // Size for bucket_table_.
/*static*/ const int HeapProfileTable::kMaxStackDepth;
//----------------------------------------------------------------------
// We strip out a different number of stack frames in debug mode
// because less inlining happens in that case.
#ifdef NDEBUG
static const int kStripFrames = 2;
#else
static const int kStripFrames = 3;
#endif
// For sorting Stats or Buckets by in-use space
static bool ByAllocatedSpace(HeapProfileTable::Stats* a,
HeapProfileTable::Stats* b) {
// Return true iff "a" has more allocated space than "b"
return (a->alloc_size - a->free_size) > (b->alloc_size - b->free_size);
}
//----------------------------------------------------------------------
HeapProfileTable::HeapProfileTable(Allocator alloc,
DeAllocator dealloc,
bool profile_mmap)
: alloc_(alloc),
dealloc_(dealloc),
profile_mmap_(profile_mmap),
bucket_table_(NULL),
num_buckets_(0),
address_map_(NULL) {
// Make a hash table for buckets.
const int table_bytes = kHashTableSize * sizeof(*bucket_table_);
bucket_table_ = static_cast<Bucket**>(alloc_(table_bytes));
memset(bucket_table_, 0, table_bytes);
// Make an allocation map.
address_map_ =
new(alloc_(sizeof(AllocationMap))) AllocationMap(alloc_, dealloc_);
// Initialize.
memset(&total_, 0, sizeof(total_));
num_buckets_ = 0;
}
HeapProfileTable::~HeapProfileTable() {
// Free the allocation map.
address_map_->~AllocationMap();
dealloc_(address_map_);
address_map_ = NULL;
// Free the hash table.
for (int i = 0; i < kHashTableSize; i++) {
for (Bucket* curr = bucket_table_[i]; curr != 0; /**/) {
Bucket* bucket = curr;
curr = curr->next;
dealloc_(bucket->stack);
dealloc_(bucket);
}
}
dealloc_(bucket_table_);
bucket_table_ = NULL;
}
HeapProfileTable::Bucket* HeapProfileTable::GetBucket(int depth,
const void* const key[]) {
// Make hash-value
uintptr_t h = 0;
for (int i = 0; i < depth; i++) {
h += reinterpret_cast<uintptr_t>(key[i]);
h += h << 10;
h ^= h >> 6;
}
h += h << 3;
h ^= h >> 11;
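// Finish mixing: the adds/shifts/xors above follow a Jenkins
// one-at-a-time-style hash of the frame addresses.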
// Lookup stack trace in table
unsigned int buck = ((unsigned int) h) % kHashTableSize;
for (Bucket* b = bucket_table_[buck]; b != 0; b = b->next) {
if ((b->hash == h) &&
(b->depth == depth) &&
equal(key, key + depth, b->stack)) {
return b;
}
}
// Create new bucket
const size_t key_size = sizeof(key[0]) * depth;
const void** kcopy = reinterpret_cast<const void**>(alloc_(key_size));
copy(key, key + depth, kcopy);
Bucket* b = reinterpret_cast<Bucket*>(alloc_(sizeof(Bucket)));
memset(b, 0, sizeof(*b));
b->hash = h;
b->depth = depth;
b->stack = kcopy;
b->next = bucket_table_[buck];
bucket_table_[buck] = b;
num_buckets_++;
return b;
}
int HeapProfileTable::GetCallerStackTrace(
int skip_count, void* stack[kMaxStackDepth]) {
return MallocHook::GetCallerStackTrace(
stack, kMaxStackDepth, kStripFrames + skip_count + 1);
}
void HeapProfileTable::RecordAlloc(
const void* ptr, size_t bytes, int stack_depth,
const void* const call_stack[]) {
Bucket* b = GetBucket(stack_depth, call_stack);
b->allocs++;
b->alloc_size += bytes;
total_.allocs++;
total_.alloc_size += bytes;
AllocValue v;
v.set_bucket(b); // also did set_live(false); set_ignore(false)
v.bytes = bytes;
address_map_->Insert(ptr, v);
}
void HeapProfileTable::RecordFree(const void* ptr) {
AllocValue v;
if (address_map_->FindAndRemove(ptr, &v)) {
Bucket* b = v.bucket();
b->frees++;
b->free_size += v.bytes;
total_.frees++;
total_.free_size += v.bytes;
}
}
bool HeapProfileTable::FindAlloc(const void* ptr, size_t* object_size) const {
const AllocValue* alloc_value = address_map_->Find(ptr);
if (alloc_value != NULL) *object_size = alloc_value->bytes;
return alloc_value != NULL;
}
bool HeapProfileTable::FindAllocDetails(const void* ptr,
AllocInfo* info) const {
const AllocValue* alloc_value = address_map_->Find(ptr);
if (alloc_value != NULL) {
info->object_size = alloc_value->bytes;
info->call_stack = alloc_value->bucket()->stack;
info->stack_depth = alloc_value->bucket()->depth;
}
return alloc_value != NULL;
}
bool HeapProfileTable::FindInsideAlloc(const void* ptr,
size_t max_size,
const void** object_ptr,
size_t* object_size) const {
const AllocValue* alloc_value =
address_map_->FindInside(&AllocValueSize, max_size, ptr, object_ptr);
if (alloc_value != NULL) *object_size = alloc_value->bytes;
return alloc_value != NULL;
}
bool HeapProfileTable::MarkAsLive(const void* ptr) {
AllocValue* alloc = address_map_->FindMutable(ptr);
if (alloc && !alloc->live()) {
alloc->set_live(true);
return true;
}
return false;
}
void HeapProfileTable::MarkAsIgnored(const void* ptr) {
AllocValue* alloc = address_map_->FindMutable(ptr);
if (alloc) {
alloc->set_ignore(true);
}
}
// We'd be happier using snprintfer, but we don't, to reduce dependencies.
int HeapProfileTable::UnparseBucket(const Bucket& b,
char* buf, int buflen, int bufsize,
const char* extra,
Stats* profile_stats) {
if (profile_stats != NULL) {
profile_stats->allocs += b.allocs;
profile_stats->alloc_size += b.alloc_size;
profile_stats->frees += b.frees;
profile_stats->free_size += b.free_size;
}
int printed =
snprintf(buf + buflen, bufsize - buflen, "%6d: %8" PRId64 " [%6d: %8" PRId64 "] @%s",
b.allocs - b.frees,
b.alloc_size - b.free_size,
b.allocs,
b.alloc_size,
extra);
// If it looks like the snprintf failed, ignore the fact we printed anything
if (printed < 0 || printed >= bufsize - buflen) return buflen;
buflen += printed;
for (int d = 0; d < b.depth; d++) {
printed = snprintf(buf + buflen, bufsize - buflen, " 0x%08" PRIxPTR,
reinterpret_cast<uintptr_t>(b.stack[d]));
if (printed < 0 || printed >= bufsize - buflen) return buflen;
buflen += printed;
}
printed = snprintf(buf + buflen, bufsize - buflen, "\n");
if (printed < 0 || printed >= bufsize - buflen) return buflen;
buflen += printed;
return buflen;
}
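// For illustration (not in the original source): a bucket with 2 of its 10
// allocations still live could unparse to a line of roughly the form
//      2:     1024 [    10:     5120] @ 0x00401a2f 0x00401b13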
HeapProfileTable::Bucket**
HeapProfileTable::MakeSortedBucketList() const {
Bucket** list = static_cast<Bucket**>(alloc_(sizeof(Bucket) * num_buckets_));
int bucket_count = 0;
for (int i = 0; i < kHashTableSize; i++) {
for (Bucket* curr = bucket_table_[i]; curr != 0; curr = curr->next) {
list[bucket_count++] = curr;
}
}
RAW_DCHECK(bucket_count == num_buckets_, "");
sort(list, list + num_buckets_, ByAllocatedSpace);
return list;
}
void HeapProfileTable::IterateOrderedAllocContexts(
AllocContextIterator callback) const {
Bucket** list = MakeSortedBucketList();
AllocContextInfo info;
for (int i = 0; i < num_buckets_; ++i) {
*static_cast<Stats*>(&info) = *static_cast<Stats*>(list[i]);
info.stack_depth = list[i]->depth;
info.call_stack = list[i]->stack;
callback(info);
}
dealloc_(list);
}
int HeapProfileTable::FillOrderedProfile(char buf[], int size) const {
Bucket** list = MakeSortedBucketList();
// Our file format is "bucket, bucket, ..., bucket, proc_self_maps_info".
// In the cases buf is too small, we'd rather leave out the last
// buckets than leave out the /proc/self/maps info. To ensure that,
// we actually print the /proc/self/maps info first, then move it to
// the end of the buffer, then write the bucket info into whatever
// is remaining, and then move the maps info one last time to close
// any gaps. Whew!
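// Editorial illustration of the buffer across those steps:
//   1) [maps....................................]
//   2) [...............................maps-copy]   <- memmove to the end
//   3) [header|buckets...          |maps-copy   ]
//   4) [header|buckets...|maps-copy]                <- final memmove closes gap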
int map_length = snprintf(buf, size, "%s", kProcSelfMapsHeader);
if (map_length < 0 || map_length >= size) {
dealloc_(list);
return 0;
}
bool dummy; // "wrote_all" -- did /proc/self/maps fit in its entirety?
map_length += FillProcSelfMaps(buf + map_length, size - map_length, &dummy);
RAW_DCHECK(map_length <= size, "");
char* const map_start = buf + size - map_length; // move to end
memmove(map_start, buf, map_length);
size -= map_length;
Stats stats;
memset(&stats, 0, sizeof(stats));
int bucket_length = snprintf(buf, size, "%s", kProfileHeader);
if (bucket_length < 0 || bucket_length >= size) {
dealloc_(list);
return 0;
}
bucket_length = UnparseBucket(total_, buf, bucket_length, size,
" heapprofile", &stats);
// Dump the mmap list first.
if (profile_mmap_) {
BufferArgs buffer(buf, bucket_length, size);
MemoryRegionMap::IterateBuckets<BufferArgs*>(DumpBucketIterator, &buffer);
bucket_length = buffer.buflen;
}
for (int i = 0; i < num_buckets_; i++) {
bucket_length = UnparseBucket(*list[i], buf, bucket_length, size, "",
&stats);
}
RAW_DCHECK(bucket_length < size, "");
dealloc_(list);
RAW_DCHECK(buf + bucket_length <= map_start, "");
memmove(buf + bucket_length, map_start, map_length); // close the gap
return bucket_length + map_length;
}
// static
void HeapProfileTable::DumpBucketIterator(const Bucket* bucket,
BufferArgs* args) {
args->buflen = UnparseBucket(*bucket, args->buf, args->buflen, args->bufsize,
"", NULL);
}
inline
void HeapProfileTable::DumpNonLiveIterator(const void* ptr, AllocValue* v,
const DumpArgs& args) {
if (v->live()) {
v->set_live(false);
return;
}
if (v->ignore()) {
return;
}
Bucket b;
memset(&b, 0, sizeof(b));
b.allocs = 1;
b.alloc_size = v->bytes;
b.depth = v->bucket()->depth;
b.stack = v->bucket()->stack;
char buf[1024];
int len = UnparseBucket(b, buf, 0, sizeof(buf), "", args.profile_stats);
RawWrite(args.fd, buf, len);
}
// Callback from NonLiveSnapshot; adds the entry to arg->dest
// if the entry is not live and is not present in arg->base.
void HeapProfileTable::AddIfNonLive(const void* ptr, AllocValue* v,
AddNonLiveArgs* arg) {
if (v->live()) {
v->set_live(false);
} else {
if (arg->base != NULL && arg->base->map_.Find(ptr) != NULL) {
// Present in arg->base, so do not save
} else {
arg->dest->Add(ptr, *v);
}
}
}
bool HeapProfileTable::WriteProfile(const char* file_name,
const Bucket& total,
AllocationMap* allocations) {
RAW_VLOG(1, "Dumping non-live heap profile to %s", file_name);
RawFD fd = RawOpenForWriting(file_name);
if (fd != kIllegalRawFD) {
RawWrite(fd, kProfileHeader, strlen(kProfileHeader));
char buf[512];
int len = UnparseBucket(total, buf, 0, sizeof(buf), " heapprofile",
NULL);
RawWrite(fd, buf, len);
const DumpArgs args(fd, NULL);
allocations->Iterate<const DumpArgs&>(DumpNonLiveIterator, args);
RawWrite(fd, kProcSelfMapsHeader, strlen(kProcSelfMapsHeader));
DumpProcSelfMaps(fd);
RawClose(fd);
return true;
} else {
RAW_LOG(ERROR, "Failed dumping filtered heap profile to %s", file_name);
return false;
}
}
void HeapProfileTable::CleanupOldProfiles(const char* prefix) {
if (!FLAGS_cleanup_old_heap_profiles)
return;
string pattern = string(prefix) + ".*" + kFileExt;
#if defined(HAVE_GLOB_H)
glob_t g;
const int r = glob(pattern.c_str(), GLOB_ERR, NULL, &g);
if (r == 0 || r == GLOB_NOMATCH) {
const int prefix_length = strlen(prefix);
for (int i = 0; i < g.gl_pathc; i++) {
const char* fname = g.gl_pathv[i];
if ((strlen(fname) >= prefix_length) &&
(memcmp(fname, prefix, prefix_length) == 0)) {
RAW_VLOG(1, "Removing old heap profile %s", fname);
unlink(fname);
}
}
}
globfree(&g);
#else /* HAVE_GLOB_H */
RAW_LOG(WARNING, "Unable to remove old heap profiles (can't run glob())");
#endif
}
HeapProfileTable::Snapshot* HeapProfileTable::TakeSnapshot() {
Snapshot* s = new (alloc_(sizeof(Snapshot))) Snapshot(alloc_, dealloc_);
address_map_->Iterate(AddToSnapshot, s);
return s;
}
void HeapProfileTable::ReleaseSnapshot(Snapshot* s) {
s->~Snapshot();
dealloc_(s);
}
// Callback from TakeSnapshot; adds a single entry to snapshot
void HeapProfileTable::AddToSnapshot(const void* ptr, AllocValue* v,
Snapshot* snapshot) {
snapshot->Add(ptr, *v);
}
HeapProfileTable::Snapshot* HeapProfileTable::NonLiveSnapshot(
Snapshot* base) {
RAW_VLOG(2, "NonLiveSnapshot input: %d %d\n",
int(total_.allocs - total_.frees),
int(total_.alloc_size - total_.free_size));
Snapshot* s = new (alloc_(sizeof(Snapshot))) Snapshot(alloc_, dealloc_);
AddNonLiveArgs args;
args.dest = s;
args.base = base;
address_map_->Iterate<AddNonLiveArgs*>(AddIfNonLive, &args);
RAW_VLOG(2, "NonLiveSnapshot output: %d %d\n",
int(s->total_.allocs - s->total_.frees),
int(s->total_.alloc_size - s->total_.free_size));
return s;
}
// Information kept per unique bucket seen
struct HeapProfileTable::Snapshot::Entry {
int count;
int bytes;
Bucket* bucket;
Entry() : count(0), bytes(0) { }
// Order by decreasing bytes
bool operator<(const Entry& x) const {
return this->bytes > x.bytes;
}
};
// State used to generate the leak report. We keep a mapping from Bucket
// pointer to the collected stats for that bucket.
struct HeapProfileTable::Snapshot::ReportState {
map<Bucket*, Entry> buckets_;
};
// Callback from ReportLeaks; updates ReportState.
void HeapProfileTable::Snapshot::ReportCallback(const void* ptr,
AllocValue* v,
ReportState* state) {
Entry* e = &state->buckets_[v->bucket()]; // Creates empty Entry first time
e->bucket = v->bucket();
e->count++;
e->bytes += v->bytes;
}
void HeapProfileTable::Snapshot::ReportLeaks(const char* checker_name,
const char* filename,
bool should_symbolize) {
// This is only used by the heap leak checker, but is intimately
// tied to the allocation map that belongs in this module and is
// therefore placed here.
RAW_LOG(ERROR, "Leak check %s detected leaks of %" PRIuS " bytes "
"in %" PRIuS " objects",
checker_name,
size_t(total_.alloc_size),
size_t(total_.allocs));
// Group objects by Bucket
ReportState state;
map_.Iterate(&ReportCallback, &state);
// Sort buckets by decreasing leaked size
const int n = state.buckets_.size();
Entry* entries = new Entry[n];
int dst = 0;
for (map<Bucket*,Entry>::const_iterator iter = state.buckets_.begin();
iter != state.buckets_.end();
++iter) {
entries[dst++] = iter->second;
}
sort(entries, entries + n);
// Report a bounded number of leaks to keep the leak report from
// growing too long.
const int to_report =
(FLAGS_heap_check_max_leaks > 0 &&
n > FLAGS_heap_check_max_leaks) ? FLAGS_heap_check_max_leaks : n;
RAW_LOG(ERROR, "The %d largest leaks:", to_report);
// Print
SymbolTable symbolization_table;
for (int i = 0; i < to_report; i++) {
const Entry& e = entries[i];
for (int j = 0; j < e.bucket->depth; j++) {
symbolization_table.Add(e.bucket->stack[j]);
}
}
static const int kBufSize = 2<<10;
char buffer[kBufSize];
if (should_symbolize)
symbolization_table.Symbolize();
for (int i = 0; i < to_report; i++) {
const Entry& e = entries[i];
base::RawPrinter printer(buffer, kBufSize);
printer.Printf("Leak of %d bytes in %d objects allocated from:\n",
e.bytes, e.count);
for (int j = 0; j < e.bucket->depth; j++) {
const void* pc = e.bucket->stack[j];
printer.Printf("\t@ %" PRIxPTR " %s\n",
reinterpret_cast<uintptr_t>(pc), symbolization_table.GetSymbol(pc));
}
RAW_LOG(ERROR, "%s", buffer);
}
if (to_report < n) {
RAW_LOG(ERROR, "Skipping leaks numbered %d..%d",
to_report, n-1);
}
delete[] entries;
// TODO: Dump the sorted Entry list instead of dumping raw data?
// (should be much shorter)
if (!HeapProfileTable::WriteProfile(filename, total_, &map_)) {
RAW_LOG(ERROR, "Could not write pprof profile to %s", filename);
}
}
void HeapProfileTable::Snapshot::ReportObject(const void* ptr,
AllocValue* v,
char* unused) {
// Perhaps also log the allocation stack trace (unsymbolized)
// on this line in case somebody finds it useful.
RAW_LOG(ERROR, "leaked %" PRIuS " byte object %p", v->bytes, ptr);
}
void HeapProfileTable::Snapshot::ReportIndividualObjects() {
char unused;
map_.Iterate(ReportObject, &unused);
}

View File

@ -1,399 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2006, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
// Author: Sanjay Ghemawat
// Maxim Lifantsev (refactoring)
//
#ifndef BASE_HEAP_PROFILE_TABLE_H_
#define BASE_HEAP_PROFILE_TABLE_H_
#include "addressmap-inl.h"
#include "base/basictypes.h"
#include "base/logging.h" // for RawFD
#include "heap-profile-stats.h"
// Table to maintain heap profile data,
// i.e. the set of currently active heap memory allocations.
// The code is thread-unsafe and non-reentrant:
// each instance object must be used by one thread
// at a time, without self-recursion.
//
// TODO(maxim): add a unittest for this class.
class HeapProfileTable {
public:
// Extension to be used for heap profile files.
static const char kFileExt[];
// Longest stack trace we record.
static const int kMaxStackDepth = 32;
// data types ----------------------------
// Profile stats.
typedef HeapProfileStats Stats;
// Info we can return about an allocation.
struct AllocInfo {
size_t object_size; // size of the allocation
const void* const* call_stack; // call stack that made the allocation call
int stack_depth; // depth of call_stack
bool live;
bool ignored;
};
// Info we return about an allocation context.
// An allocation context is a unique caller stack trace
// of an allocation operation.
struct AllocContextInfo : public Stats {
int stack_depth; // Depth of stack trace
const void* const* call_stack; // Stack trace
};
// Memory (de)allocator interface we'll use.
typedef void* (*Allocator)(size_t size);
typedef void (*DeAllocator)(void* ptr);
// interface ---------------------------
HeapProfileTable(Allocator alloc, DeAllocator dealloc, bool profile_mmap);
~HeapProfileTable();
// Collect the stack trace for the function that asked to do the
// allocation for passing to RecordAlloc() below.
//
// The stack trace is stored in 'stack'. The stack depth is returned.
//
// 'skip_count' gives the number of stack frames between this call
// and the memory allocation function.
static int GetCallerStackTrace(int skip_count, void* stack[kMaxStackDepth]);
// Record an allocation at 'ptr' of 'bytes' bytes. 'stack_depth'
// and 'call_stack' identify the function that requested the
// allocation. They can be generated using GetCallerStackTrace() above.
void RecordAlloc(const void* ptr, size_t bytes,
int stack_depth, const void* const call_stack[]);
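//
// Illustrative call sequence (not part of the original header):
//   void* stack[kMaxStackDepth];
//   const int depth = GetCallerStackTrace(0, stack);
//   table->RecordAlloc(ptr, bytes, depth, stack);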
// Record the deallocation of memory at 'ptr'.
void RecordFree(const void* ptr);
// Return true iff we have recorded an allocation at 'ptr'.
// If yes, fill *object_size with the allocation byte size.
bool FindAlloc(const void* ptr, size_t* object_size) const;
// Same as FindAlloc, but fills all of *info.
bool FindAllocDetails(const void* ptr, AllocInfo* info) const;
// Return true iff "ptr" points into a recorded allocation.
// If yes, fill *object_ptr with the actual allocation address
// and *object_size with the allocation byte size.
// max_size specifies the largest currently possible allocation size.
bool FindInsideAlloc(const void* ptr, size_t max_size,
const void** object_ptr, size_t* object_size) const;
// If "ptr" points to a recorded allocation and it's not marked as live
// mark it as live and return true. Else return false.
// All allocations start as non-live.
bool MarkAsLive(const void* ptr);
// If "ptr" points to a recorded allocation, mark it as "ignored".
// Ignored objects are treated like other objects, except that they
// are skipped in heap checking reports.
void MarkAsIgnored(const void* ptr);
// Return current total (de)allocation statistics. They do not include
// mmap'ed regions.
const Stats& total() const { return total_; }
// Allocation data iteration callback: gets passed the object pointer and a
// fully-filled AllocInfo.
typedef void (*AllocIterator)(const void* ptr, const AllocInfo& info);
// Iterate over the allocation profile data calling "callback"
// for every allocation.
void IterateAllocs(AllocIterator callback) const {
address_map_->Iterate(MapArgsAllocIterator, callback);
}
// Allocation context profile data iteration callback
typedef void (*AllocContextIterator)(const AllocContextInfo& info);
// Iterate over the allocation context profile data calling "callback"
// for every allocation context. Allocation contexts are ordered by the
// size of allocated space.
void IterateOrderedAllocContexts(AllocContextIterator callback) const;
// Fill profile data into buffer 'buf' of size 'size'
// and return the actual size occupied by the dump in 'buf'.
// The profile buckets are dumped in the decreasing order
// of currently allocated bytes.
// We do not provision for 0-terminating 'buf'.
int FillOrderedProfile(char buf[], int size) const;
// Cleanup any old profile files matching prefix + ".*" + kFileExt.
static void CleanupOldProfiles(const char* prefix);
// Return a snapshot of the current contents of *this.
// Caller must call ReleaseSnapshot() on result when no longer needed.
// The result is only valid while this exists and until
// the snapshot is discarded by calling ReleaseSnapshot().
class Snapshot;
Snapshot* TakeSnapshot();
// Release a previously taken snapshot. snapshot must not
// be used after this call.
void ReleaseSnapshot(Snapshot* snapshot);
// Return a snapshot of every non-live, non-ignored object in *this.
// If "base" is non-NULL, skip any objects present in "base".
// As a side-effect, clears the "live" bit on every live object in *this.
// Caller must call ReleaseSnapshot() on result when no longer needed.
Snapshot* NonLiveSnapshot(Snapshot* base);
private:
// data types ----------------------------
// Hash table bucket to hold (de)allocation stats
// for a given allocation call stack trace.
typedef HeapProfileBucket Bucket;
// Info stored in the address map
struct AllocValue {
// Access to the stack-trace bucket
Bucket* bucket() const {
return reinterpret_cast<Bucket*>(bucket_rep & ~uintptr_t(kMask));
}
// This also does set_live(false).
void set_bucket(Bucket* b) { bucket_rep = reinterpret_cast<uintptr_t>(b); }
size_t bytes; // Number of bytes in this allocation
// Access to the allocation liveness flag (for leak checking)
bool live() const { return bucket_rep & kLive; }
void set_live(bool l) {
bucket_rep = (bucket_rep & ~uintptr_t(kLive)) | (l ? kLive : 0);
}
// Should this allocation be ignored if it looks like a leak?
bool ignore() const { return bucket_rep & kIgnore; }
void set_ignore(bool r) {
bucket_rep = (bucket_rep & ~uintptr_t(kIgnore)) | (r ? kIgnore : 0);
}
private:
// We store a few bits in the bottom bits of bucket_rep.
// (Alignment is at least four, so we have at least two bits.)
static const int kLive = 1;
static const int kIgnore = 2;
static const int kMask = kLive | kIgnore;
uintptr_t bucket_rep;
};
// helper for FindInsideAlloc
static size_t AllocValueSize(const AllocValue& v) { return v.bytes; }
typedef AddressMap<AllocValue> AllocationMap;
// Arguments that need to be passed to the DumpBucketIterator callback below.
struct BufferArgs {
BufferArgs(char* buf_arg, int buflen_arg, int bufsize_arg)
: buf(buf_arg),
buflen(buflen_arg),
bufsize(bufsize_arg) {
}
char* buf;
int buflen;
int bufsize;
DISALLOW_COPY_AND_ASSIGN(BufferArgs);
};
// Arguments that need to be passed to the DumpNonLiveIterator callback below.
struct DumpArgs {
DumpArgs(RawFD fd_arg, Stats* profile_stats_arg)
: fd(fd_arg),
profile_stats(profile_stats_arg) {
}
RawFD fd; // file to write to
Stats* profile_stats; // stats to update (may be NULL)
};
// helpers ----------------------------
// Unparse bucket b and print its portion of the profile dump into buf.
// We return the amount of space in buf that we use. We start printing
// at buf + buflen, and promise not to go beyond buf + bufsize.
// We do not provision for 0-terminating 'buf'.
//
// If profile_stats is non-NULL, we update *profile_stats by
// counting bucket b.
//
// "extra" is appended to the unparsed bucket. Typically it is empty,
// but may be set to something like " heapprofile" for the total
// bucket to indicate the type of the profile.
static int UnparseBucket(const Bucket& b,
char* buf, int buflen, int bufsize,
const char* extra,
Stats* profile_stats);
// Get the bucket for the caller stack trace 'key' of depth 'depth'
// creating the bucket if needed.
Bucket* GetBucket(int depth, const void* const key[]);
// Helper for IterateAllocs to do callback signature conversion
// from AllocationMap::Iterate to AllocIterator.
static void MapArgsAllocIterator(const void* ptr, AllocValue* v,
AllocIterator callback) {
AllocInfo info;
info.object_size = v->bytes;
info.call_stack = v->bucket()->stack;
info.stack_depth = v->bucket()->depth;
info.live = v->live();
info.ignored = v->ignore();
callback(ptr, info);
}
// Helper to dump a bucket.
inline static void DumpBucketIterator(const Bucket* bucket,
BufferArgs* args);
// Helper for DumpNonLiveProfile to do object-granularity
// heap profile dumping. It gets passed to AllocationMap::Iterate.
inline static void DumpNonLiveIterator(const void* ptr, AllocValue* v,
const DumpArgs& args);
// Helper for IterateOrderedAllocContexts and FillOrderedProfile.
// Creates a sorted list of Buckets whose length is num_buckets_.
// The caller is responsible for deallocating the returned list.
Bucket** MakeSortedBucketList() const;
// Helper for TakeSnapshot. Saves object to snapshot.
static void AddToSnapshot(const void* ptr, AllocValue* v, Snapshot* s);
// Arguments passed to AddIfNonLive
struct AddNonLiveArgs {
Snapshot* dest;
Snapshot* base;
};
// Helper for NonLiveSnapshot. Adds the object to the destination
// snapshot if it is non-live.
static void AddIfNonLive(const void* ptr, AllocValue* v,
AddNonLiveArgs* arg);
// Write contents of "*allocations" as a heap profile to
// "file_name". "total" must contain the total of all entries in
// "*allocations".
static bool WriteProfile(const char* file_name,
const Bucket& total,
AllocationMap* allocations);
// data ----------------------------
// Memory (de)allocator that we use.
Allocator alloc_;
DeAllocator dealloc_;
// Overall profile stats; we use only the Stats part,
// but make it a Bucket to pass to UnparseBucket.
Bucket total_;
bool profile_mmap_;
// Bucket hash table for malloc.
// We hand-craft one instead of using one of the pre-written
// ones because we do not want to use malloc when operating on the table.
// It is only a few lines of code, so no big deal.
Bucket** bucket_table_;
int num_buckets_;
// Map of all currently allocated objects and mapped regions we know about.
AllocationMap* address_map_;
DISALLOW_COPY_AND_ASSIGN(HeapProfileTable);
};
class HeapProfileTable::Snapshot {
public:
const Stats& total() const { return total_; }
// Report anything in this snapshot as a leak.
// May use new/delete for temporary storage.
// If should_symbolize is true, this will fork (which is not threadsafe)
// to turn addresses into symbol names. Set to false for maximum safety.
// Also writes a heap profile to "filename" that contains
// all of the objects in this snapshot.
void ReportLeaks(const char* checker_name, const char* filename,
bool should_symbolize);
// Report the addresses of all leaked objects.
// May use new/delete for temporary storage.
void ReportIndividualObjects();
bool Empty() const {
return (total_.allocs == 0) && (total_.alloc_size == 0);
}
private:
friend class HeapProfileTable;
// Total count/size are stored in a Bucket so we can reuse UnparseBucket
Bucket total_;
// We share the Buckets managed by the parent table, but have our
// own object->bucket map.
AllocationMap map_;
Snapshot(Allocator alloc, DeAllocator dealloc) : map_(alloc, dealloc) {
memset(&total_, 0, sizeof(total_));
}
// Callback used to populate a Snapshot object with entries found
// in another allocation map.
inline void Add(const void* ptr, const AllocValue& v) {
map_.Insert(ptr, v);
total_.allocs++;
total_.alloc_size += v.bytes;
}
// Helpers for sorting and generating leak reports
struct Entry;
struct ReportState;
static void ReportCallback(const void* ptr, AllocValue* v, ReportState*);
static void ReportObject(const void* ptr, AllocValue* v, char*);
DISALLOW_COPY_AND_ASSIGN(Snapshot);
};
#endif // BASE_HEAP_PROFILE_TABLE_H_

View File

@ -1,192 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2005, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
// Sanjay Ghemawat <opensource@google.com>
#include "config.h"
#include "internal_logging.h"
#include <stdarg.h> // for va_end, va_start
#include <stdio.h> // for vsnprintf, va_list, etc
#include <stdlib.h> // for abort
#include <string.h> // for strlen, memcpy
#ifdef HAVE_UNISTD_H
#include <unistd.h> // for write()
#endif
#include <gperftools/malloc_extension.h>
#include "base/logging.h" // for perftools_vsnprintf
#include "base/spinlock.h" // for SpinLockHolder, SpinLock
// Variables for storing crash output. Allocated statically since we
// may not be able to heap-allocate while crashing.
static SpinLock crash_lock(base::LINKER_INITIALIZED);
static bool crashed = false;
static const int kStatsBufferSize = 16 << 10;
static char stats_buffer[kStatsBufferSize] = { 0 };
namespace tcmalloc {
static void WriteMessage(const char* msg, int length) {
write(STDERR_FILENO, msg, length);
}
void (*log_message_writer)(const char* msg, int length) = WriteMessage;
class Logger {
public:
bool Add(const LogItem& item);
bool AddStr(const char* str, int n);
bool AddNum(uint64_t num, int base); // base must be 10 or 16.
static const int kBufSize = 200;
char* p_;
char* end_;
char buf_[kBufSize];
};
void Log(LogMode mode, const char* filename, int line,
LogItem a, LogItem b, LogItem c, LogItem d) {
Logger state;
state.p_ = state.buf_;
state.end_ = state.buf_ + sizeof(state.buf_);
state.AddStr(filename, strlen(filename))
&& state.AddStr(":", 1)
&& state.AddNum(line, 10)
&& state.AddStr("]", 1)
&& state.Add(a)
&& state.Add(b)
&& state.Add(c)
&& state.Add(d);
// Terminate with a newline
if (state.p_ >= state.end_) {
state.p_ = state.end_ - 1;
}
*state.p_ = '\n';
state.p_++;
int msglen = state.p_ - state.buf_;
if (mode == kLog) {
(*log_message_writer)(state.buf_, msglen);
return;
}
bool first_crash = false;
{
SpinLockHolder l(&crash_lock);
if (!crashed) {
crashed = true;
first_crash = true;
}
}
(*log_message_writer)(state.buf_, msglen);
if (first_crash && mode == kCrashWithStats) {
MallocExtension::instance()->GetStats(stats_buffer, kStatsBufferSize);
(*log_message_writer)(stats_buffer, strlen(stats_buffer));
}
abort();
}
bool Logger::Add(const LogItem& item) {
// Separate items with spaces
if (p_ < end_) {
*p_ = ' ';
p_++;
}
switch (item.tag_) {
case LogItem::kStr:
return AddStr(item.u_.str, strlen(item.u_.str));
case LogItem::kUnsigned:
return AddNum(item.u_.unum, 10);
case LogItem::kSigned:
if (item.u_.snum < 0) {
// The cast to uint64_t is intentionally before the negation
// so that we do not attempt to negate -2^63.
return AddStr("-", 1)
&& AddNum(- static_cast<uint64_t>(item.u_.snum), 10);
} else {
return AddNum(static_cast<uint64_t>(item.u_.snum), 10);
}
case LogItem::kPtr:
return AddStr("0x", 2)
&& AddNum(reinterpret_cast<uintptr_t>(item.u_.ptr), 16);
default:
return false;
}
}
bool Logger::AddStr(const char* str, int n) {
if (end_ - p_ < n) {
return false;
} else {
memcpy(p_, str, n);
p_ += n;
return true;
}
}
bool Logger::AddNum(uint64_t num, int base) {
static const char kDigits[] = "0123456789abcdef";
char space[22]; // more than enough for 2^64 in smallest supported base (10)
char* end = space + sizeof(space);
char* pos = end;
do {
pos--;
*pos = kDigits[num % base];
num /= base;
} while (num > 0 && pos > space);
return AddStr(pos, end - pos);
}
} // end tcmalloc namespace
void TCMalloc_Printer::printf(const char* format, ...) {
if (left_ > 0) {
va_list ap;
va_start(ap, format);
const int r = perftools_vsnprintf(buf_, left_, format, ap);
va_end(ap);
if (r < 0) {
// Perhaps an old glibc that returns -1 on truncation?
left_ = 0;
} else if (r > left_) {
// Truncation
left_ = 0;
} else {
left_ -= r;
buf_ += r;
}
}
}
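// A minimal standalone sketch of the malloc-free logging technique used
// above: digits are generated backwards into a stack buffer and flushed with
// plain write(), so no stdio buffering (and hence no malloc) is involved.
// SafeLogNum is an illustrative name for this sketch only.
#include <string.h>
#include <unistd.h>
static void SafeLogNum(const char* msg, unsigned long long num) {
char buf[64];
char* end = buf + sizeof(buf);
char* pos = end;
*--pos = '\n';
do {  // emit decimal digits, least significant first
*--pos = static_cast<char>('0' + (num % 10));
num /= 10;
} while (num > 0);
write(STDERR_FILENO, msg, strlen(msg));
write(STDERR_FILENO, pos, end - pos);
}
// Usage: SafeLogNum("bytes allocated: ", 4096);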

View File

@ -1,144 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2005, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
// Author: Sanjay Ghemawat <opensource@google.com>
//
// Internal logging and related utility routines.
#ifndef TCMALLOC_INTERNAL_LOGGING_H_
#define TCMALLOC_INTERNAL_LOGGING_H_
#include "config.h"
#include <stddef.h> // for size_t
#if defined HAVE_STDINT_H
#include <stdint.h>
#elif defined HAVE_INTTYPES_H
#include <inttypes.h>
#else
#include <sys/types.h>
#endif
//-------------------------------------------------------------------
// Utility routines
//-------------------------------------------------------------------
// Safe logging helper: we write directly to the stderr file
// descriptor and avoid FILE buffering because that may invoke
// malloc().
//
// Example:
// Log(kLog, __FILE__, __LINE__, "error", bytes);
namespace tcmalloc {
enum LogMode {
kLog, // Just print the message
kCrash, // Print the message and crash
kCrashWithStats // Print the message, some stats, and crash
};
class Logger;
// A LogItem holds any of the argument types that can be passed to Log()
class LogItem {
public:
LogItem() : tag_(kEnd) { }
LogItem(const char* v) : tag_(kStr) { u_.str = v; }
LogItem(int v) : tag_(kSigned) { u_.snum = v; }
LogItem(long v) : tag_(kSigned) { u_.snum = v; }
LogItem(long long v) : tag_(kSigned) { u_.snum = v; }
LogItem(unsigned int v) : tag_(kUnsigned) { u_.unum = v; }
LogItem(unsigned long v) : tag_(kUnsigned) { u_.unum = v; }
LogItem(unsigned long long v) : tag_(kUnsigned) { u_.unum = v; }
LogItem(const void* v) : tag_(kPtr) { u_.ptr = v; }
private:
friend class Logger;
enum Tag {
kStr,
kSigned,
kUnsigned,
kPtr,
kEnd
};
Tag tag_;
union {
const char* str;
const void* ptr;
int64_t snum;
uint64_t unum;
} u_;
};
extern PERFTOOLS_DLL_DECL void Log(LogMode mode, const char* filename, int line,
LogItem a, LogItem b = LogItem(),
LogItem c = LogItem(), LogItem d = LogItem());
// Tests can override this function to collect logging messages.
extern PERFTOOLS_DLL_DECL void (*log_message_writer)(const char* msg, int length);
} // end tcmalloc namespace
// Like assert(), but executed even in NDEBUG mode
#undef CHECK_CONDITION
#define CHECK_CONDITION(cond) \
do { \
if (!(cond)) { \
::tcmalloc::Log(::tcmalloc::kCrash, __FILE__, __LINE__, #cond); \
} \
} while (0)
// Our own version of assert() so we can avoid hanging by trying to do
// all kinds of goofy printing while holding the malloc lock.
#ifndef NDEBUG
#define ASSERT(cond) CHECK_CONDITION(cond)
#else
#define ASSERT(cond) ((void) 0)
#endif
// Print into buffer
class TCMalloc_Printer {
private:
char* buf_; // Where should we write next
int left_; // Space left in buffer (including space for \0)
public:
// REQUIRES: "length > 0"
TCMalloc_Printer(char* buf, int length) : buf_(buf), left_(length) {
buf[0] = '\0';
}
void printf(const char* format, ...)
#ifdef HAVE___ATTRIBUTE__
__attribute__ ((__format__ (__printf__, 2, 3)))
#endif
;
};
#endif // TCMALLOC_INTERNAL_LOGGING_H_
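// A minimal standalone analogue of TCMalloc_Printer's truncation tracking,
// using plain vsnprintf instead of perftools_vsnprintf. DemoPrinter is an
// illustrative name for this sketch only; note that C99 vsnprintf reports
// truncation as r >= left_, which this sketch checks explicitly.
#include <stdarg.h>
#include <stdio.h>
class DemoPrinter {
public:
// REQUIRES: "length > 0"
DemoPrinter(char* buf, int length) : buf_(buf), left_(length) {
buf[0] = '\0';
}
void printf(const char* format, ...) {
if (left_ <= 0) return;
va_list ap;
va_start(ap, format);
const int r = vsnprintf(buf_, left_, format, ap);
va_end(ap);
if (r < 0 || r >= left_) {
left_ = 0;  // error or truncation: stop accepting further output
} else {
left_ -= r;
buf_ += r;
}
}
private:
char* buf_;  // where to write next
int left_;   // space left in buffer (including space for \0)
};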

View File

@ -1,91 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2011, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
// Author: Craig Silverstein <opensource@google.com>
//
// This .h file imports the code that causes tcmalloc to override libc
// versions of malloc/free/new/delete/etc. That is, it provides the
// logic that makes it so calls to malloc(10) go through tcmalloc,
// rather than the default (libc) malloc.
//
// This file also provides a method: ReplaceSystemAlloc(), that every
// libc_override_*.h file it #includes is required to provide. This
// is called when first setting up tcmalloc -- that is, when a global
// constructor in tcmalloc.cc is executed -- to do any initialization
// work that may be required for this OS. (Note we cannot entirely
// control when tcmalloc is initialized, and the system may do some
// mallocs and frees before this routine is called.) It may be a
// noop.
//
// Every libc has its own way of doing this, and sometimes the compiler
// matters too, so we have a different file for each libc, and often
// for different compilers and OS's.
#ifndef TCMALLOC_LIBC_OVERRIDE_INL_H_
#define TCMALLOC_LIBC_OVERRIDE_INL_H_
#include "config.h"
#ifdef HAVE_FEATURES_H
#include <features.h> // for __GLIBC__
#endif
#include <gperftools/tcmalloc.h>
static void ReplaceSystemAlloc(); // defined in the .h files below
// For windows, there are two ways to get tcmalloc. If we're
// patching, then src/windows/patch_function.cc will do the necessary
// overriding here. Otherwise, we do the 'redefine' trick, where
// we remove malloc/new/etc from msvcrt.dll, and just need to define
// them now.
#if defined(_WIN32) && defined(WIN32_DO_PATCHING)
void PatchWindowsFunctions(); // in src/windows/patch_function.cc
static void ReplaceSystemAlloc() { PatchWindowsFunctions(); }
#elif defined(_WIN32) && !defined(WIN32_DO_PATCHING)
#include "libc_override_redefine.h"
#elif defined(__APPLE__)
#include "libc_override_osx.h"
#elif defined(__GLIBC__)
#include "libc_override_glibc.h"
// Not all gcc systems necessarily support weak symbols, but all the
// ones I know of do, so for now just assume they all do.
#elif defined(__GNUC__)
#include "libc_override_gcc_and_weak.h"
#else
#error Need to add support for your libc/OS here
#endif
#endif // TCMALLOC_LIBC_OVERRIDE_INL_H_

View File

@ -1,172 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2011, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
// Author: Craig Silverstein <opensource@google.com>
//
// Used to override malloc routines on systems that define the
// memory allocation routines to be weak symbols in their libc
// (almost all unix-based systems are like this), on gcc, which
// supports the 'alias' attribute.
#ifndef TCMALLOC_LIBC_OVERRIDE_GCC_AND_WEAK_INL_H_
#define TCMALLOC_LIBC_OVERRIDE_GCC_AND_WEAK_INL_H_
#ifdef HAVE_SYS_CDEFS_H
#include <sys/cdefs.h> // for __THROW
#endif
#include <gperftools/tcmalloc.h>
#include "getenv_safe.h" // TCMallocGetenvSafe
#include "base/commandlineflags.h"
#ifndef __THROW // I guess we're not on a glibc-like system
# define __THROW // __THROW is just an optimization, so ok to make it ""
#endif
#ifndef __GNUC__
# error libc_override_gcc_and_weak.h is for gcc distributions only.
#endif
#define ALIAS(tc_fn) __attribute__ ((alias (#tc_fn), used))
void* operator new(size_t size)
ALIAS(tc_new);
void operator delete(void* p) noexcept
ALIAS(tc_delete);
void* operator new[](size_t size)
ALIAS(tc_newarray);
void operator delete[](void* p) noexcept
ALIAS(tc_deletearray);
void* operator new(size_t size, const std::nothrow_t& nt) noexcept
ALIAS(tc_new_nothrow);
void* operator new[](size_t size, const std::nothrow_t& nt) noexcept
ALIAS(tc_newarray_nothrow);
void operator delete(void* p, const std::nothrow_t& nt) noexcept
ALIAS(tc_delete_nothrow);
void operator delete[](void* p, const std::nothrow_t& nt) noexcept
ALIAS(tc_deletearray_nothrow);
#if defined(ENABLE_SIZED_DELETE)
void operator delete(void *p, size_t size) throw()
ALIAS(tc_delete_sized);
void operator delete[](void *p, size_t size) throw()
ALIAS(tc_deletearray_sized);
#elif defined(ENABLE_DYNAMIC_SIZED_DELETE) && \
(__GNUC__ * 100 + __GNUC_MINOR__) >= 405
static void delegate_sized_delete(void *p, size_t s) throw() {
(operator delete)(p);
}
static void delegate_sized_deletearray(void *p, size_t s) throw() {
(operator delete[])(p);
}
extern "C" __attribute__((weak))
int tcmalloc_sized_delete_enabled(void);
static bool sized_delete_enabled(void) {
if (tcmalloc_sized_delete_enabled != 0) {
return !!tcmalloc_sized_delete_enabled();
}
const char *flag = TCMallocGetenvSafe("TCMALLOC_ENABLE_SIZED_DELETE");
return tcmalloc::commandlineflags::StringToBool(flag, false);
}
extern "C" {
static void *resolve_delete_sized(void) {
if (sized_delete_enabled()) {
return reinterpret_cast<void *>(tc_delete_sized);
}
return reinterpret_cast<void *>(delegate_sized_delete);
}
static void *resolve_deletearray_sized(void) {
if (sized_delete_enabled()) {
return reinterpret_cast<void *>(tc_deletearray_sized);
}
return reinterpret_cast<void *>(delegate_sized_deletearray);
}
}
void operator delete(void *p, size_t size) throw()
__attribute__((ifunc("resolve_delete_sized")));
void operator delete[](void *p, size_t size) throw()
__attribute__((ifunc("resolve_deletearray_sized")));
#else /* !ENABLE_SIZED_DELETE && !ENABLE_DYNAMIC_SIZED_DELETE */
void operator delete(void *p, size_t size) throw()
ALIAS(tc_delete);
void operator delete[](void *p, size_t size) throw()
ALIAS(tc_deletearray);
#endif /* !ENABLE_SIZED_DELETE && !ENABLE_DYNAMIC_SIZED_DELETE */
extern "C" {
void* malloc(size_t size) __THROW ALIAS(tc_malloc);
void free(void* ptr) __THROW ALIAS(tc_free);
void* realloc(void* ptr, size_t size) __THROW ALIAS(tc_realloc);
void* calloc(size_t n, size_t size) __THROW ALIAS(tc_calloc);
void cfree(void* ptr) __THROW ALIAS(tc_cfree);
void* memalign(size_t align, size_t s) __THROW ALIAS(tc_memalign);
void* valloc(size_t size) __THROW ALIAS(tc_valloc);
void* pvalloc(size_t size) __THROW ALIAS(tc_pvalloc);
int posix_memalign(void** r, size_t a, size_t s) __THROW
ALIAS(tc_posix_memalign);
#ifndef __UCLIBC__
void malloc_stats(void) __THROW ALIAS(tc_malloc_stats);
#endif
int mallopt(int cmd, int value) __THROW ALIAS(tc_mallopt);
#ifdef HAVE_STRUCT_MALLINFO
struct mallinfo mallinfo(void) __THROW ALIAS(tc_mallinfo);
#endif
size_t malloc_size(void* p) __THROW ALIAS(tc_malloc_size);
#if defined(__ANDROID__)
size_t malloc_usable_size(const void* p) __THROW
ALIAS(tc_malloc_size);
#else
size_t malloc_usable_size(void* p) __THROW ALIAS(tc_malloc_size);
#endif
} // extern "C"
#undef ALIAS
// No need to do anything at tcmalloc-registration time: we do it all
// via overriding weak symbols (at link time).
static void ReplaceSystemAlloc() { }
#endif // TCMALLOC_LIBC_OVERRIDE_GCC_AND_WEAK_INL_H_
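// A minimal standalone demonstration of the GCC 'alias' attribute this
// header relies on: api() becomes another name for the same code as impl().
// GCC/Clang on ELF targets only; the function names are illustrative.
#include <stdio.h>
extern "C" int impl(int x) { return x + 1; }
extern "C" int api(int x) __attribute__((alias("impl"), used));
int main() {
printf("%d\n", api(41));  // prints 42, dispatching to impl()
return 0;
}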

View File

@ -1,92 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2011, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
// Author: Craig Silverstein <opensource@google.com>
//
// Used to override malloc routines on systems that are using glibc.
#ifndef TCMALLOC_LIBC_OVERRIDE_GLIBC_INL_H_
#define TCMALLOC_LIBC_OVERRIDE_GLIBC_INL_H_
#include "config.h"
#include <features.h> // for __GLIBC__
#include <gperftools/tcmalloc.h>
#ifndef __GLIBC__
# error libc_override_glibc.h is for glibc distributions only.
#endif
// In glibc, the memory-allocation methods are weak symbols, so we can
// just override them with our own. If we're using gcc, we can use
// __attribute__((alias)) to do the overriding easily (exception:
// Mach-O, which doesn't support aliases). Otherwise we have to use a
// function call.
#if !defined(__GNUC__) || defined(__MACH__)
// This also defines ReplaceSystemAlloc().
# include "libc_override_redefine.h" // defines functions malloc()/etc
#else // #if !defined(__GNUC__) || defined(__MACH__)
// If we get here, we're a gcc system, so do all the overriding we do
// with gcc. This does the overriding of all the 'normal' memory
// allocation. This also defines ReplaceSystemAlloc().
# include "libc_override_gcc_and_weak.h"
// We also have to do some glibc-specific overriding. Some library
// routines on RedHat 9 allocate memory using malloc() and free it
// using __libc_free() (or vice-versa). Since we provide our own
// implementations of malloc/free, we need to make sure that the
// __libc_XXX variants (defined as part of glibc) also point to the
// same implementations. Since it only matters for redhat, we
// do it inside the gcc #ifdef, since redhat uses gcc.
// TODO(csilvers): only do this if we detect we're an old enough glibc?
#define ALIAS(tc_fn) __attribute__ ((alias (#tc_fn)))
extern "C" {
void* __libc_malloc(size_t size) ALIAS(tc_malloc);
void __libc_free(void* ptr) ALIAS(tc_free);
void* __libc_realloc(void* ptr, size_t size) ALIAS(tc_realloc);
void* __libc_calloc(size_t n, size_t size) ALIAS(tc_calloc);
void __libc_cfree(void* ptr) ALIAS(tc_cfree);
void* __libc_memalign(size_t align, size_t s) ALIAS(tc_memalign);
void* __libc_valloc(size_t size) ALIAS(tc_valloc);
void* __libc_pvalloc(size_t size) ALIAS(tc_pvalloc);
int __posix_memalign(void** r, size_t a, size_t s) ALIAS(tc_posix_memalign);
} // extern "C"
#undef ALIAS
#endif // #if defined(__GNUC__) && !defined(__MACH__)
// No need to write ReplaceSystemAlloc(); one of the #includes above
// did it for us.
#endif // TCMALLOC_LIBC_OVERRIDE_GLIBC_INL_H_

View File

@ -1,308 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2011, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
// Author: Craig Silverstein <opensource@google.com>
//
// Used to override malloc routines on OS X systems. We use the
// malloc-zone functionality built into OS X to register our malloc
// routine.
//
// 1) We used to use the normal 'override weak libc malloc/etc'
// technique for OS X. This is not optimal because mach does not
// support the 'alias' attribute, so we had to have forwarding
// functions. It also does not work very well with OS X shared
// libraries (dylibs) -- in general, the shared libs don't use
// tcmalloc unless run with the DYLD_FORCE_FLAT_NAMESPACE envvar.
//
// 2) Another approach would be to use an interposition array:
// static const interpose_t interposers[] __attribute__((section("__DATA, __interpose"))) = {
// { (void *)tc_malloc, (void *)malloc },
// { (void *)tc_free, (void *)free },
// };
// This requires the user to set the DYLD_INSERT_LIBRARIES envvar, so
// is not much better.
//
// 3) Registering a new malloc zone avoids all these issues:
// http://www.opensource.apple.com/source/Libc/Libc-583/include/malloc/malloc.h
// http://www.opensource.apple.com/source/Libc/Libc-583/gen/malloc.c
// If we make tcmalloc the default malloc zone (undocumented but
// possible) then all new allocs use it, even those in shared
// libraries. Allocs done before tcmalloc was installed, or in libs
// that aren't using tcmalloc for some reason, will correctly go
// through the malloc-zone interface when free-ing, and will pick up
// the libc free rather than tcmalloc free. So it should "never"
// cause a crash (famous last words).
//
// 4) The routines one must define for one's own malloc have changed
// between OS X versions. This requires some hoops on our part, but
// is only really annoying when it comes to posix_memalign. The right
// behavior there depends on what OS version tcmalloc was compiled on,
// but also what OS version the program is running on. For now, we
// punt and don't implement our own posix_memalign. Apps that really
// care can use tc_posix_memalign directly.
#ifndef TCMALLOC_LIBC_OVERRIDE_OSX_INL_H_
#define TCMALLOC_LIBC_OVERRIDE_OSX_INL_H_
#include "config.h"
#ifdef HAVE_FEATURES_H
#include <features.h>
#endif
#include <gperftools/tcmalloc.h>
#if !defined(__APPLE__)
# error libc_override_osx.h is for OS X distributions only.
#endif
#include <AvailabilityMacros.h>
#include <malloc/malloc.h>
namespace tcmalloc {
void CentralCacheLockAll();
void CentralCacheUnlockAll();
}
// from AvailabilityMacros.h
#if defined(MAC_OS_X_VERSION_10_6) && \
MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_6
extern "C" {
// This function is only available on 10.6 (and later) but the
// LibSystem headers do not use AvailabilityMacros.h to handle weak
// importing automatically. This prototype is a copy of the one in
// <malloc/malloc.h> with the WEAK_IMPORT_ATTRIBUTE added.
extern malloc_zone_t *malloc_default_purgeable_zone(void)
WEAK_IMPORT_ATTRIBUTE;
}
#endif
// We need to provide wrappers around all the libc functions.
namespace {
size_t mz_size(malloc_zone_t* zone, const void* ptr) {
if (MallocExtension::instance()->GetOwnership(ptr) != MallocExtension::kOwned)
return 0; // malloc_zone semantics: return 0 if we don't own the memory
// TODO(csilvers): change this method to take a const void*, one day.
return MallocExtension::instance()->GetAllocatedSize(const_cast<void*>(ptr));
}
void* mz_malloc(malloc_zone_t* zone, size_t size) {
return tc_malloc(size);
}
void* mz_calloc(malloc_zone_t* zone, size_t num_items, size_t size) {
return tc_calloc(num_items, size);
}
void* mz_valloc(malloc_zone_t* zone, size_t size) {
return tc_valloc(size);
}
void mz_free(malloc_zone_t* zone, void* ptr) {
return tc_free(ptr);
}
void* mz_realloc(malloc_zone_t* zone, void* ptr, size_t size) {
return tc_realloc(ptr, size);
}
void* mz_memalign(malloc_zone_t* zone, size_t align, size_t size) {
return tc_memalign(align, size);
}
void mz_destroy(malloc_zone_t* zone) {
// A no-op -- we will not be destroyed!
}
// malloc_introspection callbacks. I'm not clear on what all of these do.
kern_return_t mi_enumerator(task_t task, void *,
unsigned type_mask, vm_address_t zone_address,
memory_reader_t reader,
vm_range_recorder_t recorder) {
// Should enumerate all the pointers we have. Seems like a lot of work.
return KERN_FAILURE;
}
size_t mi_good_size(malloc_zone_t *zone, size_t size) {
// I think it's always safe to return size, but maybe we could do better.
return size;
}
boolean_t mi_check(malloc_zone_t *zone) {
return MallocExtension::instance()->VerifyAllMemory();
}
void mi_print(malloc_zone_t *zone, boolean_t verbose) {
int bufsize = 8192;
if (verbose)
bufsize = 102400; // I picked this size arbitrarily
char* buffer = new char[bufsize];
MallocExtension::instance()->GetStats(buffer, bufsize);
fprintf(stdout, "%s", buffer);
delete[] buffer;
}
void mi_log(malloc_zone_t *zone, void *address) {
// I don't think we support anything like this
}
void mi_force_lock(malloc_zone_t *zone) {
tcmalloc::CentralCacheLockAll();
}
void mi_force_unlock(malloc_zone_t *zone) {
tcmalloc::CentralCacheUnlockAll();
}
void mi_statistics(malloc_zone_t *zone, malloc_statistics_t *stats) {
// TODO(csilvers): figure out how to fill these out
stats->blocks_in_use = 0;
stats->size_in_use = 0;
stats->max_size_in_use = 0;
stats->size_allocated = 0;
}
boolean_t mi_zone_locked(malloc_zone_t *zone) {
return false; // Hopefully unneeded by us!
}
} // unnamed namespace
// OS X doesn't have pvalloc, cfree, malloc_stats, etc, so we can just
// define our own. :-) OS X supplies posix_memalign in some versions
// but not others, either strongly or weakly linked, in a way that's
// difficult enough to code against correctly that I just don't try to
// support either memalign() or posix_memalign(). If you need them
// and are willing to code to tcmalloc, you can use tc_posix_memalign().
extern "C" {
void cfree(void* p) { tc_cfree(p); }
void* pvalloc(size_t s) { return tc_pvalloc(s); }
void malloc_stats(void) { tc_malloc_stats(); }
int mallopt(int cmd, int v) { return tc_mallopt(cmd, v); }
// No struct mallinfo on OS X, so don't define mallinfo().
// An alias for malloc_size(), which OS X defines.
size_t malloc_usable_size(void* p) { return tc_malloc_size(p); }
} // extern "C"
static malloc_zone_t *get_default_zone() {
malloc_zone_t **zones = NULL;
unsigned int num_zones = 0;
/*
* On OSX 10.12, malloc_default_zone returns a special zone that is not
* present in the list of registered zones. That zone uses a "lite zone"
* if one is present (apparently enabled when malloc stack logging is
* enabled), or the first registered zone otherwise. In practice this
* means unless malloc stack logging is enabled, the first registered
* zone is the default.
* So get the list of zones to get the first one, instead of relying on
* malloc_default_zone.
*/
if (KERN_SUCCESS != malloc_get_all_zones(0, NULL, (vm_address_t**) &zones,
&num_zones)) {
/* Reset the value in case the failure happened after it was set. */
num_zones = 0;
}
if (num_zones)
return zones[0];
return malloc_default_zone();
}
static void ReplaceSystemAlloc() {
static malloc_introspection_t tcmalloc_introspection;
memset(&tcmalloc_introspection, 0, sizeof(tcmalloc_introspection));
tcmalloc_introspection.enumerator = &mi_enumerator;
tcmalloc_introspection.good_size = &mi_good_size;
tcmalloc_introspection.check = &mi_check;
tcmalloc_introspection.print = &mi_print;
tcmalloc_introspection.log = &mi_log;
tcmalloc_introspection.force_lock = &mi_force_lock;
tcmalloc_introspection.force_unlock = &mi_force_unlock;
static malloc_zone_t tcmalloc_zone;
memset(&tcmalloc_zone, 0, sizeof(malloc_zone_t));
// Start with a version 4 zone which is used for OS X 10.4 and 10.5.
tcmalloc_zone.version = 4;
tcmalloc_zone.zone_name = "tcmalloc";
tcmalloc_zone.size = &mz_size;
tcmalloc_zone.malloc = &mz_malloc;
tcmalloc_zone.calloc = &mz_calloc;
tcmalloc_zone.valloc = &mz_valloc;
tcmalloc_zone.free = &mz_free;
tcmalloc_zone.realloc = &mz_realloc;
tcmalloc_zone.destroy = &mz_destroy;
tcmalloc_zone.batch_malloc = NULL;
tcmalloc_zone.batch_free = NULL;
tcmalloc_zone.introspect = &tcmalloc_introspection;
// from AvailabilityMacros.h
#if defined(MAC_OS_X_VERSION_10_6) && \
MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_6
// Switch to version 6 on OSX 10.6 to support memalign.
tcmalloc_zone.version = 6;
tcmalloc_zone.free_definite_size = NULL;
tcmalloc_zone.memalign = &mz_memalign;
tcmalloc_introspection.zone_locked = &mi_zone_locked;
// Request the default purgeable zone to force its creation. The
// current default zone is registered with the purgeable zone for
// doing tiny and small allocs. Sadly, it assumes that the default
// zone is the szone implementation from OS X and will crash if it
// isn't. By creating the zone now, this will be true and changing
// the default zone won't cause a problem. This only needs to
// happen when actually running on OS X 10.6 and higher (note the
// ifdef above only checks if we were *compiled* with 10.6 or
// higher; at runtime we have to check if this symbol is defined.)
if (malloc_default_purgeable_zone) {
malloc_default_purgeable_zone();
}
#endif
// Register the tcmalloc zone. At this point, it will not be the
// default zone.
malloc_zone_register(&tcmalloc_zone);
// Unregister and reregister the default zone. Unregistering swaps
// the specified zone with the last one registered which for the
// default zone makes the more recently registered zone the default
// zone. The default zone is then re-registered to ensure that
// allocations made from it earlier will be handled correctly.
// Things are not guaranteed to work that way, but it's how they work now.
malloc_zone_t *default_zone = get_default_zone();
malloc_zone_unregister(default_zone);
malloc_zone_register(default_zone);
}
#endif // TCMALLOC_LIBC_OVERRIDE_OSX_INL_H_
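// A macOS-only standalone sketch of the zone enumeration used by
// get_default_zone() above: list every registered malloc zone by name.
#include <malloc/malloc.h>
#include <mach/mach.h>
#include <stdio.h>
int main() {
vm_address_t* zones = NULL;
unsigned int num_zones = 0;
if (malloc_get_all_zones(mach_task_self(), NULL, &zones, &num_zones) !=
KERN_SUCCESS) {
return 1;
}
for (unsigned int i = 0; i < num_zones; i++) {
const malloc_zone_t* zone = (const malloc_zone_t*)zones[i];
printf("zone %u: %s\n", i, zone->zone_name ? zone->zone_name : "(unnamed)");
}
return 0;
}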

View File

@ -1,92 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2011, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
// Author: Craig Silverstein <opensource@google.com>
//
// Used on systems that don't have their own definition of
// malloc/new/etc. (Typically this will be a windows msvcrt.dll that
// has been edited to remove the definitions.) We can just define our
// own as normal functions.
//
// This should also work on systems where all the malloc routines are
// defined as weak symbols, and there's no support for aliasing.
#ifndef TCMALLOC_LIBC_OVERRIDE_REDEFINE_H_
#define TCMALLOC_LIBC_OVERRIDE_REDEFINE_H_
void* operator new(size_t size) { return tc_new(size); }
void operator delete(void* p) throw() { tc_delete(p); }
void* operator new[](size_t size) { return tc_newarray(size); }
void operator delete[](void* p) throw() { tc_deletearray(p); }
void* operator new(size_t size, const std::nothrow_t& nt) throw() {
return tc_new_nothrow(size, nt);
}
void* operator new[](size_t size, const std::nothrow_t& nt) throw() {
return tc_newarray_nothrow(size, nt);
}
void operator delete(void* ptr, const std::nothrow_t& nt) throw() {
return tc_delete_nothrow(ptr, nt);
}
void operator delete[](void* ptr, const std::nothrow_t& nt) throw() {
return tc_deletearray_nothrow(ptr, nt);
}
#ifdef ENABLE_SIZED_DELETE
void operator delete(void* p, size_t s) throw() { tc_delete_sized(p, s); }
void operator delete[](void* p, size_t s) throw() { tc_deletearray_sized(p, s); }
#endif
extern "C" {
void* malloc(size_t s) { return tc_malloc(s); }
void free(void* p) { tc_free(p); }
void* realloc(void* p, size_t s) { return tc_realloc(p, s); }
void* calloc(size_t n, size_t s) { return tc_calloc(n, s); }
void cfree(void* p) { tc_cfree(p); }
void* memalign(size_t a, size_t s) { return tc_memalign(a, s); }
void* valloc(size_t s) { return tc_valloc(s); }
void* pvalloc(size_t s) { return tc_pvalloc(s); }
int posix_memalign(void** r, size_t a, size_t s) {
return tc_posix_memalign(r, a, s);
}
void malloc_stats(void) { tc_malloc_stats(); }
int mallopt(int cmd, int v) { return tc_mallopt(cmd, v); }
#ifdef HAVE_STRUCT_MALLINFO
struct mallinfo mallinfo(void) { return tc_mallinfo(); }
#endif
size_t malloc_size(void* p) { return tc_malloc_size(p); }
size_t malloc_usable_size(void* p) { return tc_malloc_size(p); }
} // extern "C"
// No need to do anything at tcmalloc-registration time: we do it all
// via overriding weak symbols (at link time).
static void ReplaceSystemAlloc() { }
#endif // TCMALLOC_LIBC_OVERRIDE_REDEFINE_H_

View File

@ -1,103 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2008, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
// Author: Sanjay Ghemawat <opensource@google.com>
//
// Some very basic linked list functions for dealing with using void * as
// storage.
#ifndef TCMALLOC_LINKED_LIST_H_
#define TCMALLOC_LINKED_LIST_H_
#include <stddef.h>
namespace tcmalloc {
inline void *SLL_Next(void *t) {
return *(reinterpret_cast<void**>(t));
}
inline void SLL_SetNext(void *t, void *n) {
*(reinterpret_cast<void**>(t)) = n;
}
inline void SLL_Push(void **list, void *element) {
SLL_SetNext(element, *list);
*list = element;
}
inline void *SLL_Pop(void **list) {
void *result = *list;
*list = SLL_Next(*list);
return result;
}
// Remove N elements from a linked list to which head points. head will be
// modified to point to the new head. start and end will point to the first
// and last nodes of the range. Note that the successor of end (that is,
// SLL_Next(*end)) will be NULL after this function is called.
inline void SLL_PopRange(void **head, int N, void **start, void **end) {
if (N == 0) {
*start = NULL;
*end = NULL;
return;
}
void *tmp = *head;
for (int i = 1; i < N; ++i) {
tmp = SLL_Next(tmp);
}
*start = *head;
*end = tmp;
*head = SLL_Next(tmp);
// Unlink range from list.
SLL_SetNext(tmp, NULL);
}
inline void SLL_PushRange(void **head, void *start, void *end) {
if (!start) return;
SLL_SetNext(end, *head);
*head = start;
}
inline size_t SLL_Size(void *head) {
size_t count = 0;
while (head) {
count++;
head = SLL_Next(head);
}
return count;
}
} // namespace tcmalloc
#endif // TCMALLOC_LINKED_LIST_H_
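// A standalone usage sketch of the SLL_* helpers above (assumes this header
// is included): any chunk of memory at least one pointer wide can serve as a
// list node, because its first word is reused to store the 'next' pointer.
#include <stdio.h>
int main() {
void* list = NULL;
void* a[2]; void* b[2]; void* c[2];  // three dummy "free chunks"
tcmalloc::SLL_Push(&list, a);
tcmalloc::SLL_Push(&list, b);
tcmalloc::SLL_Push(&list, c);
printf("size = %zu\n", tcmalloc::SLL_Size(list));  // prints 3 (LIFO list)
while (list != NULL) {
void* e = tcmalloc::SLL_Pop(&list);  // pops c, then b, then a
printf("popped %p\n", e);
}
return 0;
}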

View File

@ -1,388 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2005, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
// Author: Sanjay Ghemawat <opensource@google.com>
#include "config.h"
#include <assert.h>
#include <string.h>
#include <stdio.h>
#if defined HAVE_STDINT_H
#include <stdint.h>
#elif defined HAVE_INTTYPES_H
#include <inttypes.h>
#else
#include <sys/types.h>
#endif
#include <string>
#include "base/dynamic_annotations.h"
#include "base/sysinfo.h" // for FillProcSelfMaps
#ifndef NO_HEAP_CHECK
#include "gperftools/heap-checker.h"
#endif
#include "gperftools/malloc_extension.h"
#include "gperftools/malloc_extension_c.h"
#include "maybe_threads.h"
#include "base/googleinit.h"
using STL_NAMESPACE::string;
using STL_NAMESPACE::vector;
static void DumpAddressMap(string* result) {
*result += "\nMAPPED_LIBRARIES:\n";
// We keep doubling until we get a fit
const size_t old_resultlen = result->size();
for (int amap_size = 10240; amap_size < 10000000; amap_size *= 2) {
result->resize(old_resultlen + amap_size);
bool wrote_all = false;
const int bytes_written =
tcmalloc::FillProcSelfMaps(&((*result)[old_resultlen]), amap_size,
&wrote_all);
if (wrote_all) { // we fit!
(*result)[old_resultlen + bytes_written] = '\0';
result->resize(old_resultlen + bytes_written);
return;
}
}
result->resize(old_resultlen); // just don't print anything
}
// Note: this routine is meant to be called before threads are spawned.
void MallocExtension::Initialize() {
static bool initialize_called = false;
if (initialize_called) return;
initialize_called = true;
#ifdef __GLIBC__
// GNU libc++ versions 3.3 and 3.4 obey the environment variables
// GLIBCPP_FORCE_NEW and GLIBCXX_FORCE_NEW respectively. Setting
// one of these variables forces the STL default allocator to call
// new() or delete() for each allocation or deletion. Otherwise
// the STL allocator tries to avoid the high cost of doing
// allocations by pooling memory internally. However, tcmalloc
// does allocations really fast, especially for the types of small
// items one sees in STL, so it's better off just using us.
// TODO: control whether we do this via an environment variable?
setenv("GLIBCPP_FORCE_NEW", "1", false /* no overwrite*/);
setenv("GLIBCXX_FORCE_NEW", "1", false /* no overwrite*/);
// Now we need to make the setenv 'stick', which it may not do since
// the env is flaky before main() is called. But luckily stl only
// looks at this env var the first time it tries to do an alloc, and
// caches what it finds. So we just cause an stl alloc here.
string dummy("I need to be allocated");
dummy += "!"; // so the definition of dummy isn't optimized out
#endif /* __GLIBC__ */
}
// SysAllocator implementation
SysAllocator::~SysAllocator() {}
// Default implementation -- does nothing
MallocExtension::~MallocExtension() { }
bool MallocExtension::VerifyAllMemory() { return true; }
bool MallocExtension::VerifyNewMemory(const void* p) { return true; }
bool MallocExtension::VerifyArrayNewMemory(const void* p) { return true; }
bool MallocExtension::VerifyMallocMemory(const void* p) { return true; }
bool MallocExtension::GetNumericProperty(const char* property, size_t* value) {
return false;
}
bool MallocExtension::SetNumericProperty(const char* property, size_t value) {
return false;
}
void MallocExtension::GetStats(char* buffer, int length) {
assert(length > 0);
buffer[0] = '\0';
}
bool MallocExtension::MallocMemoryStats(int* blocks, size_t* total,
int histogram[kMallocHistogramSize]) {
*blocks = 0;
*total = 0;
memset(histogram, 0, sizeof(*histogram) * kMallocHistogramSize);
return true;
}
void** MallocExtension::ReadStackTraces(int* sample_period) {
return NULL;
}
void** MallocExtension::ReadHeapGrowthStackTraces() {
return NULL;
}
void MallocExtension::MarkThreadIdle() {
// Default implementation does nothing
}
void MallocExtension::MarkThreadBusy() {
// Default implementation does nothing
}
SysAllocator* MallocExtension::GetSystemAllocator() {
return NULL;
}
void MallocExtension::SetSystemAllocator(SysAllocator *a) {
// Default implementation does nothing
}
void MallocExtension::ReleaseToSystem(size_t num_bytes) {
// Default implementation does nothing
}
void MallocExtension::ReleaseFreeMemory() {
ReleaseToSystem(static_cast<size_t>(-1)); // SIZE_T_MAX
}
void MallocExtension::SetMemoryReleaseRate(double rate) {
// Default implementation does nothing
}
double MallocExtension::GetMemoryReleaseRate() {
return -1.0;
}
size_t MallocExtension::GetEstimatedAllocatedSize(size_t size) {
return size;
}
size_t MallocExtension::GetAllocatedSize(const void* p) {
assert(GetOwnership(p) != kNotOwned);
return 0;
}
MallocExtension::Ownership MallocExtension::GetOwnership(const void* p) {
return kUnknownOwnership;
}
void MallocExtension::GetFreeListSizes(
vector<MallocExtension::FreeListInfo>* v) {
v->clear();
}
size_t MallocExtension::GetThreadCacheSize() {
return 0;
}
void MallocExtension::MarkThreadTemporarilyIdle() {
// Default implementation does nothing
}
// The current malloc extension object.
static MallocExtension* current_instance;
static void InitModule() {
if (current_instance != NULL) {
return;
}
current_instance = new MallocExtension;
#ifndef NO_HEAP_CHECK
HeapLeakChecker::IgnoreObject(current_instance);
#endif
}
REGISTER_MODULE_INITIALIZER(malloc_extension_init, InitModule())
MallocExtension* MallocExtension::instance() {
InitModule();
return current_instance;
}
void MallocExtension::Register(MallocExtension* implementation) {
InitModule();
// When running under valgrind, our custom malloc is replaced with
// valgrind's one and malloc extensions will not work. (Note:
// callers should be responsible for checking that they are the
// malloc that is really being run, before calling Register. This
// is just here as an extra sanity check.)
if (!RunningOnValgrind()) {
current_instance = implementation;
}
}
// -----------------------------------------------------------------------
// Heap sampling support
// -----------------------------------------------------------------------
namespace {
// Accessors
uintptr_t Count(void** entry) {
return reinterpret_cast<uintptr_t>(entry[0]);
}
uintptr_t Size(void** entry) {
return reinterpret_cast<uintptr_t>(entry[1]);
}
uintptr_t Depth(void** entry) {
return reinterpret_cast<uintptr_t>(entry[2]);
}
void* PC(void** entry, int i) {
return entry[3+i];
}
void PrintCountAndSize(MallocExtensionWriter* writer,
uintptr_t count, uintptr_t size) {
char buf[100];
snprintf(buf, sizeof(buf),
"%6" PRIu64 ": %8" PRIu64 " [%6" PRIu64 ": %8" PRIu64 "] @",
static_cast<uint64>(count),
static_cast<uint64>(size),
static_cast<uint64>(count),
static_cast<uint64>(size));
writer->append(buf, strlen(buf));
}
void PrintHeader(MallocExtensionWriter* writer,
const char* label, void** entries) {
// Compute the total count and total size
uintptr_t total_count = 0;
uintptr_t total_size = 0;
for (void** entry = entries; Count(entry) != 0; entry += 3 + Depth(entry)) {
total_count += Count(entry);
total_size += Size(entry);
}
const char* const kTitle = "heap profile: ";
writer->append(kTitle, strlen(kTitle));
PrintCountAndSize(writer, total_count, total_size);
writer->append(" ", 1);
writer->append(label, strlen(label));
writer->append("\n", 1);
}
void PrintStackEntry(MallocExtensionWriter* writer, void** entry) {
PrintCountAndSize(writer, Count(entry), Size(entry));
for (int i = 0; i < Depth(entry); i++) {
char buf[32];
snprintf(buf, sizeof(buf), " %p", PC(entry, i));
writer->append(buf, strlen(buf));
}
writer->append("\n", 1);
}
}
void MallocExtension::GetHeapSample(MallocExtensionWriter* writer) {
int sample_period = 0;
void** entries = ReadStackTraces(&sample_period);
if (entries == NULL) {
const char* const kErrorMsg =
"This malloc implementation does not support sampling.\n"
"As of 2005/01/26, only tcmalloc supports sampling, and\n"
"you are probably running a binary that does not use\n"
"tcmalloc.\n";
writer->append(kErrorMsg, strlen(kErrorMsg));
return;
}
char label[32];
snprintf(label, sizeof(label), "heap_v2/%d", sample_period);
PrintHeader(writer, label, entries);
for (void** entry = entries; Count(entry) != 0; entry += 3 + Depth(entry)) {
PrintStackEntry(writer, entry);
}
delete[] entries;
DumpAddressMap(writer);
}
void MallocExtension::GetHeapGrowthStacks(MallocExtensionWriter* writer) {
void** entries = ReadHeapGrowthStackTraces();
if (entries == NULL) {
const char* const kErrorMsg =
"This malloc implementation does not support "
"ReadHeapGrowthStackTraces().\n"
"As of 2005/09/27, only tcmalloc supports this, and you\n"
"are probably running a binary that does not use tcmalloc.\n";
writer->append(kErrorMsg, strlen(kErrorMsg));
return;
}
// Do not canonicalize the stack entries, so that we get a
// time-ordered list of stack traces, which may be useful if the
// client wants to focus on the latest stack traces.
PrintHeader(writer, "growth", entries);
for (void** entry = entries; Count(entry) != 0; entry += 3 + Depth(entry)) {
PrintStackEntry(writer, entry);
}
delete[] entries;
DumpAddressMap(writer);
}
void MallocExtension::Ranges(void* arg, RangeFunction func) {
// No callbacks by default
}
// These are C shims that work on the current instance.
#define C_SHIM(fn, retval, paramlist, arglist) \
extern "C" PERFTOOLS_DLL_DECL retval MallocExtension_##fn paramlist { \
return MallocExtension::instance()->fn arglist; \
}
C_SHIM(VerifyAllMemory, int, (void), ());
C_SHIM(VerifyNewMemory, int, (const void* p), (p));
C_SHIM(VerifyArrayNewMemory, int, (const void* p), (p));
C_SHIM(VerifyMallocMemory, int, (const void* p), (p));
C_SHIM(MallocMemoryStats, int,
(int* blocks, size_t* total, int histogram[kMallocHistogramSize]),
(blocks, total, histogram));
C_SHIM(GetStats, void,
(char* buffer, int buffer_length), (buffer, buffer_length));
C_SHIM(GetNumericProperty, int,
(const char* property, size_t* value), (property, value));
C_SHIM(SetNumericProperty, int,
(const char* property, size_t value), (property, value));
C_SHIM(MarkThreadIdle, void, (void), ());
C_SHIM(MarkThreadBusy, void, (void), ());
C_SHIM(ReleaseFreeMemory, void, (void), ());
C_SHIM(ReleaseToSystem, void, (size_t num_bytes), (num_bytes));
C_SHIM(GetEstimatedAllocatedSize, size_t, (size_t size), (size));
C_SHIM(GetAllocatedSize, size_t, (const void* p), (p));
C_SHIM(GetThreadCacheSize, size_t, (void), ());
C_SHIM(MarkThreadTemporarilyIdle, void, (void), ());
// Can't use the shim here because of the need to translate the enums.
extern "C"
MallocExtension_Ownership MallocExtension_GetOwnership(const void* p) {
return static_cast<MallocExtension_Ownership>(
MallocExtension::instance()->GetOwnership(p));
}
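// A standalone usage sketch of the public MallocExtension API implemented
// above. "generic.current_allocated_bytes" is a tcmalloc property; link
// against tcmalloc (e.g. -ltcmalloc) for real numbers, since the default
// implementation here returns false and empty stats.
#include <gperftools/malloc_extension.h>
#include <stdio.h>
int main() {
size_t allocated = 0;
if (MallocExtension::instance()->GetNumericProperty(
"generic.current_allocated_bytes", &allocated)) {
printf("currently allocated: %zu bytes\n", allocated);
}
char stats[4096];
MallocExtension::instance()->GetStats(stats, sizeof(stats));
printf("%s\n", stats);
return 0;
}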

View File

@ -1,249 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2005, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
// Author: Sanjay Ghemawat
//
// This has the implementation details of malloc_hook that are needed
// to use malloc-hook inside the tcmalloc system. It does not hold
// any of the client-facing calls that are used to add new hooks.
#ifndef _MALLOC_HOOK_INL_H_
#define _MALLOC_HOOK_INL_H_
#include <stddef.h>
#include <sys/types.h>
#include "base/atomicops.h"
#include "base/basictypes.h"
#include <gperftools/malloc_hook.h>
#include "common.h" // for UNLIKELY
namespace base { namespace internal {
// Capacity of 8 means that HookList is 9 words.
static const int kHookListCapacity = 8;
// last entry is reserved for deprecated "singular" hooks. So we have
// 7 "normal" hooks per list
static const int kHookListMaxValues = 7;
static const int kHookListSingularIdx = 7;
// HookList: a class that provides synchronized insertions and removals and
// lockless traversal. Most of the implementation is in malloc_hook.cc.
template <typename T>
struct PERFTOOLS_DLL_DECL HookList {
COMPILE_ASSERT(sizeof(T) <= sizeof(AtomicWord), T_should_fit_in_AtomicWord);
// Adds value to the list. Note that duplicates are allowed. Thread-safe and
// blocking (acquires hooklist_spinlock). Returns true on success; false
// otherwise (failures include invalid value and no space left).
bool Add(T value);
void FixupPrivEndLocked();
// Removes the first entry matching value from the list. Thread-safe and
// blocking (acquires hooklist_spinlock). Returns true on success; false
// otherwise (failures include invalid value and no value found).
bool Remove(T value);
// Store up to n values of the list in output_array, and return the number of
// elements stored. Thread-safe and non-blocking. This is fast (one memory
// access) if the list is empty.
int Traverse(T* output_array, int n) const;
// Fast inline implementation for fast path of Invoke*Hook.
bool empty() const {
return base::subtle::NoBarrier_Load(&priv_end) == 0;
}
// Used purely to handle deprecated singular hooks
T GetSingular() const {
const AtomicWord *place = &priv_data[kHookListSingularIdx];
return bit_cast<T>(base::subtle::NoBarrier_Load(place));
}
T ExchangeSingular(T new_val);
// This internal data is not private so that the class is an aggregate and can
// be initialized by the linker. Don't access this directly. Use the
// INIT_HOOK_LIST macro in malloc_hook.cc.
// One more than the index of the last valid element in priv_data. During
// 'Remove' this may be past the last valid element in priv_data, but
// subsequent values will be 0.
//
// Index kHookListCapacity-1 is reserved as the 'deprecated' single hook pointer.
AtomicWord priv_end;
AtomicWord priv_data[kHookListCapacity];
};
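// A minimal usage sketch (not part of the original source), showing the
// HookList machinery through the public C API declared in
// <gperftools/malloc_hook_c.h>. LogNewHook and main() are hypothetical;
// a real hook must avoid re-entrant allocation.
#if 0
static void LogNewHook(const void* ptr, size_t size) {
  fprintf(stderr, "alloc %p (%zu bytes)\n", ptr, size);  // stderr: unbuffered
}

int main() {
  MallocHook_AddNewHook(&LogNewHook);     // HookList::Add - locked, bounded scan
  void* p = malloc(128);                  // hooks fire via lockless Traverse()
  free(p);
  MallocHook_RemoveNewHook(&LogNewHook);  // HookList::Remove - locked
  return 0;
}
#endif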
ATTRIBUTE_VISIBILITY_HIDDEN extern HookList<MallocHook::NewHook> new_hooks_;
ATTRIBUTE_VISIBILITY_HIDDEN extern HookList<MallocHook::DeleteHook> delete_hooks_;
ATTRIBUTE_VISIBILITY_HIDDEN extern HookList<MallocHook::PreMmapHook> premmap_hooks_;
ATTRIBUTE_VISIBILITY_HIDDEN extern HookList<MallocHook::MmapHook> mmap_hooks_;
ATTRIBUTE_VISIBILITY_HIDDEN extern HookList<MallocHook::MmapReplacement> mmap_replacement_;
ATTRIBUTE_VISIBILITY_HIDDEN extern HookList<MallocHook::MunmapHook> munmap_hooks_;
ATTRIBUTE_VISIBILITY_HIDDEN extern HookList<MallocHook::MunmapReplacement> munmap_replacement_;
ATTRIBUTE_VISIBILITY_HIDDEN extern HookList<MallocHook::MremapHook> mremap_hooks_;
ATTRIBUTE_VISIBILITY_HIDDEN extern HookList<MallocHook::PreSbrkHook> presbrk_hooks_;
ATTRIBUTE_VISIBILITY_HIDDEN extern HookList<MallocHook::SbrkHook> sbrk_hooks_;
} } // namespace base::internal
// The following method is DEPRECATED
inline MallocHook::NewHook MallocHook::GetNewHook() {
return base::internal::new_hooks_.GetSingular();
}
inline void MallocHook::InvokeNewHook(const void* p, size_t s) {
if (UNLIKELY(!base::internal::new_hooks_.empty())) {
InvokeNewHookSlow(p, s);
}
}
// The following method is DEPRECATED
inline MallocHook::DeleteHook MallocHook::GetDeleteHook() {
return base::internal::delete_hooks_.GetSingular();
}
inline void MallocHook::InvokeDeleteHook(const void* p) {
if (UNLIKELY(!base::internal::delete_hooks_.empty())) {
InvokeDeleteHookSlow(p);
}
}
// The following method is DEPRECATED
inline MallocHook::PreMmapHook MallocHook::GetPreMmapHook() {
return base::internal::premmap_hooks_.GetSingular();
}
inline void MallocHook::InvokePreMmapHook(const void* start,
size_t size,
int protection,
int flags,
int fd,
off_t offset) {
if (!base::internal::premmap_hooks_.empty()) {
InvokePreMmapHookSlow(start, size, protection, flags, fd, offset);
}
}
// The following method is DEPRECATED
inline MallocHook::MmapHook MallocHook::GetMmapHook() {
return base::internal::mmap_hooks_.GetSingular();
}
inline void MallocHook::InvokeMmapHook(const void* result,
const void* start,
size_t size,
int protection,
int flags,
int fd,
off_t offset) {
if (!base::internal::mmap_hooks_.empty()) {
InvokeMmapHookSlow(result, start, size, protection, flags, fd, offset);
}
}
inline bool MallocHook::InvokeMmapReplacement(const void* start,
size_t size,
int protection,
int flags,
int fd,
off_t offset,
void** result) {
if (!base::internal::mmap_replacement_.empty()) {
return InvokeMmapReplacementSlow(start, size,
protection, flags,
fd, offset,
result);
}
return false;
}
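// A minimal sketch (not part of the original source) of an mmap replacement
// hook; MyMmapReplacement and ArenaAlloc are hypothetical. Returning nonzero
// claims the call and suppresses the real mmap; returning 0 falls through.
#if 0
static int MyMmapReplacement(const void* start, size_t size, int protection,
                             int flags, int fd, off_t offset, void** result) {
  if (fd != -1) return 0;          // file-backed: let the real mmap handle it
  *result = ArenaAlloc(size);      // hypothetical private arena
  return 1;
}
// Installed with MallocHook_SetMmapReplacement(&MyMmapReplacement);
// only one replacement may be installed at a time (see malloc_hook.cc).
#endif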
// The following method is DEPRECATED
inline MallocHook::MunmapHook MallocHook::GetMunmapHook() {
return base::internal::munmap_hooks_.GetSingular();
}
inline void MallocHook::InvokeMunmapHook(const void* p, size_t size) {
if (!base::internal::munmap_hooks_.empty()) {
InvokeMunmapHookSlow(p, size);
}
}
inline bool MallocHook::InvokeMunmapReplacement(
const void* p, size_t size, int* result) {
if (!base::internal::mmap_replacement_.empty()) {
return InvokeMunmapReplacementSlow(p, size, result);
}
return false;
}
// The following method is DEPRECATED
inline MallocHook::MremapHook MallocHook::GetMremapHook() {
return base::internal::mremap_hooks_.GetSingular();
}
inline void MallocHook::InvokeMremapHook(const void* result,
const void* old_addr,
size_t old_size,
size_t new_size,
int flags,
const void* new_addr) {
if (!base::internal::mremap_hooks_.empty()) {
InvokeMremapHookSlow(result, old_addr, old_size, new_size, flags, new_addr);
}
}
// The following method is DEPRECATED
inline MallocHook::PreSbrkHook MallocHook::GetPreSbrkHook() {
return base::internal::presbrk_hooks_.GetSingular();
}
inline void MallocHook::InvokePreSbrkHook(ptrdiff_t increment) {
if (!base::internal::presbrk_hooks_.empty() && increment != 0) {
InvokePreSbrkHookSlow(increment);
}
}
// The following method is DEPRECATED
inline MallocHook::SbrkHook MallocHook::GetSbrkHook() {
return base::internal::sbrk_hooks_.GetSingular();
}
inline void MallocHook::InvokeSbrkHook(const void* result,
ptrdiff_t increment) {
if (!base::internal::sbrk_hooks_.empty() && increment != 0) {
InvokeSbrkHookSlow(result, increment);
}
}
#endif /* _MALLOC_HOOK_INL_H_ */

View File

@ -1,703 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2005, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
// Author: Sanjay Ghemawat <opensource@google.com>
#include "config.h"
// Disable the glibc prototype of mremap(), as older versions of the
// system headers define this function with only four arguments,
// whereas newer versions allow an optional fifth argument:
#ifdef HAVE_MMAP
# define mremap glibc_mremap
# include <sys/mman.h>
# undef mremap
#endif
#include <stddef.h>
#ifdef HAVE_STDINT_H
#include <stdint.h>
#endif
#include <algorithm>
#include "base/logging.h"
#include "base/spinlock.h"
#include "maybe_emergency_malloc.h"
#include "maybe_threads.h"
#include "malloc_hook-inl.h"
#include <gperftools/malloc_hook.h>
// This #ifdef should almost never be set. Set NO_TCMALLOC_SAMPLES if
// you're porting to a system where you really can't get a stacktrace.
#ifdef NO_TCMALLOC_SAMPLES
// We use #define so code compiles even if you #include stacktrace.h somehow.
# define GetStackTrace(stack, depth, skip) (0)
#else
# include <gperftools/stacktrace.h>
#endif
// __THROW is defined in glibc systems. It means, counter-intuitively,
// "This function will never throw an exception." It's an optional
// optimization tool, but we may need to use it to match glibc prototypes.
#ifndef __THROW // I guess we're not on a glibc system
# define __THROW // __THROW is just an optimization, so ok to make it ""
#endif
using std::copy;
// Declaration of default weak initialization function, that can be overridden
// by linking-in a strong definition (as heap-checker.cc does). This is
// extern "C" so that it doesn't trigger gold's --detect-odr-violations warning,
// which only looks at C++ symbols.
//
// This function is declared here as weak, and defined later, rather than a more
// straightforward simple weak definition, as a workaround for an icc compiler
// issue (Intel reference 290819). This issue causes icc to resolve weak
// symbols too early, at compile rather than link time. By declaring it (weak)
// here, then defining it below after its use, we can avoid the problem.
extern "C" {
ATTRIBUTE_WEAK void MallocHook_InitAtFirstAllocation_HeapLeakChecker();
}
namespace {
void RemoveInitialHooksAndCallInitializers(); // below.
pthread_once_t once = PTHREAD_ONCE_INIT;
// These hooks are installed in MallocHook as the only initial hooks. The first
// hook that is called will run RemoveInitialHooksAndCallInitializers (see the
// definition below) and then redispatch to any malloc hooks installed by
// RemoveInitialHooksAndCallInitializers.
//
// Note(llib): there is a possibility of a race in the event that there are
// multiple threads running before the first allocation. This is pretty
// difficult to achieve, but if it happens then multiple threads may
// concurrently perform allocations. The first caller will call
// RemoveInitialHooksAndCallInitializers via one of the initial hooks. A
// concurrent allocation may, depending on timing, either:
// * still have its initial malloc hook installed, run that, block waiting
// for the first caller to finish its call to
// RemoveInitialHooksAndCallInitializers, and then proceed normally.
// * occur some time during the RemoveInitialHooksAndCallInitializers call, at
// which point there could be no initial hooks and the subsequent hooks that
// are about to be set up by RemoveInitialHooksAndCallInitializers haven't
// been installed yet. I think the worst we can get is that some allocations
// will not get reported to some hooks set by the initializers called from
// RemoveInitialHooksAndCallInitializers.
void InitialNewHook(const void* ptr, size_t size) {
perftools_pthread_once(&once, &RemoveInitialHooksAndCallInitializers);
MallocHook::InvokeNewHook(ptr, size);
}
void InitialPreMMapHook(const void* start,
size_t size,
int protection,
int flags,
int fd,
off_t offset) {
perftools_pthread_once(&once, &RemoveInitialHooksAndCallInitializers);
MallocHook::InvokePreMmapHook(start, size, protection, flags, fd, offset);
}
void InitialPreSbrkHook(ptrdiff_t increment) {
perftools_pthread_once(&once, &RemoveInitialHooksAndCallInitializers);
MallocHook::InvokePreSbrkHook(increment);
}
// This function is called at most once by one of the above initial malloc
// hooks. It removes all initial hooks and initializes all other clients that
// want to get control at the very first memory allocation. The initializers
// may assume that the initial malloc hooks have been removed. The initializers
// may set up malloc hooks and allocate memory.
void RemoveInitialHooksAndCallInitializers() {
RAW_CHECK(MallocHook::RemoveNewHook(&InitialNewHook), "");
RAW_CHECK(MallocHook::RemovePreMmapHook(&InitialPreMMapHook), "");
RAW_CHECK(MallocHook::RemovePreSbrkHook(&InitialPreSbrkHook), "");
// HeapLeakChecker is currently the only module that needs to get control on
// the first memory allocation, but one can add other modules by following the
// same weak/strong function pattern.
MallocHook_InitAtFirstAllocation_HeapLeakChecker();
}
} // namespace
// Weak default initialization function that must go after its use.
extern "C" void MallocHook_InitAtFirstAllocation_HeapLeakChecker() {
// Do nothing.
}
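// The weak/strong pattern above, reduced to a standalone sketch (not part of
// the original source; names are hypothetical). ATTRIBUTE_WEAK expands to
// __attribute__((weak)) on GCC/Clang.
#if 0
// module.cc - weak default, always linked:
extern "C" ATTRIBUTE_WEAK void OnFirstAllocation();
extern "C" void OnFirstAllocation() { /* do nothing */ }

// checker.cc - optional strong definition; if this object file is linked in,
// the linker silently prefers it over the weak default above:
extern "C" void OnFirstAllocation() { /* initialize the checker */ }
#endif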
namespace base { namespace internal {
// This lock is shared between all implementations of HookList::Add & Remove.
// The potential for contention is very small. This needs to be a SpinLock and
// not a Mutex since it's possible for Mutex locking to allocate memory (e.g.,
// per-thread allocation in debug builds), which could cause infinite recursion.
static SpinLock hooklist_spinlock(base::LINKER_INITIALIZED);
template <typename T>
bool HookList<T>::Add(T value_as_t) {
AtomicWord value = bit_cast<AtomicWord>(value_as_t);
if (value == 0) {
return false;
}
SpinLockHolder l(&hooklist_spinlock);
// Find the first slot in priv_data that is 0.
int index = 0;
while ((index < kHookListMaxValues) &&
(base::subtle::NoBarrier_Load(&priv_data[index]) != 0)) {
++index;
}
if (index == kHookListMaxValues) {
return false;
}
AtomicWord prev_num_hooks = base::subtle::Acquire_Load(&priv_end);
base::subtle::NoBarrier_Store(&priv_data[index], value);
if (prev_num_hooks <= index) {
base::subtle::NoBarrier_Store(&priv_end, index + 1);
}
return true;
}
template <typename T>
void HookList<T>::FixupPrivEndLocked() {
AtomicWord hooks_end = base::subtle::NoBarrier_Load(&priv_end);
while ((hooks_end > 0) &&
(base::subtle::NoBarrier_Load(&priv_data[hooks_end - 1]) == 0)) {
--hooks_end;
}
base::subtle::NoBarrier_Store(&priv_end, hooks_end);
}
template <typename T>
bool HookList<T>::Remove(T value_as_t) {
if (value_as_t == 0) {
return false;
}
SpinLockHolder l(&hooklist_spinlock);
AtomicWord hooks_end = base::subtle::NoBarrier_Load(&priv_end);
int index = 0;
while (index < hooks_end && value_as_t != bit_cast<T>(
base::subtle::NoBarrier_Load(&priv_data[index]))) {
++index;
}
if (index == hooks_end) {
return false;
}
base::subtle::NoBarrier_Store(&priv_data[index], 0);
FixupPrivEndLocked();
return true;
}
template <typename T>
int HookList<T>::Traverse(T* output_array, int n) const {
AtomicWord hooks_end = base::subtle::Acquire_Load(&priv_end);
int actual_hooks_end = 0;
for (int i = 0; i < hooks_end && n > 0; ++i) {
AtomicWord data = base::subtle::Acquire_Load(&priv_data[i]);
if (data != 0) {
*output_array++ = bit_cast<T>(data);
++actual_hooks_end;
--n;
}
}
return actual_hooks_end;
}
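// A unit-test-style sketch (not part of the original source; see also the
// explicit instantiation for malloc_hook_test.cc below) of the Add / Remove /
// Traverse invariants. Fn, f1, f2 and TestHookList are hypothetical.
#if 0
typedef void (*Fn)();
static void f1() {}
static void f2() {}
static HookList<Fn> list = { 0 };

static void TestHookList() {
  Fn out[kHookListMaxValues];
  RAW_CHECK(list.Add(&f1) && list.Add(&f2), "");
  RAW_CHECK(list.Traverse(out, kHookListMaxValues) == 2, "");  // snapshot
  RAW_CHECK(list.Remove(&f1), "");   // zeroes slot 0; priv_end stays at 2
  RAW_CHECK(list.Traverse(out, kHookListMaxValues) == 1, "");  // holes skipped
  RAW_CHECK(list.Remove(&f2), "");   // FixupPrivEndLocked shrinks priv_end to 0
  RAW_CHECK(list.empty(), "");
}
#endif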
template <typename T>
T HookList<T>::ExchangeSingular(T value_as_t) {
AtomicWord value = bit_cast<AtomicWord>(value_as_t);
AtomicWord old_value;
SpinLockHolder l(&hooklist_spinlock);
old_value = base::subtle::NoBarrier_Load(&priv_data[kHookListSingularIdx]);
base::subtle::NoBarrier_Store(&priv_data[kHookListSingularIdx], value);
if (value != 0) {
base::subtle::NoBarrier_Store(&priv_end, kHookListSingularIdx + 1);
} else {
FixupPrivEndLocked();
}
return bit_cast<T>(old_value);
}
// Initialize a HookList (optionally with the given initial_value at index 0).
#define INIT_HOOK_LIST { 0 }
#define INIT_HOOK_LIST_WITH_VALUE(initial_value) \
{ 1, { reinterpret_cast<AtomicWord>(initial_value) } }
// Explicit instantiation for malloc_hook_test.cc. This ensures all the methods
// are instantiated.
template struct HookList<MallocHook::NewHook>;
HookList<MallocHook::NewHook> new_hooks_ =
INIT_HOOK_LIST_WITH_VALUE(&InitialNewHook);
HookList<MallocHook::DeleteHook> delete_hooks_ = INIT_HOOK_LIST;
HookList<MallocHook::PreMmapHook> premmap_hooks_ =
INIT_HOOK_LIST_WITH_VALUE(&InitialPreMMapHook);
HookList<MallocHook::MmapHook> mmap_hooks_ = INIT_HOOK_LIST;
HookList<MallocHook::MunmapHook> munmap_hooks_ = INIT_HOOK_LIST;
HookList<MallocHook::MremapHook> mremap_hooks_ = INIT_HOOK_LIST;
HookList<MallocHook::PreSbrkHook> presbrk_hooks_ =
INIT_HOOK_LIST_WITH_VALUE(InitialPreSbrkHook);
HookList<MallocHook::SbrkHook> sbrk_hooks_ = INIT_HOOK_LIST;
// These lists contain either 0 or 1 hooks.
HookList<MallocHook::MmapReplacement> mmap_replacement_ = { 0 };
HookList<MallocHook::MunmapReplacement> munmap_replacement_ = { 0 };
#undef INIT_HOOK_LIST_WITH_VALUE
#undef INIT_HOOK_LIST
} } // namespace base::internal
using base::internal::kHookListMaxValues;
using base::internal::new_hooks_;
using base::internal::delete_hooks_;
using base::internal::premmap_hooks_;
using base::internal::mmap_hooks_;
using base::internal::mmap_replacement_;
using base::internal::munmap_hooks_;
using base::internal::munmap_replacement_;
using base::internal::mremap_hooks_;
using base::internal::presbrk_hooks_;
using base::internal::sbrk_hooks_;
// These are available as C bindings as well as C++, hence their
// definition outside the MallocHook class.
extern "C"
int MallocHook_AddNewHook(MallocHook_NewHook hook) {
RAW_VLOG(10, "AddNewHook(%p)", hook);
return new_hooks_.Add(hook);
}
extern "C"
int MallocHook_RemoveNewHook(MallocHook_NewHook hook) {
RAW_VLOG(10, "RemoveNewHook(%p)", hook);
return new_hooks_.Remove(hook);
}
extern "C"
int MallocHook_AddDeleteHook(MallocHook_DeleteHook hook) {
RAW_VLOG(10, "AddDeleteHook(%p)", hook);
return delete_hooks_.Add(hook);
}
extern "C"
int MallocHook_RemoveDeleteHook(MallocHook_DeleteHook hook) {
RAW_VLOG(10, "RemoveDeleteHook(%p)", hook);
return delete_hooks_.Remove(hook);
}
extern "C"
int MallocHook_AddPreMmapHook(MallocHook_PreMmapHook hook) {
RAW_VLOG(10, "AddPreMmapHook(%p)", hook);
return premmap_hooks_.Add(hook);
}
extern "C"
int MallocHook_RemovePreMmapHook(MallocHook_PreMmapHook hook) {
RAW_VLOG(10, "RemovePreMmapHook(%p)", hook);
return premmap_hooks_.Remove(hook);
}
extern "C"
int MallocHook_SetMmapReplacement(MallocHook_MmapReplacement hook) {
RAW_VLOG(10, "SetMmapReplacement(%p)", hook);
// NOTE: this is a best-effort CHECK. Concurrent sets could succeed since
// this test is outside of the Add spin lock.
RAW_CHECK(mmap_replacement_.empty(), "Only one MMapReplacement is allowed.");
return mmap_replacement_.Add(hook);
}
extern "C"
int MallocHook_RemoveMmapReplacement(MallocHook_MmapReplacement hook) {
RAW_VLOG(10, "RemoveMmapReplacement(%p)", hook);
return mmap_replacement_.Remove(hook);
}
extern "C"
int MallocHook_AddMmapHook(MallocHook_MmapHook hook) {
RAW_VLOG(10, "AddMmapHook(%p)", hook);
return mmap_hooks_.Add(hook);
}
extern "C"
int MallocHook_RemoveMmapHook(MallocHook_MmapHook hook) {
RAW_VLOG(10, "RemoveMmapHook(%p)", hook);
return mmap_hooks_.Remove(hook);
}
extern "C"
int MallocHook_AddMunmapHook(MallocHook_MunmapHook hook) {
RAW_VLOG(10, "AddMunmapHook(%p)", hook);
return munmap_hooks_.Add(hook);
}
extern "C"
int MallocHook_RemoveMunmapHook(MallocHook_MunmapHook hook) {
RAW_VLOG(10, "RemoveMunmapHook(%p)", hook);
return munmap_hooks_.Remove(hook);
}
extern "C"
int MallocHook_SetMunmapReplacement(MallocHook_MunmapReplacement hook) {
RAW_VLOG(10, "SetMunmapReplacement(%p)", hook);
// NOTE: this is a best-effort CHECK. Concurrent sets could succeed since
// this test is outside of the Add spin lock.
RAW_CHECK(munmap_replacement_.empty(),
"Only one MunmapReplacement is allowed.");
return munmap_replacement_.Add(hook);
}
extern "C"
int MallocHook_RemoveMunmapReplacement(MallocHook_MunmapReplacement hook) {
RAW_VLOG(10, "RemoveMunmapReplacement(%p)", hook);
return munmap_replacement_.Remove(hook);
}
extern "C"
int MallocHook_AddMremapHook(MallocHook_MremapHook hook) {
RAW_VLOG(10, "AddMremapHook(%p)", hook);
return mremap_hooks_.Add(hook);
}
extern "C"
int MallocHook_RemoveMremapHook(MallocHook_MremapHook hook) {
RAW_VLOG(10, "RemoveMremapHook(%p)", hook);
return mremap_hooks_.Remove(hook);
}
extern "C"
int MallocHook_AddPreSbrkHook(MallocHook_PreSbrkHook hook) {
RAW_VLOG(10, "AddPreSbrkHook(%p)", hook);
return presbrk_hooks_.Add(hook);
}
extern "C"
int MallocHook_RemovePreSbrkHook(MallocHook_PreSbrkHook hook) {
RAW_VLOG(10, "RemovePreSbrkHook(%p)", hook);
return presbrk_hooks_.Remove(hook);
}
extern "C"
int MallocHook_AddSbrkHook(MallocHook_SbrkHook hook) {
RAW_VLOG(10, "AddSbrkHook(%p)", hook);
return sbrk_hooks_.Add(hook);
}
extern "C"
int MallocHook_RemoveSbrkHook(MallocHook_SbrkHook hook) {
RAW_VLOG(10, "RemoveSbrkHook(%p)", hook);
return sbrk_hooks_.Remove(hook);
}
// The code below is DEPRECATED.
extern "C"
MallocHook_NewHook MallocHook_SetNewHook(MallocHook_NewHook hook) {
RAW_VLOG(10, "SetNewHook(%p)", hook);
return new_hooks_.ExchangeSingular(hook);
}
extern "C"
MallocHook_DeleteHook MallocHook_SetDeleteHook(MallocHook_DeleteHook hook) {
RAW_VLOG(10, "SetDeleteHook(%p)", hook);
return delete_hooks_.ExchangeSingular(hook);
}
extern "C"
MallocHook_PreMmapHook MallocHook_SetPreMmapHook(MallocHook_PreMmapHook hook) {
RAW_VLOG(10, "SetPreMmapHook(%p)", hook);
return premmap_hooks_.ExchangeSingular(hook);
}
extern "C"
MallocHook_MmapHook MallocHook_SetMmapHook(MallocHook_MmapHook hook) {
RAW_VLOG(10, "SetMmapHook(%p)", hook);
return mmap_hooks_.ExchangeSingular(hook);
}
extern "C"
MallocHook_MunmapHook MallocHook_SetMunmapHook(MallocHook_MunmapHook hook) {
RAW_VLOG(10, "SetMunmapHook(%p)", hook);
return munmap_hooks_.ExchangeSingular(hook);
}
extern "C"
MallocHook_MremapHook MallocHook_SetMremapHook(MallocHook_MremapHook hook) {
RAW_VLOG(10, "SetMremapHook(%p)", hook);
return mremap_hooks_.ExchangeSingular(hook);
}
extern "C"
MallocHook_PreSbrkHook MallocHook_SetPreSbrkHook(MallocHook_PreSbrkHook hook) {
RAW_VLOG(10, "SetPreSbrkHook(%p)", hook);
return presbrk_hooks_.ExchangeSingular(hook);
}
extern "C"
MallocHook_SbrkHook MallocHook_SetSbrkHook(MallocHook_SbrkHook hook) {
RAW_VLOG(10, "SetSbrkHook(%p)", hook);
return sbrk_hooks_.ExchangeSingular(hook);
}
// End of DEPRECATED code section.
// Note: embedding the function calls inside the traversal of HookList would be
// very confusing, as it is legal for a hook to remove itself and add other
// hooks. Doing the traversal first and then calling the hooks ensures we
// only call the hooks that were registered at the start.
#define INVOKE_HOOKS(HookType, hook_list, args) do { \
HookType hooks[kHookListMaxValues]; \
int num_hooks = hook_list.Traverse(hooks, kHookListMaxValues); \
for (int i = 0; i < num_hooks; ++i) { \
(*hooks[i])args; \
} \
} while (0)
// There should only be one replacement. Return the result of the first
// one, or false if there is none.
#define INVOKE_REPLACEMENT(HookType, hook_list, args) do { \
HookType hooks[kHookListMaxValues]; \
int num_hooks = hook_list.Traverse(hooks, kHookListMaxValues); \
return (num_hooks > 0 && (*hooks[0])args); \
} while (0)
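// A sketch (not part of the original source) of why the snapshot matters:
// a hook may legally remove itself while running. SelfRemovingHook is
// hypothetical.
#if 0
static void SelfRemovingHook(const void* ptr, size_t size) {
  MallocHook::RemoveNewHook(&SelfRemovingHook);  // mutates new_hooks_ ...
  // ... but the INVOKE_HOOKS loop keeps walking its private 'hooks' array,
  // so every hook registered at the start still runs exactly once.
}
#endif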
void MallocHook::InvokeNewHookSlow(const void* p, size_t s) {
if (tcmalloc::IsEmergencyPtr(p)) {
return;
}
INVOKE_HOOKS(NewHook, new_hooks_, (p, s));
}
void MallocHook::InvokeDeleteHookSlow(const void* p) {
if (tcmalloc::IsEmergencyPtr(p)) {
return;
}
INVOKE_HOOKS(DeleteHook, delete_hooks_, (p));
}
void MallocHook::InvokePreMmapHookSlow(const void* start,
size_t size,
int protection,
int flags,
int fd,
off_t offset) {
INVOKE_HOOKS(PreMmapHook, premmap_hooks_, (start, size, protection, flags, fd,
offset));
}
void MallocHook::InvokeMmapHookSlow(const void* result,
const void* start,
size_t size,
int protection,
int flags,
int fd,
off_t offset) {
INVOKE_HOOKS(MmapHook, mmap_hooks_, (result, start, size, protection, flags,
fd, offset));
}
bool MallocHook::InvokeMmapReplacementSlow(const void* start,
size_t size,
int protection,
int flags,
int fd,
off_t offset,
void** result) {
INVOKE_REPLACEMENT(MmapReplacement, mmap_replacement_,
(start, size, protection, flags, fd, offset, result));
}
void MallocHook::InvokeMunmapHookSlow(const void* p, size_t s) {
INVOKE_HOOKS(MunmapHook, munmap_hooks_, (p, s));
}
bool MallocHook::InvokeMunmapReplacementSlow(const void* p,
size_t s,
int* result) {
INVOKE_REPLACEMENT(MunmapReplacement, munmap_replacement_, (p, s, result));
}
void MallocHook::InvokeMremapHookSlow(const void* result,
const void* old_addr,
size_t old_size,
size_t new_size,
int flags,
const void* new_addr) {
INVOKE_HOOKS(MremapHook, mremap_hooks_, (result, old_addr, old_size, new_size,
flags, new_addr));
}
void MallocHook::InvokePreSbrkHookSlow(ptrdiff_t increment) {
INVOKE_HOOKS(PreSbrkHook, presbrk_hooks_, (increment));
}
void MallocHook::InvokeSbrkHookSlow(const void* result, ptrdiff_t increment) {
INVOKE_HOOKS(SbrkHook, sbrk_hooks_, (result, increment));
}
#undef INVOKE_HOOKS
#ifndef NO_TCMALLOC_SAMPLES
DEFINE_ATTRIBUTE_SECTION_VARS(google_malloc);
DECLARE_ATTRIBUTE_SECTION_VARS(google_malloc);
// actual functions are in debugallocation.cc or tcmalloc.cc
DEFINE_ATTRIBUTE_SECTION_VARS(malloc_hook);
DECLARE_ATTRIBUTE_SECTION_VARS(malloc_hook);
// actual functions are in this file, malloc_hook.cc, and low_level_alloc.cc
#define ADDR_IN_ATTRIBUTE_SECTION(addr, name) \
(reinterpret_cast<uintptr_t>(ATTRIBUTE_SECTION_START(name)) <= \
reinterpret_cast<uintptr_t>(addr) && \
reinterpret_cast<uintptr_t>(addr) < \
reinterpret_cast<uintptr_t>(ATTRIBUTE_SECTION_STOP(name)))
// Return true iff 'caller' is a return address within a function
// that calls one of our hooks via MallocHook::Invoke*.
// A helper for GetCallerStackTrace.
static inline bool InHookCaller(const void* caller) {
return ADDR_IN_ATTRIBUTE_SECTION(caller, google_malloc) ||
ADDR_IN_ATTRIBUTE_SECTION(caller, malloc_hook);
// We can use one section for everything except tcmalloc_or_debug
// due to its special linkage mode, which prevents merging of the sections.
}
#undef ADDR_IN_ATTRIBUTE_SECTION
static bool checked_sections = false;
static inline void CheckInHookCaller() {
if (!checked_sections) {
INIT_ATTRIBUTE_SECTION_VARS(google_malloc);
if (ATTRIBUTE_SECTION_START(google_malloc) ==
ATTRIBUTE_SECTION_STOP(google_malloc)) {
RAW_LOG(ERROR, "google_malloc section is missing, "
"thus InHookCaller is broken!");
}
INIT_ATTRIBUTE_SECTION_VARS(malloc_hook);
if (ATTRIBUTE_SECTION_START(malloc_hook) ==
ATTRIBUTE_SECTION_STOP(malloc_hook)) {
RAW_LOG(ERROR, "malloc_hook section is missing, "
"thus InHookCaller is broken!");
}
checked_sections = true;
}
}
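// The linker feature this machinery builds on, as a standalone sketch (not
// part of the original source): GNU linkers synthesize __start_NAME and
// __stop_NAME symbols for sections whose names are valid C identifiers.
// InMallocHookSection is a hypothetical reduction of ADDR_IN_ATTRIBUTE_SECTION.
#if 0
extern char __start_malloc_hook[];   // provided by the linker
extern char __stop_malloc_hook[];

static bool InMallocHookSection(const void* addr) {
  return reinterpret_cast<const char*>(addr) >= __start_malloc_hook &&
         reinterpret_cast<const char*>(addr) <  __stop_malloc_hook;
}
#endif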
#endif // !NO_TCMALLOC_SAMPLES
// We can improve behavior/compactness of this function
// if we pass a generic test function (with a generic arg)
// into the implementations for GetStackTrace instead of the skip_count.
extern "C" int MallocHook_GetCallerStackTrace(void** result, int max_depth,
int skip_count) {
#if defined(NO_TCMALLOC_SAMPLES)
return 0;
#elif !defined(HAVE_ATTRIBUTE_SECTION_START)
// Fall back to GetStackTrace and good old but fragile frame skip counts.
// Note: this path is inaccurate when a hook is not called directly by an
// allocation function but is daisy-chained through another hook;
// search for MallocHook::(Get|Set|Invoke)* to find such cases.
return GetStackTrace(result, max_depth, skip_count + int(DEBUG_MODE));
// Due to -foptimize-sibling-calls in opt mode,
// there is no need for an extra frame skip here.
#else
CheckInHookCaller();
// MallocHook caller determination via InHookCaller works; use it:
static const int kMaxSkip = 32 + 6 + 3;
// Constant tuned to do just one GetStackTrace call below in practice
// and not get many frames that we don't actually need:
// currently the max passed max_depth is 32,
// the max passed/needed skip_count is 6,
// and 3 accounts for some hook daisy-chaining.
static const int kStackSize = kMaxSkip + 1;
void* stack[kStackSize];
int depth = GetStackTrace(stack, kStackSize, 1); // skip this function frame
if (depth == 0) // silently propagate cases when GetStackTrace does not work
return 0;
for (int i = 0; i < depth; ++i) { // stack[0] is our immediate caller
if (InHookCaller(stack[i])) {
RAW_VLOG(10, "Found hooked allocator at %d: %p <- %p",
i, stack[i], stack[i+1]);
i += 1; // skip hook caller frame
depth -= i; // correct depth
if (depth > max_depth) depth = max_depth;
copy(stack + i, stack + i + depth, result);
if (depth < max_depth && depth + i == kStackSize) {
// get frames for the missing depth
depth +=
GetStackTrace(result + depth, max_depth - depth, 1 + kStackSize);
}
return depth;
}
}
RAW_LOG(WARNING, "Hooked allocator frame not found, returning empty trace");
// If this happens, try increasing kMaxSkip,
// or else something must be wrong with InHookCaller
// (e.g. for every section used in InHookCaller,
// all functions in that section must be inside the same library).
return 0;
#endif
}
// On systems where we know how, we override mmap/munmap/mremap/sbrk
// to provide support for calling the related hooks (in addition,
// of course, to doing what these functions normally do).
#if defined(__linux)
# include "malloc_hook_mmap_linux.h"
#elif defined(__FreeBSD__)
# include "malloc_hook_mmap_freebsd.h"
#else
/*static*/void* MallocHook::UnhookedMMap(void *start, size_t length, int prot,
int flags, int fd, off_t offset) {
void* result;
if (!MallocHook::InvokeMmapReplacement(
start, length, prot, flags, fd, offset, &result)) {
result = mmap(start, length, prot, flags, fd, offset);
}
return result;
}
/*static*/int MallocHook::UnhookedMUnmap(void *start, size_t length) {
int result;
if (!MallocHook::InvokeMunmapReplacement(start, length, &result)) {
result = munmap(start, length);
}
return result;
}
#endif

View File

@ -1,135 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2011, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Override mmap/munmap/mremap/sbrk to provide support for calling the
// related hooks (in addition, of course, to doing what these
// functions normally do).
#ifndef __FreeBSD__
# error Should only be including malloc_hook_mmap_freebsd.h on FreeBSD systems.
#endif
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/mman.h>
#include <errno.h>
#include <dlfcn.h>
// Make sure mmap doesn't get #define'd away by <sys/mman.h>
#undef mmap
// According to the FreeBSD documentation, use syscall if you do not
// need 64-bit alignment; otherwise use __syscall. Indeed, syscall
// doesn't work correctly in most situations on 64-bit: its return
// type is 'int', so for things like SYS_mmap it actually truncates
// the returned address to 32 bits.
#if defined(__amd64__) || defined(__x86_64__)
# define MALLOC_HOOK_SYSCALL __syscall
#else
# define MALLOC_HOOK_SYSCALL syscall
#endif
extern "C" {
void* mmap(void *start, size_t length, int prot, int flags,
int fd, off_t offset) __THROW
ATTRIBUTE_SECTION(malloc_hook);
int munmap(void* start, size_t length) __THROW
ATTRIBUTE_SECTION(malloc_hook);
void* sbrk(intptr_t increment) __THROW
ATTRIBUTE_SECTION(malloc_hook);
}
static inline void* do_mmap(void *start, size_t length,
int prot, int flags,
int fd, off_t offset) __THROW {
return (void *)MALLOC_HOOK_SYSCALL(SYS_mmap,
start, length, prot, flags, fd, offset);
}
static inline void* do_sbrk(intptr_t increment) {
static void *(*libc_sbrk)(intptr_t);
if (libc_sbrk == NULL)
libc_sbrk = (void *(*)(intptr_t))dlsym(RTLD_NEXT, "sbrk");
return libc_sbrk(increment);
}
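// dlsym(RTLD_NEXT, ...) above is the general pattern for forwarding from an
// interposed function to the next definition in link order (usually libc's).
// A standalone sketch (not part of the original source; close(2) is chosen
// because a malloc interposer would additionally have to break the
// dlsym -> malloc recursion):
#if 0
#include <dlfcn.h>   // RTLD_NEXT requires _GNU_SOURCE on glibc

static int (*real_close)(int);

extern "C" int close(int fd) {
  if (real_close == NULL)
    real_close = (int (*)(int))dlsym(RTLD_NEXT, "close");
  // ... instrumentation could go here ...
  return real_close(fd);
}
#endif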
extern "C" void* mmap(void *start, size_t length, int prot, int flags,
int fd, off_t offset) __THROW {
MallocHook::InvokePreMmapHook(start, length, prot, flags, fd, offset);
void *result;
if (!MallocHook::InvokeMmapReplacement(
start, length, prot, flags, fd, offset, &result)) {
result = do_mmap(start, length, prot, flags, fd,
static_cast<size_t>(offset)); // avoid sign extension
}
MallocHook::InvokeMmapHook(result, start, length, prot, flags, fd, offset);
return result;
}
extern "C" int munmap(void* start, size_t length) __THROW {
MallocHook::InvokeMunmapHook(start, length);
int result;
if (!MallocHook::InvokeMunmapReplacement(start, length, &result)) {
result = MALLOC_HOOK_SYSCALL(SYS_munmap, start, length);
}
return result;
}
extern "C" void* sbrk(intptr_t increment) __THROW {
MallocHook::InvokePreSbrkHook(increment);
void *result = do_sbrk(increment);
MallocHook::InvokeSbrkHook(result, increment);
return result;
}
/*static*/void* MallocHook::UnhookedMMap(void *start, size_t length, int prot,
int flags, int fd, off_t offset) {
void* result;
if (!MallocHook::InvokeMmapReplacement(
start, length, prot, flags, fd, offset, &result)) {
result = do_mmap(start, length, prot, flags, fd, offset);
}
return result;
}
/*static*/int MallocHook::UnhookedMUnmap(void *start, size_t length) {
int result;
if (!MallocHook::InvokeMunmapReplacement(start, length, &result)) {
result = MALLOC_HOOK_SYSCALL(SYS_munmap, start, length);
}
return result;
}
#undef MALLOC_HOOK_SYSCALL

View File

@ -1,238 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2005, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
// Author: Sanjay Ghemawat <opensource@google.com>
// We define mmap() and mmap64(), which somewhat reimplement libc's mmap
// syscall stubs. Unfortunately libc only exports the stubs via weak symbols
// (which we're overriding with our mmap64() and mmap() wrappers), so we can't
// just call through to them.
#ifndef __linux
# error Should only be including malloc_hook_mmap_linux.h on linux systems.
#endif
#include <unistd.h>
#include <syscall.h>
#include <sys/mman.h>
#include <errno.h>
#include "base/linux_syscall_support.h"
// The x86-32 case and the x86-64 case differ:
// 32b has a mmap2() syscall, 64b does not.
// 64b and 32b have different calling conventions for mmap().
// I test for 64-bit first so I don't have to do things like
// '#if (defined(__mips__) && !defined(__MIPS64__))' as a mips32 check.
#if defined(__x86_64__) || defined(__PPC64__) || defined(__aarch64__) || (defined(_MIPS_SIM) && _MIPS_SIM == _ABI64) || defined(__s390__)
static inline void* do_mmap64(void *start, size_t length,
int prot, int flags,
int fd, __off64_t offset) __THROW {
return sys_mmap(start, length, prot, flags, fd, offset);
}
#define MALLOC_HOOK_HAVE_DO_MMAP64 1
#elif defined(__i386__) || defined(__PPC__) || defined(__mips__) || \
defined(__arm__)
static inline void* do_mmap64(void *start, size_t length,
int prot, int flags,
int fd, __off64_t offset) __THROW {
void *result;
// Try mmap2() unless we already know it's not supported
static bool have_mmap2 = true;
if (have_mmap2) {
static int pagesize = 0;
if (!pagesize) pagesize = getpagesize();
// Check that the offset is page aligned
if (offset & (pagesize - 1)) {
result = MAP_FAILED;
errno = EINVAL;
goto out;
}
result = (void *)syscall(SYS_mmap2,
start, length, prot, flags, fd,
(off_t) (offset / pagesize));
if (result != MAP_FAILED || errno != ENOSYS) goto out;
// We don't have mmap2() after all - don't bother trying it in the future
have_mmap2 = false;
}
if (((off_t)offset) != offset) {
// If we're trying to map a 64-bit offset, fail now since we don't
// have 64-bit mmap() support.
result = MAP_FAILED;
errno = EINVAL;
goto out;
}
#ifdef __NR_mmap
{
// Fall back to old 32-bit offset mmap() call
// Old syscall interface cannot handle six args, so pass in an array
int32 args[6] = { (int32) start, (int32) length, prot, flags, fd,
(int32)(off_t) offset };
result = (void *)syscall(SYS_mmap, args);
}
#else
// Some Linux ports, like ARM EABI Linux, have no mmap, just mmap2.
result = MAP_FAILED;
#endif
out:
return result;
}
#define MALLOC_HOOK_HAVE_DO_MMAP64 1
#endif // #if defined(__x86_64__)
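// A worked example (not part of the original source; numbers hypothetical)
// of the page-unit conversion above: mmap2 takes the file offset in pages,
// which is how a 32-bit argument can address offsets past 4 GiB.
#if 0
const __off64_t offset = 0x180000000LL;          // 6 GiB: > 32-bit off_t
const int pagesize = 4096;
// offset & (pagesize - 1) == 0                  -> page aligned, accepted
const off_t pages = (off_t)(offset / pagesize);  // 0x180000: fits in 32 bits
// syscall(SYS_mmap2, start, length, prot, flags, fd, pages);
#endif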
#ifdef MALLOC_HOOK_HAVE_DO_MMAP64
// We use the do_mmap64 abstraction to put the MallocHook::InvokeMmapHook
// calls right into mmap and mmap64, so that the stack frames in the caller's
// stack are at the same offsets for all the calls of memory-allocating
// functions.
// Put all callers of MallocHook::Invoke* in this module into the
// malloc_hook section,
// so that MallocHook::GetCallerStackTrace can function accurately:
// Make sure mmap doesn't get #define'd away by <sys/mman.h>
# undef mmap
extern "C" {
void* mmap64(void *start, size_t length, int prot, int flags,
int fd, __off64_t offset) __THROW
ATTRIBUTE_SECTION(malloc_hook);
void* mmap(void *start, size_t length, int prot, int flags,
int fd, off_t offset) __THROW
ATTRIBUTE_SECTION(malloc_hook);
int munmap(void* start, size_t length) __THROW
ATTRIBUTE_SECTION(malloc_hook);
void* mremap(void* old_addr, size_t old_size, size_t new_size,
int flags, ...) __THROW
ATTRIBUTE_SECTION(malloc_hook);
void* sbrk(ptrdiff_t increment) __THROW
ATTRIBUTE_SECTION(malloc_hook);
}
extern "C" void* mmap64(void *start, size_t length, int prot, int flags,
int fd, __off64_t offset) __THROW {
MallocHook::InvokePreMmapHook(start, length, prot, flags, fd, offset);
void *result;
if (!MallocHook::InvokeMmapReplacement(
start, length, prot, flags, fd, offset, &result)) {
result = do_mmap64(start, length, prot, flags, fd, offset);
}
MallocHook::InvokeMmapHook(result, start, length, prot, flags, fd, offset);
return result;
}
# if !defined(__USE_FILE_OFFSET64) || !defined(__REDIRECT_NTH)
extern "C" void* mmap(void *start, size_t length, int prot, int flags,
int fd, off_t offset) __THROW {
MallocHook::InvokePreMmapHook(start, length, prot, flags, fd, offset);
void *result;
if (!MallocHook::InvokeMmapReplacement(
start, length, prot, flags, fd, offset, &result)) {
result = do_mmap64(start, length, prot, flags, fd,
static_cast<size_t>(offset)); // avoid sign extension
}
MallocHook::InvokeMmapHook(result, start, length, prot, flags, fd, offset);
return result;
}
# endif // !defined(__USE_FILE_OFFSET64) || !defined(__REDIRECT_NTH)
extern "C" int munmap(void* start, size_t length) __THROW {
MallocHook::InvokeMunmapHook(start, length);
int result;
if (!MallocHook::InvokeMunmapReplacement(start, length, &result)) {
result = sys_munmap(start, length);
}
return result;
}
extern "C" void* mremap(void* old_addr, size_t old_size, size_t new_size,
int flags, ...) __THROW {
va_list ap;
va_start(ap, flags);
void *new_address = va_arg(ap, void *);
va_end(ap);
void* result = sys_mremap(old_addr, old_size, new_size, flags, new_address);
MallocHook::InvokeMremapHook(result, old_addr, old_size, new_size, flags,
new_address);
return result;
}
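// A usage sketch (not part of the original source) of why the wrapper must
// pull a fifth argument out of the va_list: callers only pass it when
// MREMAP_FIXED is set. 'target' is hypothetical.
#if 0
static void MremapUsageSketch(void* target) {
  void* p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  void* q = mremap(p, 4096, 8192, MREMAP_MAYMOVE);               // four args
  void* r = mremap(q, 8192, 8192, MREMAP_MAYMOVE | MREMAP_FIXED,
                   target);                                      // five args
  (void)r;
}
#endif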
#ifndef __UCLIBC__
// libc's version:
extern "C" void* __sbrk(ptrdiff_t increment);
extern "C" void* sbrk(ptrdiff_t increment) __THROW {
MallocHook::InvokePreSbrkHook(increment);
void *result = __sbrk(increment);
MallocHook::InvokeSbrkHook(result, increment);
return result;
}
#endif
/*static*/void* MallocHook::UnhookedMMap(void *start, size_t length, int prot,
int flags, int fd, off_t offset) {
void* result;
if (!MallocHook::InvokeMmapReplacement(
start, length, prot, flags, fd, offset, &result)) {
result = do_mmap64(start, length, prot, flags, fd, offset);
}
return result;
}
/*static*/int MallocHook::UnhookedMUnmap(void *start, size_t length) {
int result;
if (!MallocHook::InvokeMunmapReplacement(start, length, &result)) {
result = syscall(SYS_munmap, start, length);
}
return result;
}
#undef MALLOC_HOOK_HAVE_DO_MMAP64
#endif // #ifdef MALLOC_HOOK_HAVE_DO_MMAP64

View File

@ -1,55 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2014, gperftools Contributors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef MAYBE_EMERGENCY_MALLOC_H
#define MAYBE_EMERGENCY_MALLOC_H
#include "config.h"
#ifdef ENABLE_EMERGENCY_MALLOC
#include "emergency_malloc.h"
#else
namespace tcmalloc {
static inline void *EmergencyMalloc(size_t size) {return NULL;}
static inline void EmergencyFree(void *p) {}
static inline void *EmergencyCalloc(size_t n, size_t elem_size) {return NULL;}
static inline void *EmergencyRealloc(void *old_ptr, size_t new_size) {return NULL;}
static inline bool IsEmergencyPtr(const void *_ptr) {
return false;
}
}
#endif // ENABLE_EMERGENCY_MALLOC
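// A sketch (not part of the original source; ExampleHookSlow is hypothetical)
// of what the no-op stubs buy: call sites such as InvokeNewHookSlow in
// malloc_hook.cc can stay unconditional, and the guard folds away when
// emergency malloc is compiled out.
#if 0
void ExampleHookSlow(const void* p) {
  if (tcmalloc::IsEmergencyPtr(p)) return;  // inline 'return false' stub:
                                            // dead code after inlining
  // ... dispatch to the registered hooks ...
}
#endif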
#endif

Some files were not shown because too many files have changed in this diff