Merge branch 'refactor_databases' into database_atomic_merge_tmp

Alexander Tokmakov 2019-12-26 00:17:49 +03:00
commit bab12f4c9c
1140 changed files with 23748 additions and 47416 deletions

.gitmodules (vendored)

@@ -29,9 +29,6 @@
 [submodule "contrib/re2"]
     path = contrib/re2
     url = https://github.com/google/re2.git
-[submodule "contrib/ssl"]
-    path = contrib/ssl
-    url = https://github.com/ClickHouse-Extras/ssl.git
 [submodule "contrib/llvm"]
     path = contrib/llvm
     url = https://github.com/ClickHouse-Extras/llvm
@ -70,10 +67,10 @@
url = https://github.com/ClickHouse-Extras/libgsasl.git url = https://github.com/ClickHouse-Extras/libgsasl.git
[submodule "contrib/libcxx"] [submodule "contrib/libcxx"]
path = contrib/libcxx path = contrib/libcxx
url = https://github.com/llvm-mirror/libcxx.git url = https://github.com/ClickHouse-Extras/libcxx.git
[submodule "contrib/libcxxabi"] [submodule "contrib/libcxxabi"]
path = contrib/libcxxabi path = contrib/libcxxabi
url = https://github.com/llvm-mirror/libcxxabi.git url = https://github.com/ClickHouse-Extras/libcxxabi.git
[submodule "contrib/snappy"] [submodule "contrib/snappy"]
path = contrib/snappy path = contrib/snappy
url = https://github.com/google/snappy url = https://github.com/google/snappy
@@ -107,3 +104,30 @@
 [submodule "contrib/sparsehash-c11"]
     path = contrib/sparsehash-c11
     url = https://github.com/sparsehash/sparsehash-c11.git
+[submodule "contrib/aws"]
+    path = contrib/aws
+    url = https://github.com/aws/aws-sdk-cpp.git
+[submodule "aws-c-event-stream"]
+    path = contrib/aws-c-event-stream
+    url = https://github.com/awslabs/aws-c-event-stream.git
+[submodule "aws-c-common"]
+    path = contrib/aws-c-common
+    url = https://github.com/awslabs/aws-c-common.git
+[submodule "aws-checksums"]
+    path = contrib/aws-checksums
+    url = https://github.com/awslabs/aws-checksums.git
+[submodule "contrib/curl"]
+    path = contrib/curl
+    url = https://github.com/curl/curl.git
+[submodule "contrib/openssl"]
+    path = contrib/openssl
+    url = https://github.com/ClickHouse-Extras/openssl.git
+[submodule "contrib/icudata"]
+    path = contrib/icudata
+    url = https://github.com/ClickHouse-Extras/icudata.git
+[submodule "contrib/icu"]
+    path = contrib/icu
+    url = https://github.com/unicode-org/icu.git
+[submodule "contrib/libc-headers"]
+    path = contrib/libc-headers
+    url = https://github.com/ClickHouse-Extras/libc-headers.git


@@ -128,6 +128,8 @@ Yu](https://github.com/yuzhichang))
 * Introduce CustomSeparated data format that supports custom escaping and
   delimiter rules. [#7118](https://github.com/ClickHouse/ClickHouse/pull/7118)
   ([tavplubix](https://github.com/tavplubix))
+* Support Redis as source of external dictionary. [#4361](https://github.com/ClickHouse/ClickHouse/pull/4361) [#6962](https://github.com/ClickHouse/ClickHouse/pull/6962) ([comunodi](https://github.com/comunodi), [Anton
+  Popov](https://github.com/CurtizJ))

 ### Bug Fix
 * Fix wrong query result if it has `WHERE IN (SELECT ...)` section and `optimize_read_in_order` is


@@ -206,6 +206,13 @@ set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${COMPILER_FLAGS} ${C
 set (CMAKE_C_FLAGS_RELWITHDEBINFO "${CMAKE_C_FLAGS_RELWITHDEBINFO} -O3 ${CMAKE_C_FLAGS_ADD}")
 set (CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -O0 -g3 -ggdb3 -fno-inline ${CMAKE_C_FLAGS_ADD}")

+if (COMPILER_CLANG)
+    # Exception unwinding doesn't work in clang release build without this option
+    # TODO investigate if contrib/libcxxabi is out of date
+    set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-omit-frame-pointer")
+    set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fno-omit-frame-pointer")
+endif ()
+
 option (ENABLE_LIBRARIES "Enable all libraries (Global default switch)" ON)

 option (UNBUNDLED "Try find all libraries in system. We recommend to avoid this mode for production builds, because we cannot guarantee exact versions and variants of libraries your system has installed. This mode exists for enthusiastic developers who search for trouble. Also it is useful for maintainers of OS packages." OFF)
@@ -215,7 +222,7 @@ else ()
     set(NOT_UNBUNDLED 1)
 endif ()

-# Using system libs can cause lot of warnings in includes.
+# Using system libs can cause lot of warnings in includes (on macro expansion).
 if (UNBUNDLED OR NOT (OS_LINUX OR APPLE) OR ARCH_32)
     option (NO_WERROR "Disable -Werror compiler option" ON)
 endif ()
@@ -325,6 +332,7 @@ include (cmake/find/brotli.cmake)
 include (cmake/find/protobuf.cmake)
 include (cmake/find/pdqsort.cmake)
 include (cmake/find/hdfs3.cmake) # uses protobuf
+include (cmake/find/s3.cmake)
 include (cmake/find/consistent-hashing.cmake)
 include (cmake/find/base64.cmake)
 include (cmake/find/parquet.cmake)
@@ -344,7 +352,6 @@ if (ENABLE_TESTS)
 endif ()

 # Need to process before "contrib" dir:
-include (libs/libcommon/cmake/find_gperftools.cmake)
 include (libs/libcommon/cmake/find_jemalloc.cmake)
 include (libs/libcommon/cmake/find_cctz.cmake)
 include (libs/libmysqlxx/cmake/find_mysqlclient.cmake)
@@ -354,18 +361,6 @@ include (libs/libmysqlxx/cmake/find_mysqlclient.cmake)
 if (USE_JEMALLOC)
     message (STATUS "Link jemalloc: ${JEMALLOC_LIBRARIES}")
     set (MALLOC_LIBRARIES ${JEMALLOC_LIBRARIES})
-elseif (USE_TCMALLOC)
-    if (DEBUG_TCMALLOC AND NOT GPERFTOOLS_TCMALLOC_MINIMAL_DEBUG)
-        message (FATAL_ERROR "Requested DEBUG_TCMALLOC but debug library is not found. You should install Google Perftools. Example: sudo apt-get install libgoogle-perftools-dev")
-    endif ()
-
-    if (DEBUG_TCMALLOC AND GPERFTOOLS_TCMALLOC_MINIMAL_DEBUG)
-        message (STATUS "Link libtcmalloc_minimal_debug for testing: ${GPERFTOOLS_TCMALLOC_MINIMAL_DEBUG}")
-        set (MALLOC_LIBRARIES ${GPERFTOOLS_TCMALLOC_MINIMAL_DEBUG})
-    else ()
-        message (STATUS "Link libtcmalloc_minimal: ${GPERFTOOLS_TCMALLOC_MINIMAL}")
-        set (MALLOC_LIBRARIES ${GPERFTOOLS_TCMALLOC_MINIMAL})
-    endif ()
 elseif (SANITIZE)
     message (STATUS "Will use ${SANITIZE} sanitizer.")
 elseif (OS_LINUX)


@@ -11,7 +11,3 @@ ClickHouse is an open-source column-oriented database management system that all
 * [Blog](https://clickhouse.yandex/blog/en/) contains various ClickHouse-related articles, as well as announces and reports about events.
 * [Contacts](https://clickhouse.yandex/#contacts) can help to get your questions answered if there are any.
 * You can also [fill this form](https://forms.yandex.com/surveys/meet-yandex-clickhouse-team/) to meet Yandex ClickHouse team in person.
-
-## Upcoming Events
-
-* [ClickHouse Meetup in Moscow](https://yandex.ru/promo/clickhouse/moscow-december-2019) on December 11.


@ -1,61 +0,0 @@
# https://github.com/vast-io/vast/blob/master/cmake/FindGperftools.cmake
# Tries to find Gperftools.
#
# Usage of this module as follows:
#
# find_package(Gperftools)
#
# Variables used by this module, they can change the default behaviour and need
# to be set before calling find_package:
#
# Gperftools_ROOT_DIR Set this variable to the root installation of
# Gperftools if the module has problems finding
# the proper installation path.
#
# Variables defined by this module:
#
# GPERFTOOLS_FOUND System has Gperftools libs/headers
# GPERFTOOLS_LIBRARIES The Gperftools libraries (tcmalloc & profiler)
# GPERFTOOLS_INCLUDE_DIR The location of Gperftools headers
find_library(GPERFTOOLS_TCMALLOC
NAMES tcmalloc
HINTS ${Gperftools_ROOT_DIR}/lib)
find_library(GPERFTOOLS_TCMALLOC_MINIMAL
NAMES tcmalloc_minimal
HINTS ${Gperftools_ROOT_DIR}/lib)
find_library(GPERFTOOLS_TCMALLOC_MINIMAL_DEBUG
NAMES tcmalloc_minimal_debug
HINTS ${Gperftools_ROOT_DIR}/lib)
find_library(GPERFTOOLS_PROFILER
NAMES profiler
HINTS ${Gperftools_ROOT_DIR}/lib)
find_library(GPERFTOOLS_TCMALLOC_AND_PROFILER
NAMES tcmalloc_and_profiler
HINTS ${Gperftools_ROOT_DIR}/lib)
find_path(GPERFTOOLS_INCLUDE_DIR
NAMES gperftools/heap-profiler.h
HINTS ${Gperftools_ROOT_DIR}/include)
set(GPERFTOOLS_LIBRARIES ${GPERFTOOLS_TCMALLOC_AND_PROFILER})
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(
Gperftools
DEFAULT_MSG
GPERFTOOLS_LIBRARIES
GPERFTOOLS_INCLUDE_DIR)
mark_as_advanced(
Gperftools_ROOT_DIR
GPERFTOOLS_TCMALLOC
GPERFTOOLS_PROFILER
GPERFTOOLS_TCMALLOC_AND_PROFILER
GPERFTOOLS_LIBRARIES
GPERFTOOLS_INCLUDE_DIR)


@@ -19,6 +19,6 @@ if (CMAKE_SYSTEM_PROCESSOR MATCHES "^(ppc64le.*|PPC64LE.*)")
     set (ARCH_PPC64LE 1)
     # FIXME: move this check into tools.cmake
     if (COMPILER_CLANG OR (COMPILER_GCC AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 8))
-        message(FATAL_ERROR "Only gcc-8 is supported for powerpc architecture")
+        message(FATAL_ERROR "Only gcc-8 or higher is supported for powerpc architecture")
     endif ()
 endif ()


@@ -1,7 +1,5 @@
-if (COMPILER_CLANG)
-    option (USE_LIBCXX "Use libc++ and libc++abi instead of libstdc++" ON)
-    option (USE_INTERNAL_LIBCXX_LIBRARY "Set to FALSE to use system libcxx and libcxxabi libraries instead of bundled" ${NOT_UNBUNDLED})
-endif()
+option (USE_LIBCXX "Use libc++ and libc++abi instead of libstdc++" ${NOT_UNBUNDLED})
+option (USE_INTERNAL_LIBCXX_LIBRARY "Set to FALSE to use system libcxx and libcxxabi libraries instead of bundled" ${NOT_UNBUNDLED})

 if (USE_LIBCXX)
     set (CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -D_LIBCPP_DEBUG=0") # More checks in debug build.


@@ -1,6 +1,18 @@
 option(ENABLE_ICU "Enable ICU" ${ENABLE_LIBRARIES})

-if(ENABLE_ICU)
+if (ENABLE_ICU)
+    option (USE_INTERNAL_ICU_LIBRARY "Set to FALSE to use system ICU library instead of bundled" ${NOT_UNBUNDLED})
+
+    if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/icu/icu4c/LICENSE")
+        if (USE_INTERNAL_ICU_LIBRARY)
+            message (WARNING "submodule contrib/icu is missing. to fix try run: \n git submodule update --init --recursive")
+            set (USE_INTERNAL_ICU_LIBRARY 0)
+        endif ()
+        set (MISSING_INTERNAL_ICU_LIBRARY 1)
+    endif ()
+
+    if(NOT USE_INTERNAL_ICU_LIBRARY)
         if (APPLE)
             set(ICU_ROOT "/usr/local/opt/icu4c" CACHE STRING "")
         endif()
@@ -11,6 +23,16 @@
         endif()
     endif()

+    if (ICU_LIBRARY AND ICU_INCLUDE_DIR)
+        set (USE_ICU 1)
+    elseif (NOT MISSING_INTERNAL_ICU_LIBRARY)
+        set (USE_INTERNAL_ICU_LIBRARY 1)
+        set (ICU_LIBRARIES icui18n icuuc icudata)
+        set (USE_ICU 1)
+    endif ()
+endif()
+
 if(USE_ICU)
     message(STATUS "Using icu=${USE_ICU}: ${ICU_INCLUDE_DIR} : ${ICU_LIBRARIES}")
 else()


@@ -1,7 +1,7 @@
 # Broken in macos. TODO: update clang, re-test, enable
 if (NOT APPLE)
-    option (ENABLE_EMBEDDED_COMPILER "Set to TRUE to enable support for 'compile' option for query execution" ${ENABLE_LIBRARIES})
-    option (USE_INTERNAL_LLVM_LIBRARY "Use bundled or system LLVM library. Default: system library for quicker developer builds." 0)
+    option (ENABLE_EMBEDDED_COMPILER "Set to TRUE to enable support for 'compile_expressions' option for query execution" ${ENABLE_LIBRARIES})
+    option (USE_INTERNAL_LLVM_LIBRARY "Use bundled or system LLVM library." ${NOT_UNBUNDLED})
 endif ()

 if (ENABLE_EMBEDDED_COMPILER)
if (ENABLE_EMBEDDED_COMPILER) if (ENABLE_EMBEDDED_COMPILER)
@@ -13,27 +13,11 @@ if (ENABLE_EMBEDDED_COMPILER)
     if (NOT USE_INTERNAL_LLVM_LIBRARY)
         set (LLVM_PATHS "/usr/local/lib/llvm")

-        if (LLVM_VERSION)
-            find_package(LLVM ${LLVM_VERSION} CONFIG PATHS ${LLVM_PATHS})
-        elseif (CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
-            find_package(LLVM ${CMAKE_CXX_COMPILER_VERSION} CONFIG PATHS ${LLVM_PATHS})
-        else ()
-            # TODO: 9 8
-            foreach(llvm_v 7.1 7 6 5)
-                if (NOT LLVM_FOUND)
-                    find_package (LLVM ${llvm_v} CONFIG PATHS ${LLVM_PATHS})
-                endif ()
-            endforeach ()
-        endif ()
-
-        if (LLVM_FOUND)
-            find_library (LLD_LIBRARY_TEST lldCore PATHS ${LLVM_LIBRARY_DIRS})
-            find_path (LLD_INCLUDE_DIR_TEST NAMES lld/Core/AbsoluteAtom.h PATHS ${LLVM_INCLUDE_DIRS})
-            if (NOT LLD_LIBRARY_TEST OR NOT LLD_INCLUDE_DIR_TEST)
-                set (LLVM_FOUND 0)
-                message(WARNING "liblld (${LLD_LIBRARY_TEST}, ${LLD_INCLUDE_DIR_TEST}) not found in ${LLVM_INCLUDE_DIRS} ${LLVM_LIBRARY_DIRS}. Disabling internal compiler.")
-            endif ()
-        endif ()
+        foreach(llvm_v 9 8)
+            if (NOT LLVM_FOUND)
+                find_package (LLVM ${llvm_v} CONFIG PATHS ${LLVM_PATHS})
+            endif ()
+        endforeach ()

         if (LLVM_FOUND)
             # Remove dynamically-linked zlib and libedit from LLVM's dependencies:
@@ -51,30 +35,39 @@
             set (LLVM_FOUND 0)
             set (USE_EMBEDDED_COMPILER 0)
         endif ()
-
-        # TODO: fix llvm 8+ and remove:
-        if (LLVM_FOUND AND LLVM_VERSION_MAJOR GREATER 7)
-            message(WARNING "LLVM 8+ not supported yet, disabling.")
-            set (USE_EMBEDDED_COMPILER 0)
-        endif ()
     else()
-        set (LLVM_FOUND 1)
-        set (USE_EMBEDDED_COMPILER 1)
-        set (LLVM_VERSION "7.0.0bundled")
-        set (LLVM_INCLUDE_DIRS
-            ${ClickHouse_SOURCE_DIR}/contrib/llvm/llvm/include
-            ${ClickHouse_BINARY_DIR}/contrib/llvm/llvm/include
-            ${ClickHouse_SOURCE_DIR}/contrib/llvm/clang/include
-            ${ClickHouse_BINARY_DIR}/contrib/llvm/clang/include
-            ${ClickHouse_BINARY_DIR}/contrib/llvm/llvm/tools/clang/include
-            ${ClickHouse_SOURCE_DIR}/contrib/llvm/lld/include
-            ${ClickHouse_BINARY_DIR}/contrib/llvm/lld/include
-            ${ClickHouse_BINARY_DIR}/contrib/llvm/llvm/tools/lld/include)
-        set (LLVM_LIBRARY_DIRS ${ClickHouse_BINARY_DIR}/contrib/llvm/llvm)
+        if (CMAKE_CURRENT_SOURCE_DIR STREQUAL CMAKE_CURRENT_BINARY_DIR)
+            message(WARNING "Option ENABLE_EMBEDDED_COMPILER is set but LLVM library cannot build if build directory is the same as source directory.")
+            set (LLVM_FOUND 0)
+            set (USE_EMBEDDED_COMPILER 0)
+        elseif (SPLIT_SHARED_LIBRARIES)
+            # llvm-tablegen cannot find shared libraries that we build. Probably can be easily fixed.
+            message(WARNING "Option ENABLE_EMBEDDED_COMPILER is not compatible with SPLIT_SHARED_LIBRARIES. Build of LLVM will be disabled.")
+            set (LLVM_FOUND 0)
+            set (USE_EMBEDDED_COMPILER 0)
+        elseif (NOT ARCH_AMD64)
+            # It's not supported yet, but you can help.
+            message(WARNING "Option ENABLE_EMBEDDED_COMPILER is only available for x86_64. Build of LLVM will be disabled.")
+            set (LLVM_FOUND 0)
+            set (USE_EMBEDDED_COMPILER 0)
+        elseif (SANITIZE STREQUAL "undefined")
+            # llvm-tblgen, that is used during LLVM build, doesn't work with UBSan.
+            message(WARNING "Option ENABLE_EMBEDDED_COMPILER does not work with UBSan, because 'llvm-tblgen' tool from LLVM has undefined behaviour. Build of LLVM will be disabled.")
+            set (LLVM_FOUND 0)
+            set (USE_EMBEDDED_COMPILER 0)
+        else ()
+            set (LLVM_FOUND 1)
+            set (USE_EMBEDDED_COMPILER 1)
+            set (LLVM_VERSION "9.0.0bundled")
+            set (LLVM_INCLUDE_DIRS
+                ${ClickHouse_SOURCE_DIR}/contrib/llvm/llvm/include
+                ${ClickHouse_BINARY_DIR}/contrib/llvm/llvm/include
+            )
+            set (LLVM_LIBRARY_DIRS ${ClickHouse_BINARY_DIR}/contrib/llvm/llvm)
+        endif()
     endif()

     if (LLVM_FOUND)
+        message(STATUS "LLVM version: ${LLVM_PACKAGE_VERSION}")
         message(STATUS "LLVM include Directory: ${LLVM_INCLUDE_DIRS}")
         message(STATUS "LLVM library Directory: ${LLVM_LIBRARY_DIRS}")
         message(STATUS "LLVM C++ compiler flags: ${LLVM_CXXFLAGS}")
@@ -82,16 +75,53 @@
 endif()

-function(llvm_libs_all REQUIRED_LLVM_LIBRARIES)
-    llvm_map_components_to_libnames (result all)
-    if (USE_STATIC_LIBRARIES OR NOT "LLVM" IN_LIST result)
-        list (REMOVE_ITEM result "LTO" "LLVM")
-    else()
-        set (result "LLVM")
-    endif ()
-    if (TERMCAP_LIBRARY)
-        list (APPEND result ${TERMCAP_LIBRARY})
-    endif ()
-    list (APPEND result ${CMAKE_DL_LIBS} ${ZLIB_LIBRARIES})
-    set (${REQUIRED_LLVM_LIBRARIES} ${result} PARENT_SCOPE)
-endfunction()
+# This list was generated by listing all LLVM libraries, compiling the binary and removing all libraries while it still compiles.
+set (REQUIRED_LLVM_LIBRARIES
+    LLVMOrcJIT
+    LLVMExecutionEngine
+    LLVMRuntimeDyld
+    LLVMX86CodeGen
+    LLVMX86Desc
+    LLVMX86Info
+    LLVMX86Utils
+    LLVMAsmPrinter
+    LLVMDebugInfoDWARF
+    LLVMGlobalISel
+    LLVMSelectionDAG
+    LLVMMCDisassembler
+    LLVMPasses
+    LLVMCodeGen
+    LLVMipo
+    LLVMBitWriter
+    LLVMInstrumentation
+    LLVMScalarOpts
+    LLVMAggressiveInstCombine
+    LLVMInstCombine
+    LLVMVectorize
+    LLVMTransformUtils
+    LLVMTarget
+    LLVMAnalysis
+    LLVMProfileData
+    LLVMObject
+    LLVMBitReader
+    LLVMCore
+    LLVMRemarks
+    LLVMBitstreamReader
+    LLVMMCParser
+    LLVMMC
+    LLVMBinaryFormat
+    LLVMDebugInfoCodeView
+    LLVMSupport
+    LLVMDemangle
+)
+
+#function(llvm_libs_all REQUIRED_LLVM_LIBRARIES)
+#    llvm_map_components_to_libnames (result all)
+#    if (USE_STATIC_LIBRARIES OR NOT "LLVM" IN_LIST result)
+#        list (REMOVE_ITEM result "LTO" "LLVM")
+#    else()
+#        set (result "LLVM")
+#    endif ()
+#    list (APPEND result ${CMAKE_DL_LIBS} ${ZLIB_LIBRARIES})
+#    set (${REQUIRED_LLVM_LIBRARIES} ${result} PARENT_SCOPE)
+#endfunction()

cmake/find/s3.cmake (new file)

@@ -0,0 +1,26 @@
if(NOT OS_FREEBSD AND NOT APPLE AND NOT ARCH_ARM)
option(ENABLE_S3 "Enable S3" ${ENABLE_LIBRARIES})
endif()
if(ENABLE_S3)
option(USE_INTERNAL_AWS_S3_LIBRARY "Set to FALSE to use system S3 instead of bundled" ${NOT_UNBUNDLED})
if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/aws/aws-cpp-sdk-s3")
message (WARNING "submodule contrib/aws is missing. to fix try run: \n git submodule update --init --recursive")
set (MISSING_AWS_S3 1)
endif ()
if (USE_INTERNAL_AWS_S3_LIBRARY AND NOT MISSING_AWS_S3)
set(AWS_S3_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws/aws-cpp-sdk-s3/include")
set(AWS_S3_CORE_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws/aws-cpp-sdk-core/include")
set(AWS_S3_LIBRARY aws_s3)
set(USE_INTERNAL_AWS_S3_LIBRARY 1)
set(USE_AWS_S3 1)
else()
set(USE_INTERNAL_AWS_S3_LIBRARY 0)
set(USE_AWS_S3 0)
endif ()
endif()
message (STATUS "Using aws_s3=${USE_AWS_S3}: ${AWS_S3_INCLUDE_DIR} : ${AWS_S3_LIBRARY}")


@@ -6,9 +6,9 @@ if(NOT ARCH_32)
     option(USE_INTERNAL_SSL_LIBRARY "Set to FALSE to use system *ssl library instead of bundled" ${NOT_UNBUNDLED})
 endif()

-if(NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/ssl/CMakeLists.txt")
+if(NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/openssl/README")
     if(USE_INTERNAL_SSL_LIBRARY)
-        message(WARNING "submodule contrib/ssl is missing. to fix try run: \n git submodule update --init --recursive")
+        message(WARNING "submodule contrib/openssl is missing. to fix try run: \n git submodule update --init --recursive")
     endif()
     set(USE_INTERNAL_SSL_LIBRARY 0)
     set(MISSING_INTERNAL_SSL_LIBRARY 1)
@@ -42,17 +42,17 @@ endif ()
 if (NOT OPENSSL_FOUND AND NOT MISSING_INTERNAL_SSL_LIBRARY)
     set (USE_INTERNAL_SSL_LIBRARY 1)
-    set (OPENSSL_ROOT_DIR "${ClickHouse_SOURCE_DIR}/contrib/ssl")
-    set (OPENSSL_INCLUDE_DIR "${OPENSSL_ROOT_DIR}/include")
-    if (NOT USE_STATIC_LIBRARIES AND TARGET crypto-shared AND TARGET ssl-shared)
-        set (OPENSSL_CRYPTO_LIBRARY crypto-shared)
-        set (OPENSSL_SSL_LIBRARY ssl-shared)
-    else ()
-        set (OPENSSL_CRYPTO_LIBRARY crypto)
-        set (OPENSSL_SSL_LIBRARY ssl)
+    set (OPENSSL_ROOT_DIR "${ClickHouse_SOURCE_DIR}/contrib/openssl")
+
+    if (ARCH_AMD64)
+        set (OPENSSL_INCLUDE_DIR "${OPENSSL_ROOT_DIR}/include" "${ClickHouse_SOURCE_DIR}/contrib/openssl-cmake/linux_x86_64/include")
+    elseif (ARCH_AARCH64)
+        set (OPENSSL_INCLUDE_DIR "${OPENSSL_ROOT_DIR}/include" "${ClickHouse_SOURCE_DIR}/contrib/openssl-cmake/linux_aarch64/include")
     endif ()
-    set (OPENSSL_LIBRARIES ${OPENSSL_SSL_LIBRARY} ${OPENSSL_CRYPTO_LIBRARY})
+    set (OPENSSL_CRYPTO_LIBRARY crypto)
+    set (OPENSSL_SSL_LIBRARY ssl)
     set (OPENSSL_FOUND 1)
+    set (OPENSSL_LIBRARIES ${OPENSSL_SSL_LIBRARY} ${OPENSSL_CRYPTO_LIBRARY})
 endif ()

 if(OPENSSL_FOUND)


@@ -18,6 +18,14 @@ message(STATUS "Default libraries: ${DEFAULT_LIBS}")
 set(CMAKE_CXX_STANDARD_LIBRARIES ${DEFAULT_LIBS})
 set(CMAKE_C_STANDARD_LIBRARIES ${DEFAULT_LIBS})

+# glibc-compatibility library relies to fixed version of libc headers
+# (because minor changes in function attributes between different glibc versions will introduce incompatibilities)
+# This is for x86_64. For other architectures we have separate toolchains.
+if (ARCH_AMD64)
+    set(CMAKE_C_STANDARD_INCLUDE_DIRECTORIES ${ClickHouse_SOURCE_DIR}/contrib/libc-headers/x86_64-linux-gnu ${ClickHouse_SOURCE_DIR}/contrib/libc-headers)
+    set(CMAKE_CXX_STANDARD_INCLUDE_DIRECTORIES ${ClickHouse_SOURCE_DIR}/contrib/libc-headers/x86_64-linux-gnu ${ClickHouse_SOURCE_DIR}/contrib/libc-headers)
+endif ()
+
 # Global libraries
 add_library(global-libs INTERFACE)


@@ -23,7 +23,7 @@ if (SANITIZE)
         # RelWithDebInfo, and downgrade optimizations to -O1 but not to -Og, to
         # keep the binary size down.
         # TODO: try compiling with -Og and with ld.gold.
-        set (MSAN_FLAGS "-fsanitize=memory -fsanitize-memory-track-origins -fno-optimize-sibling-calls")
+        set (MSAN_FLAGS "-fsanitize=memory -fsanitize-memory-track-origins -fno-optimize-sibling-calls -fsanitize-blacklist=${CMAKE_SOURCE_DIR}/dbms/tests/msan_suppressions.txt")
         set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SAN_FLAGS} ${MSAN_FLAGS}")
         set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SAN_FLAGS} ${MSAN_FLAGS}")
@@ -40,7 +40,6 @@
         set (ENABLE_HDFS 0 CACHE BOOL "")
         set (ENABLE_CAPNP 0 CACHE BOOL "")
         set (ENABLE_RDKAFKA 0 CACHE BOOL "")
-        set (ENABLE_ICU 0 CACHE BOOL "")
         set (ENABLE_POCO_MONGODB 0 CACHE BOOL "")
         set (ENABLE_POCO_NETSSL 0 CACHE BOOL "")
         set (ENABLE_POCO_ODBC 0 CACHE BOOL "")
@@ -77,6 +76,9 @@
             set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -static-libubsan")
         endif ()

+        # llvm-tblgen, that is used during LLVM build, doesn't work with UBSan.
+        set (ENABLE_EMBEDDED_COMPILER 0 CACHE BOOL "")
+
     elseif (SANITIZE STREQUAL "libfuzzer")
         # NOTE: Eldar Zaitov decided to name it "libfuzzer" instead of "fuzzer" to keep in mind another possible fuzzer backends.
         # NOTE: no-link means that all the targets are built with instrumentation for fuzzer, but only some of them (tests) have entry point for fuzzer and it's not checked.


@@ -1,2 +1,2 @@
 wget https://github.com/phracker/MacOSX-SDKs/releases/download/10.14-beta4/MacOSX10.14.sdk.tar.xz
-tar --strip-components=1 xJf MacOSX10.14.sdk.tar.xz
+tar xJf MacOSX10.14.sdk.tar.xz --strip-components=1


@@ -1,2 +1,2 @@
-wget https://developer.arm.com/-/media/Files/downloads/gnu-a/8.3-2019.03/binrel/gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu.tar.xz?revision=2e88a73f-d233-4f96-b1f4-d8b36e9bb0b9&la=en -O gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu.tar.xz
-tar --strip-components=1 xJf gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu.tar.xz
+wget 'https://developer.arm.com/-/media/Files/downloads/gnu-a/8.3-2019.03/binrel/gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu.tar.xz?revision=2e88a73f-d233-4f96-b1f4-d8b36e9bb0b9&la=en' -O gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu.tar.xz
+tar xJf gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu.tar.xz --strip-components=1


@@ -66,34 +66,19 @@
     endif ()

     add_subdirectory (${INTERNAL_ZLIB_NAME})
-    # TODO: make pull to Dead2/zlib-ng and remove:
     # We should use same defines when including zlib.h as used when zlib compiled
     target_compile_definitions (zlib PUBLIC ZLIB_COMPAT WITH_GZFILEOP)
     target_compile_definitions (zlibstatic PUBLIC ZLIB_COMPAT WITH_GZFILEOP)
-    if(CMAKE_SYSTEM_PROCESSOR MATCHES "x86_64" OR CMAKE_SYSTEM_PROCESSOR MATCHES "AMD64")
+    if (ARCH_AMD64 OR ARCH_AARCH64)
         target_compile_definitions (zlib PUBLIC X86_64 UNALIGNED_OK)
         target_compile_definitions (zlibstatic PUBLIC X86_64 UNALIGNED_OK)
     endif ()
-
-    #set_target_properties(example PROPERTIES EXCLUDE_FROM_ALL 1)
-    #if (TARGET example64)
-    #    set_target_properties(example64 PROPERTIES EXCLUDE_FROM_ALL 1)
-    #endif ()
-
-    #set_target_properties(minigzip PROPERTIES EXCLUDE_FROM_ALL 1)
-    #if (TARGET minigzip64)
-    #    set_target_properties(minigzip64 PROPERTIES EXCLUDE_FROM_ALL 1)
-    #endif ()
 endif ()

 if (USE_INTERNAL_CCTZ_LIBRARY)
     add_subdirectory (cctz-cmake)
 endif ()

-if (ENABLE_TCMALLOC AND USE_INTERNAL_GPERFTOOLS_LIBRARY)
-    add_subdirectory (libtcmalloc)
-endif ()
-
 if (ENABLE_JEMALLOC AND USE_INTERNAL_JEMALLOC_LIBRARY)
     add_subdirectory (jemalloc-cmake)
 endif ()
@@ -107,20 +92,10 @@ if (USE_INTERNAL_H3_LIBRARY)
 endif ()

 if (USE_INTERNAL_SSL_LIBRARY)
-    if (NOT MAKE_STATIC_LIBRARIES)
-        set (BUILD_SHARED 1)
-    endif ()
-
-    # By default, ${CMAKE_INSTALL_PREFIX}/etc/ssl is selected - that is not what we need.
-    # We need to use system wide ssl directory.
-    set (OPENSSLDIR "/etc/ssl")
-
-    set (LIBRESSL_SKIP_INSTALL 1 CACHE INTERNAL "")
-    add_subdirectory (ssl)
-    target_include_directories(${OPENSSL_CRYPTO_LIBRARY} SYSTEM PUBLIC ${OPENSSL_INCLUDE_DIR})
-    target_include_directories(${OPENSSL_SSL_LIBRARY} SYSTEM PUBLIC ${OPENSSL_INCLUDE_DIR})
+    add_subdirectory (openssl-cmake)
+
+    # This is for Poco library
     set (POCO_SKIP_OPENSSL_FIND 1)
     add_library(OpenSSL::Crypto ALIAS ${OPENSSL_CRYPTO_LIBRARY})
     add_library(OpenSSL::SSL ALIAS ${OPENSSL_SSL_LIBRARY})
 endif ()
@@ -165,6 +140,10 @@ if (ENABLE_ODBC AND USE_INTERNAL_ODBC_LIBRARY)
     add_library(ODBC::ODBC ALIAS ${ODBC_LIBRARIES})
 endif ()

+if (ENABLE_ICU AND USE_INTERNAL_ICU_LIBRARY)
+    add_subdirectory (icu-cmake)
+endif ()
+
 if (USE_INTERNAL_PARQUET_LIBRARY)
 if (USE_INTERNAL_PARQUET_LIBRARY_NATIVE_CMAKE)
     # We dont use arrow's cmakefiles because they uses too many depends and download some libs in compile time
@@ -181,10 +160,7 @@ if (USE_INTERNAL_PARQUET_LIBRARY_NATIVE_CMAKE)
     set (ARROW_VERBOSE_THIRDPARTY_BUILD ON CACHE INTERNAL "")
     set (ARROW_BUILD_SHARED 1 CACHE INTERNAL "")
     set (ARROW_BOOST_HEADER_ONLY ON CACHE INTERNAL "")
-    #set (BOOST_INCLUDEDIR Boost_INCLUDE_DIRS)
     set (Boost_FOUND 1 CACHE INTERNAL "")
-    #set (ZLIB_HOME ${ZLIB_INCLUDE_DIR})
-    #set (ZLIB_FOUND 1)
     if (MAKE_STATIC_LIBRARIES)
         set (PARQUET_ARROW_LINKAGE "static" CACHE INTERNAL "")
         set (ARROW_TEST_LINKAGE "static" CACHE INTERNAL "")
@@ -224,6 +200,11 @@ else()
     endif()

     add_subdirectory(arrow-cmake)
+
+    # The library is large - avoid bloat.
+    target_compile_options (${ARROW_LIBRARY} PRIVATE -g0)
+    target_compile_options (${THRIFT_LIBRARY} PRIVATE -g0)
+    target_compile_options (${PARQUET_LIBRARY} PRIVATE -g0)
 endif()
 endif()
@@ -260,28 +241,14 @@ elseif(GTEST_SRC_DIR)
     target_compile_definitions(gtest INTERFACE GTEST_HAS_POSIX_RE=0)
 endif()

-if (USE_INTERNAL_LLVM_LIBRARY)
-    file(GENERATE OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/empty.cpp CONTENT " ")
-    add_library(LLVM0 ${CMAKE_CURRENT_BINARY_DIR}/empty.cpp) # silly cmake bug fix
-    add_library(LLVMOFF ${CMAKE_CURRENT_BINARY_DIR}/empty.cpp)
+if (USE_EMBEDDED_COMPILER AND USE_INTERNAL_LLVM_LIBRARY)
     # ld: unknown option: --color-diagnostics
     if (APPLE)
         set (LINKER_SUPPORTS_COLOR_DIAGNOSTICS 0 CACHE INTERNAL "")
     endif ()
     set (LLVM_ENABLE_EH 1 CACHE INTERNAL "")
     set (LLVM_ENABLE_RTTI 1 CACHE INTERNAL "")
-    set (LLVM_INCLUDE_TESTS 0 CACHE INTERNAL "")
-    set (LLVM_INCLUDE_EXAMPLES 0 CACHE INTERNAL "")
-    set (LLVM_INCLUDE_TOOLS 0 CACHE INTERNAL "")
-    set (LLVM_INSTALL_TOOLCHAIN_ONLY 0 CACHE INTERNAL "")
-    set (CLANG_BUILT_STANDALONE 0 CACHE INTERNAL "")
-    set (LLDB_BUILT_STANDALONE 0 CACHE INTERNAL "")
-    set (CLANG_ENABLE_STATIC_ANALYZER 0 CACHE INTERNAL "")
-    set (CLANG_ENABLE_ARCMT 0 CACHE INTERNAL "")
-    set (CLANG_BUILD_TOOLS 0 CACHE INTERNAL "")
-    set (BENCHMARK_ENABLE_GTEST_TESTS 0 CACHE INTERNAL "")
-    set (BENCHMARK_ENABLE_ASSEMBLY_TESTS 0 CACHE INTERNAL "")
-    set (LLVM_TARGETS_TO_BUILD "X86;AArch64" CACHE INTERNAL "")
+    set (LLVM_TARGETS_TO_BUILD "X86;AArch64" CACHE STRING "")
     add_subdirectory (llvm/llvm)
 endif ()
@@ -313,12 +280,39 @@ if (USE_INTERNAL_HDFS3_LIBRARY)
     add_subdirectory(libhdfs3-cmake)
 endif ()

+if (USE_INTERNAL_AWS_S3_LIBRARY)
+    set (save_CMAKE_C_FLAGS ${CMAKE_C_FLAGS})
+    set (save_CMAKE_REQUIRED_LIBRARIES ${CMAKE_REQUIRED_LIBRARIES})
+    set (save_CMAKE_REQUIRED_INCLUDES ${CMAKE_REQUIRED_INCLUDES})
+    set (save_CMAKE_REQUIRED_FLAGS ${CMAKE_REQUIRED_FLAGS})
+    set (save_CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH})
+    add_subdirectory(curl-cmake)
+    set (CMAKE_C_FLAGS ${save_CMAKE_C_FLAGS})
+    set (CMAKE_REQUIRED_LIBRARIES ${save_CMAKE_REQUIRED_LIBRARIES})
+    set (CMAKE_CMAKE_REQUIRED_INCLUDES ${save_CMAKE_REQUIRED_INCLUDES})
+    set (CMAKE_REQUIRED_FLAGS ${save_CMAKE_REQUIRED_FLAGS})
+    set (CMAKE_CMAKE_MODULE_PATH ${save_CMAKE_MODULE_PATH})
+
+    add_subdirectory(aws-s3-cmake)
+
+    # The library is large - avoid bloat.
+    target_compile_options (aws_s3 PRIVATE -g0)
+    target_compile_options (aws_s3_checksums PRIVATE -g0)
+    target_compile_options (libcurl PRIVATE -g0)
+endif ()
+
 if (USE_BASE64)
     add_subdirectory (base64-cmake)
 endif()

 if (USE_INTERNAL_HYPERSCAN_LIBRARY)
     add_subdirectory (hyperscan)
+
+    # The library is large - avoid bloat.
+    if (USE_STATIC_LIBRARIES)
+        target_compile_options (hs PRIVATE -g0)
+    else ()
+        target_compile_options (hs_shared PRIVATE -g0)
+    endif ()
 endif()

 if (USE_SIMDJSON)
@@ -332,7 +326,3 @@ endif()

 if (USE_FASTOPS)
     add_subdirectory (fastops-cmake)
 endif()
-
-#if (USE_INTERNAL_ORC_LIBRARY)
-#    add_subdirectory(orc-cmake)
-#endif ()
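Note on the save/restore block around add_subdirectory(curl-cmake) above: curl's CMake scripts mutate global probe state (CMAKE_REQUIRED_* and friends), so the parent snapshots those variables and puts them back afterwards. Two of the restore lines in the diff assign to CMAKE_CMAKE_REQUIRED_INCLUDES and CMAKE_CMAKE_MODULE_PATH (doubled prefix), so those two values are not actually restored. A minimal sketch of the intended idiom, with an illustrative subproject name:

    # Snapshot globals that the subproject is known to clobber...
    set (save_CMAKE_REQUIRED_LIBRARIES ${CMAKE_REQUIRED_LIBRARIES})
    add_subdirectory (third-party-cmake)   # runs its own check_* probes internally
    # ...and restore them so later probes in this file see a clean slate.
    set (CMAKE_REQUIRED_LIBRARIES ${save_CMAKE_REQUIRED_LIBRARIES})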

contrib/aws (vendored submodule)

@@ -0,0 +1 @@
+Subproject commit 45dd8552d3c492defca79d2720bcc809e35654da

contrib/aws-c-common (vendored submodule)

@@ -0,0 +1 @@
+Subproject commit 736a82d1697c108b04a277e66438a7f4e19b6857

contrib/aws-c-event-stream (vendored submodule)

@@ -0,0 +1 @@
+Subproject commit 3bc33662f9ccff4f4cbcf9509cc78c26e022fde0

contrib/aws-checksums (vendored submodule)

@@ -0,0 +1 @@
+Subproject commit 519d6d9093819b6cf89ffff589a27ef8f83d0f65


@@ -0,0 +1,107 @@
SET(AWS_S3_LIBRARY_DIR ${ClickHouse_SOURCE_DIR}/contrib/aws/aws-cpp-sdk-s3)
SET(AWS_CORE_LIBRARY_DIR ${ClickHouse_SOURCE_DIR}/contrib/aws/aws-cpp-sdk-core)
SET(AWS_CHECKSUMS_LIBRARY_DIR ${ClickHouse_SOURCE_DIR}/contrib/aws-checksums)
SET(AWS_COMMON_LIBRARY_DIR ${ClickHouse_SOURCE_DIR}/contrib/aws-c-common)
SET(AWS_EVENT_STREAM_LIBRARY_DIR ${ClickHouse_SOURCE_DIR}/contrib/aws-c-event-stream)
OPTION(USE_AWS_MEMORY_MANAGEMENT "Aws memory management" OFF)
configure_file("${AWS_CORE_LIBRARY_DIR}/include/aws/core/SDKConfig.h.in"
"${CMAKE_CURRENT_BINARY_DIR}/include/aws/core/SDKConfig.h" @ONLY)
configure_file("${AWS_COMMON_LIBRARY_DIR}/include/aws/common/config.h.in"
"${CMAKE_CURRENT_BINARY_DIR}/include/aws/common/config.h" @ONLY)
file(GLOB AWS_CORE_SOURCES
"${AWS_CORE_LIBRARY_DIR}/source/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/auth/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/client/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/http/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/http/standard/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/http/curl/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/config/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/external/cjson/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/external/tinyxml2/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/internal/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/monitoring/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/net/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/linux-shared/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/platform/linux-shared/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/utils/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/utils/base64/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/utils/event/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/utils/crypto/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/utils/crypto/openssl/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/utils/crypto/factory/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/utils/json/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/utils/logging/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/utils/memory/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/utils/memory/stl/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/utils/stream/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/utils/threading/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/utils/xml/*.cpp"
)
file(GLOB AWS_S3_SOURCES
"${AWS_S3_LIBRARY_DIR}/source/*.cpp"
)
file(GLOB AWS_S3_MODEL_SOURCES
"${AWS_S3_LIBRARY_DIR}/source/model/*.cpp"
)
file(GLOB AWS_EVENT_STREAM_SOURCES
"${AWS_EVENT_STREAM_LIBRARY_DIR}/source/*.c"
)
file(GLOB AWS_COMMON_SOURCES
"${AWS_COMMON_LIBRARY_DIR}/source/*.c"
"${AWS_COMMON_LIBRARY_DIR}/source/posix/*.c"
)
file(GLOB AWS_CHECKSUMS_SOURCES
"${AWS_CHECKSUMS_LIBRARY_DIR}/source/*.c"
"${AWS_CHECKSUMS_LIBRARY_DIR}/source/intel/*.c"
"${AWS_CHECKSUMS_LIBRARY_DIR}/source/arm/*.c"
)
file(GLOB S3_UNIFIED_SRC
${AWS_EVENT_STREAM_SOURCES}
${AWS_COMMON_SOURCES}
${AWS_S3_SOURCES}
${AWS_S3_MODEL_SOURCES}
${AWS_CORE_SOURCES}
)
set(S3_INCLUDES
"${CMAKE_CURRENT_SOURCE_DIR}/include/"
"${AWS_COMMON_LIBRARY_DIR}/include/"
"${AWS_EVENT_STREAM_LIBRARY_DIR}/include/"
"${AWS_S3_LIBRARY_DIR}/include/"
"${AWS_CORE_LIBRARY_DIR}/include/"
"${CMAKE_CURRENT_BINARY_DIR}/include/"
)
add_library(aws_s3_checksums ${AWS_CHECKSUMS_SOURCES})
target_include_directories(aws_s3_checksums PUBLIC "${AWS_CHECKSUMS_LIBRARY_DIR}/include/")
if(CMAKE_BUILD_TYPE STREQUAL "" OR CMAKE_BUILD_TYPE STREQUAL "Debug")
target_compile_definitions(aws_s3_checksums PRIVATE "-DDEBUG_BUILD")
endif()
set_target_properties(aws_s3_checksums PROPERTIES COMPILE_OPTIONS -fPIC)
set_target_properties(aws_s3_checksums PROPERTIES LINKER_LANGUAGE C)
set_property(TARGET aws_s3_checksums PROPERTY C_STANDARD 99)
add_library(aws_s3 ${S3_UNIFIED_SRC})
target_compile_definitions(aws_s3 PUBLIC -DENABLE_CURL_CLIENT)
target_compile_definitions(aws_s3 PUBLIC "AWS_SDK_VERSION_MAJOR=1")
target_compile_definitions(aws_s3 PUBLIC "AWS_SDK_VERSION_MINOR=7")
target_compile_definitions(aws_s3 PUBLIC "AWS_SDK_VERSION_PATCH=231")
target_include_directories(aws_s3 PUBLIC ${S3_INCLUDES} "${CMAKE_BINARY_DIR}/install")
if (OPENSSL_FOUND)
target_compile_definitions(aws_s3 PUBLIC -DENABLE_OPENSSL_ENCRYPTION)
target_link_libraries(aws_s3 PRIVATE ${OPENSSL_LIBRARIES})
endif()
target_link_libraries(aws_s3 PRIVATE aws_s3_checksums libcurl)


@@ -28,8 +28,7 @@ set (KJ_SRCS
 )

 add_library(kj ${KJ_SRCS})
-target_include_directories(kj PUBLIC ${CAPNPROTO_SOURCE_DIR})
-target_compile_options(kj PUBLIC -Wno-non-virtual-dtor)
+target_include_directories(kj SYSTEM PUBLIC ${CAPNPROTO_SOURCE_DIR})

 set (CAPNP_SRCS
     ${CAPNPROTO_SOURCE_DIR}/capnp/c++.capnp.c++
@@ -50,6 +49,9 @@ set (CAPNP_SRCS
 )

 add_library(capnp ${CAPNP_SRCS})
+set_target_properties(capnp
+    PROPERTIES LINKER_LANGUAGE CXX
+)
 target_link_libraries(capnp PUBLIC kj)

 set (CAPNPC_SRCS
@@ -67,3 +69,15 @@ set (CAPNPC_SRCS
 )

 add_library(capnpc ${CAPNPC_SRCS})
 target_link_libraries(capnpc PUBLIC capnp)
+
+# The library has substandard code
+if (COMPILER_GCC)
+    set (SUPPRESS_WARNINGS -Wno-non-virtual-dtor -Wno-sign-compare -Wno-strict-aliasing -Wno-maybe-uninitialized
+        -Wno-deprecated-declarations -Wno-class-memaccess)
+elseif (COMPILER_CLANG)
+    set (SUPPRESS_WARNINGS -Wno-non-virtual-dtor -Wno-sign-compare -Wno-strict-aliasing -Wno-deprecated-declarations)
+endif ()
+
+target_compile_options(kj PRIVATE ${SUPPRESS_WARNINGS})
+target_compile_options(capnp PRIVATE ${SUPPRESS_WARNINGS})
+target_compile_options(capnpc PRIVATE ${SUPPRESS_WARNINGS})


@@ -1,6 +1,6 @@
 add_library(roaring
     roaring.c
     roaring/roaring.h
     roaring/roaring.hh)

-target_include_directories (roaring PUBLIC ${CMAKE_CURRENT_SOURCE_DIR})
+target_include_directories (roaring SYSTEM PUBLIC ${CMAKE_CURRENT_SOURCE_DIR})

contrib/curl (vendored submodule)

@@ -0,0 +1 @@
+Subproject commit 3b8bbbbd1609c638a3d3d0acb148a33dedb67be3


@@ -0,0 +1,61 @@
include(CheckCSourceCompiles)
option(CURL_HIDDEN_SYMBOLS "Set to ON to hide libcurl internal symbols (=hide all symbols that aren't officially external)." ON)
mark_as_advanced(CURL_HIDDEN_SYMBOLS)
if(CURL_HIDDEN_SYMBOLS)
set(SUPPORTS_SYMBOL_HIDING FALSE)
if(CMAKE_C_COMPILER_ID MATCHES "Clang")
set(SUPPORTS_SYMBOL_HIDING TRUE)
set(_SYMBOL_EXTERN "__attribute__ ((__visibility__ (\"default\")))")
set(_CFLAG_SYMBOLS_HIDE "-fvisibility=hidden")
elseif(CMAKE_COMPILER_IS_GNUCC)
if(NOT CMAKE_VERSION VERSION_LESS 2.8.10)
set(GCC_VERSION ${CMAKE_C_COMPILER_VERSION})
else()
execute_process(COMMAND ${CMAKE_C_COMPILER} -dumpversion
OUTPUT_VARIABLE GCC_VERSION)
endif()
if(NOT GCC_VERSION VERSION_LESS 3.4)
# note: this is considered buggy prior to 4.0 but the autotools don't care, so let's ignore that fact
set(SUPPORTS_SYMBOL_HIDING TRUE)
set(_SYMBOL_EXTERN "__attribute__ ((__visibility__ (\"default\")))")
set(_CFLAG_SYMBOLS_HIDE "-fvisibility=hidden")
endif()
elseif(CMAKE_C_COMPILER_ID MATCHES "SunPro" AND NOT CMAKE_C_COMPILER_VERSION VERSION_LESS 8.0)
set(SUPPORTS_SYMBOL_HIDING TRUE)
set(_SYMBOL_EXTERN "__global")
set(_CFLAG_SYMBOLS_HIDE "-xldscope=hidden")
elseif(CMAKE_C_COMPILER_ID MATCHES "Intel" AND NOT CMAKE_C_COMPILER_VERSION VERSION_LESS 9.0)
# note: this should probably just check for version 9.1.045 but I'm not 100% sure
# so let's do it the same way autotools do.
set(SUPPORTS_SYMBOL_HIDING TRUE)
set(_SYMBOL_EXTERN "__attribute__ ((__visibility__ (\"default\")))")
set(_CFLAG_SYMBOLS_HIDE "-fvisibility=hidden")
check_c_source_compiles("#include <stdio.h>
int main (void) { printf(\"icc fvisibility bug test\"); return 0; }" _no_bug)
if(NOT _no_bug)
set(SUPPORTS_SYMBOL_HIDING FALSE)
set(_SYMBOL_EXTERN "")
set(_CFLAG_SYMBOLS_HIDE "")
endif()
elseif(MSVC)
set(SUPPORTS_SYMBOL_HIDING TRUE)
endif()
set(HIDES_CURL_PRIVATE_SYMBOLS ${SUPPORTS_SYMBOL_HIDING})
elseif(MSVC)
if(NOT CMAKE_VERSION VERSION_LESS 3.7)
set(CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS TRUE) #present since 3.4.3 but broken
set(HIDES_CURL_PRIVATE_SYMBOLS FALSE)
else()
message(WARNING "Hiding private symbols regardless CURL_HIDDEN_SYMBOLS being disabled.")
set(HIDES_CURL_PRIVATE_SYMBOLS TRUE)
endif()
else()
set(HIDES_CURL_PRIVATE_SYMBOLS FALSE)
endif()
set(CURL_CFLAG_SYMBOLS_HIDE ${_CFLAG_SYMBOLS_HIDE})
set(CURL_EXTERN_SYMBOL ${_SYMBOL_EXTERN})
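A sketch of how the two results computed here are typically applied to a target (the target name is illustrative; the actual wiring lives elsewhere in curl's build files):

    # Compile with -fvisibility=hidden (or the compiler's equivalent) and
    # mark public entry points explicitly via the chosen extern attribute.
    target_compile_options (libcurl PRIVATE ${CURL_CFLAG_SYMBOLS_HIDE})
    target_compile_definitions (libcurl PRIVATE "CURL_EXTERN_SYMBOL=${CURL_EXTERN_SYMBOL}")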


@@ -0,0 +1,617 @@
/***************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2019, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at https://curl.haxx.se/docs/copyright.html.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
***************************************************************************/
#ifdef TIME_WITH_SYS_TIME
/* Time with sys/time test */
#include <sys/types.h>
#include <sys/time.h>
#include <time.h>
int
main ()
{
if ((struct tm *) 0)
return 0;
;
return 0;
}
#endif
#ifdef HAVE_FCNTL_O_NONBLOCK
/* headers for FCNTL_O_NONBLOCK test */
#include <sys/types.h>
#include <unistd.h>
#include <fcntl.h>
/* */
#if defined(sun) || defined(__sun__) || \
defined(__SUNPRO_C) || defined(__SUNPRO_CC)
# if defined(__SVR4) || defined(__srv4__)
# define PLATFORM_SOLARIS
# else
# define PLATFORM_SUNOS4
# endif
#endif
#if (defined(_AIX) || defined(__xlC__)) && !defined(_AIX41)
# define PLATFORM_AIX_V3
#endif
/* */
#if defined(PLATFORM_SUNOS4) || defined(PLATFORM_AIX_V3) || defined(__BEOS__)
#error "O_NONBLOCK does not work on this platform"
#endif
int
main ()
{
/* O_NONBLOCK source test */
int flags = 0;
if(0 != fcntl(0, F_SETFL, flags | O_NONBLOCK))
return 1;
return 0;
}
#endif
/* tests for gethostbyaddr_r or gethostbyname_r */
#if defined(HAVE_GETHOSTBYADDR_R_5_REENTRANT) || \
defined(HAVE_GETHOSTBYADDR_R_7_REENTRANT) || \
defined(HAVE_GETHOSTBYADDR_R_8_REENTRANT) || \
defined(HAVE_GETHOSTBYNAME_R_3_REENTRANT) || \
defined(HAVE_GETHOSTBYNAME_R_5_REENTRANT) || \
defined(HAVE_GETHOSTBYNAME_R_6_REENTRANT)
# define _REENTRANT
/* no idea whether _REENTRANT is always set, just invent a new flag */
# define TEST_GETHOSTBYFOO_REENTRANT
#endif
#if defined(HAVE_GETHOSTBYADDR_R_5) || \
defined(HAVE_GETHOSTBYADDR_R_7) || \
defined(HAVE_GETHOSTBYADDR_R_8) || \
defined(HAVE_GETHOSTBYNAME_R_3) || \
defined(HAVE_GETHOSTBYNAME_R_5) || \
defined(HAVE_GETHOSTBYNAME_R_6) || \
defined(TEST_GETHOSTBYFOO_REENTRANT)
#include <sys/types.h>
#include <netdb.h>
int main(void)
{
char *address = "example.com";
int length = 0;
int type = 0;
struct hostent h;
int rc = 0;
#if defined(HAVE_GETHOSTBYADDR_R_5) || \
defined(HAVE_GETHOSTBYADDR_R_5_REENTRANT) || \
\
defined(HAVE_GETHOSTBYNAME_R_3) || \
defined(HAVE_GETHOSTBYNAME_R_3_REENTRANT)
struct hostent_data hdata;
#elif defined(HAVE_GETHOSTBYADDR_R_7) || \
defined(HAVE_GETHOSTBYADDR_R_7_REENTRANT) || \
defined(HAVE_GETHOSTBYADDR_R_8) || \
defined(HAVE_GETHOSTBYADDR_R_8_REENTRANT) || \
\
defined(HAVE_GETHOSTBYNAME_R_5) || \
defined(HAVE_GETHOSTBYNAME_R_5_REENTRANT) || \
defined(HAVE_GETHOSTBYNAME_R_6) || \
defined(HAVE_GETHOSTBYNAME_R_6_REENTRANT)
char buffer[8192];
int h_errnop;
struct hostent *hp;
#endif
#ifndef gethostbyaddr_r
(void)gethostbyaddr_r;
#endif
#if defined(HAVE_GETHOSTBYADDR_R_5) || \
defined(HAVE_GETHOSTBYADDR_R_5_REENTRANT)
rc = gethostbyaddr_r(address, length, type, &h, &hdata);
(void)rc;
#elif defined(HAVE_GETHOSTBYADDR_R_7) || \
defined(HAVE_GETHOSTBYADDR_R_7_REENTRANT)
hp = gethostbyaddr_r(address, length, type, &h, buffer, 8192, &h_errnop);
(void)hp;
#elif defined(HAVE_GETHOSTBYADDR_R_8) || \
defined(HAVE_GETHOSTBYADDR_R_8_REENTRANT)
rc = gethostbyaddr_r(address, length, type, &h, buffer, 8192, &hp, &h_errnop);
(void)rc;
#endif
#if defined(HAVE_GETHOSTBYNAME_R_3) || \
defined(HAVE_GETHOSTBYNAME_R_3_REENTRANT)
rc = gethostbyname_r(address, &h, &hdata);
#elif defined(HAVE_GETHOSTBYNAME_R_5) || \
defined(HAVE_GETHOSTBYNAME_R_5_REENTRANT)
rc = gethostbyname_r(address, &h, buffer, 8192, &h_errnop);
(void)hp; /* not used for test */
#elif defined(HAVE_GETHOSTBYNAME_R_6) || \
defined(HAVE_GETHOSTBYNAME_R_6_REENTRANT)
rc = gethostbyname_r(address, &h, buffer, 8192, &hp, &h_errnop);
#endif
(void)length;
(void)type;
(void)rc;
return 0;
}
#endif
#ifdef HAVE_SOCKLEN_T
#ifdef _WIN32
#include <ws2tcpip.h>
#else
#include <sys/types.h>
#include <sys/socket.h>
#endif
int
main ()
{
if ((socklen_t *) 0)
return 0;
if (sizeof (socklen_t))
return 0;
;
return 0;
}
#endif
#ifdef HAVE_IN_ADDR_T
#include <sys/types.h>
#include <sys/socket.h>
#include <arpa/inet.h>
int
main ()
{
if ((in_addr_t *) 0)
return 0;
if (sizeof (in_addr_t))
return 0;
;
return 0;
}
#endif
#ifdef HAVE_BOOL_T
#ifdef HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif
#ifdef HAVE_STDBOOL_H
#include <stdbool.h>
#endif
int
main ()
{
if (sizeof (bool *) )
return 0;
;
return 0;
}
#endif
#ifdef STDC_HEADERS
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <float.h>
int main() { return 0; }
#endif
#ifdef RETSIGTYPE_TEST
#include <sys/types.h>
#include <signal.h>
#ifdef signal
# undef signal
#endif
#ifdef __cplusplus
extern "C" void (*signal (int, void (*)(int)))(int);
#else
void (*signal ()) ();
#endif
int
main ()
{
return 0;
}
#endif
#ifdef HAVE_INET_NTOA_R_DECL
#include <arpa/inet.h>
typedef void (*func_type)();
int main()
{
#ifndef inet_ntoa_r
func_type func;
func = (func_type)inet_ntoa_r;
(void)func;
#endif
return 0;
}
#endif
#ifdef HAVE_INET_NTOA_R_DECL_REENTRANT
#define _REENTRANT
#include <arpa/inet.h>
typedef void (*func_type)();
int main()
{
#ifndef inet_ntoa_r
func_type func;
func = (func_type)&inet_ntoa_r;
(void)func;
#endif
return 0;
}
#endif
#ifdef HAVE_GETADDRINFO
#include <netdb.h>
#include <sys/types.h>
#include <sys/socket.h>
int main(void) {
struct addrinfo hints, *ai;
int error;
memset(&hints, 0, sizeof(hints));
hints.ai_family = AF_UNSPEC;
hints.ai_socktype = SOCK_STREAM;
#ifndef getaddrinfo
(void)getaddrinfo;
#endif
error = getaddrinfo("127.0.0.1", "8080", &hints, &ai);
if (error) {
return 1;
}
return 0;
}
#endif
#ifdef HAVE_FILE_OFFSET_BITS
#ifdef _FILE_OFFSET_BITS
#undef _FILE_OFFSET_BITS
#endif
#define _FILE_OFFSET_BITS 64
#include <sys/types.h>
/* Check that off_t can represent 2**63 - 1 correctly.
We can't simply define LARGE_OFF_T to be 9223372036854775807,
since some C++ compilers masquerading as C compilers
incorrectly reject 9223372036854775807. */
#define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62))
int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721
&& LARGE_OFF_T % 2147483647 == 1)
? 1 : -1];
int main () { ; return 0; }
#endif
#ifdef HAVE_IOCTLSOCKET
/* includes start */
#ifdef HAVE_WINDOWS_H
# ifndef WIN32_LEAN_AND_MEAN
# define WIN32_LEAN_AND_MEAN
# endif
# include <windows.h>
# ifdef HAVE_WINSOCK2_H
# include <winsock2.h>
# else
# ifdef HAVE_WINSOCK_H
# include <winsock.h>
# endif
# endif
#endif
int
main ()
{
/* ioctlsocket source code */
int socket;
unsigned long flags = ioctlsocket(socket, FIONBIO, &flags);
;
return 0;
}
#endif
#ifdef HAVE_IOCTLSOCKET_CAMEL
/* includes start */
#ifdef HAVE_WINDOWS_H
# ifndef WIN32_LEAN_AND_MEAN
# define WIN32_LEAN_AND_MEAN
# endif
# include <windows.h>
# ifdef HAVE_WINSOCK2_H
# include <winsock2.h>
# else
# ifdef HAVE_WINSOCK_H
# include <winsock.h>
# endif
# endif
#endif
int
main ()
{
/* IoctlSocket source code */
if(0 != IoctlSocket(0, 0, 0))
return 1;
;
return 0;
}
#endif
#ifdef HAVE_IOCTLSOCKET_CAMEL_FIONBIO
/* includes start */
#ifdef HAVE_WINDOWS_H
# ifndef WIN32_LEAN_AND_MEAN
# define WIN32_LEAN_AND_MEAN
# endif
# include <windows.h>
# ifdef HAVE_WINSOCK2_H
# include <winsock2.h>
# else
# ifdef HAVE_WINSOCK_H
# include <winsock.h>
# endif
# endif
#endif
int
main ()
{
/* IoctlSocket source code */
long flags = 0;
if(0 != ioctlsocket(0, FIONBIO, &flags))
return 1;
;
return 0;
}
#endif
#ifdef HAVE_IOCTLSOCKET_FIONBIO
/* includes start */
#ifdef HAVE_WINDOWS_H
# ifndef WIN32_LEAN_AND_MEAN
# define WIN32_LEAN_AND_MEAN
# endif
# include <windows.h>
# ifdef HAVE_WINSOCK2_H
# include <winsock2.h>
# else
# ifdef HAVE_WINSOCK_H
# include <winsock.h>
# endif
# endif
#endif
int
main ()
{
int flags = 0;
if(0 != ioctlsocket(0, FIONBIO, &flags))
return 1;
;
return 0;
}
#endif
#ifdef HAVE_IOCTL_FIONBIO
/* headers for FIONBIO test */
/* includes start */
#ifdef HAVE_SYS_TYPES_H
# include <sys/types.h>
#endif
#ifdef HAVE_UNISTD_H
# include <unistd.h>
#endif
#ifdef HAVE_SYS_SOCKET_H
# include <sys/socket.h>
#endif
#ifdef HAVE_SYS_IOCTL_H
# include <sys/ioctl.h>
#endif
#ifdef HAVE_STROPTS_H
# include <stropts.h>
#endif
int
main ()
{
int flags = 0;
if(0 != ioctl(0, FIONBIO, &flags))
return 1;
;
return 0;
}
#endif
#ifdef HAVE_IOCTL_SIOCGIFADDR
/* headers for FIONBIO test */
/* includes start */
#ifdef HAVE_SYS_TYPES_H
# include <sys/types.h>
#endif
#ifdef HAVE_UNISTD_H
# include <unistd.h>
#endif
#ifdef HAVE_SYS_SOCKET_H
# include <sys/socket.h>
#endif
#ifdef HAVE_SYS_IOCTL_H
# include <sys/ioctl.h>
#endif
#ifdef HAVE_STROPTS_H
# include <stropts.h>
#endif
#include <net/if.h>
int
main ()
{
struct ifreq ifr;
if(0 != ioctl(0, SIOCGIFADDR, &ifr))
return 1;
;
return 0;
}
#endif
#ifdef HAVE_SETSOCKOPT_SO_NONBLOCK
/* includes start */
#ifdef HAVE_WINDOWS_H
# ifndef WIN32_LEAN_AND_MEAN
# define WIN32_LEAN_AND_MEAN
# endif
# include <windows.h>
# ifdef HAVE_WINSOCK2_H
# include <winsock2.h>
# else
# ifdef HAVE_WINSOCK_H
# include <winsock.h>
# endif
# endif
#endif
/* includes start */
#ifdef HAVE_SYS_TYPES_H
# include <sys/types.h>
#endif
#ifdef HAVE_SYS_SOCKET_H
# include <sys/socket.h>
#endif
/* includes end */
int
main ()
{
if(0 != setsockopt(0, SOL_SOCKET, SO_NONBLOCK, 0, 0))
return 1;
;
return 0;
}
#endif
#ifdef HAVE_GLIBC_STRERROR_R
#include <string.h>
#include <errno.h>
void check(char c) {}
int
main () {
char buffer[1024];
/* This will not compile if strerror_r does not return a char* */
check(strerror_r(EACCES, buffer, sizeof(buffer))[0]);
return 0;
}
#endif
#ifdef HAVE_POSIX_STRERROR_R
#include <string.h>
#include <errno.h>
/* float, because a pointer can't be implicitly cast to float */
void check(float f) {}
int
main () {
char buffer[1024];
/* This will not compile if strerror_r does not return an int */
check(strerror_r(EACCES, buffer, sizeof(buffer)));
return 0;
}
#endif
#ifdef HAVE_FSETXATTR_6
#include <sys/xattr.h> /* header from libc, not from libattr */
int
main() {
fsetxattr(0, 0, 0, 0, 0, 0);
return 0;
}
#endif
#ifdef HAVE_FSETXATTR_5
#include <sys/xattr.h> /* header from libc, not from libattr */
int
main() {
fsetxattr(0, 0, 0, 0, 0);
return 0;
}
#endif
#ifdef HAVE_CLOCK_GETTIME_MONOTONIC
#include <time.h>
int
main() {
struct timespec ts = {0, 0};
clock_gettime(CLOCK_MONOTONIC, &ts);
return 0;
}
#endif
#ifdef HAVE_BUILTIN_AVAILABLE
int
main() {
if(__builtin_available(macOS 10.12, *)) {}
return 0;
}
#endif
#ifdef HAVE_VARIADIC_MACROS_C99
#define c99_vmacro3(first, ...) fun3(first, __VA_ARGS__)
#define c99_vmacro2(first, ...) fun2(first, __VA_ARGS__)
int fun3(int arg1, int arg2, int arg3);
int fun2(int arg1, int arg2);
int fun3(int arg1, int arg2, int arg3) {
return arg1 + arg2 + arg3;
}
int fun2(int arg1, int arg2) {
return arg1 + arg2;
}
int
main() {
int res3 = c99_vmacro3(1, 2, 3);
int res2 = c99_vmacro2(1, 2);
(void)res3;
(void)res2;
return 0;
}
#endif
#ifdef HAVE_VARIADIC_MACROS_GCC
#define gcc_vmacro3(first, args...) fun3(first, args)
#define gcc_vmacro2(first, args...) fun2(first, args)
int fun3(int arg1, int arg2, int arg3);
int fun2(int arg1, int arg2);
int fun3(int arg1, int arg2, int arg3) {
return arg1 + arg2 + arg3;
}
int fun2(int arg1, int arg2) {
return arg1 + arg2;
}
int
main() {
int res3 = gcc_vmacro3(1, 2, 3);
int res2 = gcc_vmacro2(1, 2);
(void)res3;
(void)res2;
return 0;
}
#endif


@@ -0,0 +1,84 @@
#File defines convenience macros for available feature testing
# This macro checks if the symbol exists in the library and if it
# does, it prepends library to the list. It is intended to be called
# multiple times with a sequence of possibly dependent libraries in
# order of least-to-most-dependent. Some libraries depend on others
# to link correctly.
macro(check_library_exists_concat LIBRARY SYMBOL VARIABLE)
check_library_exists("${LIBRARY};${CURL_LIBS}" ${SYMBOL} "${CMAKE_LIBRARY_PATH}"
${VARIABLE})
if(${VARIABLE})
set(CURL_LIBS ${LIBRARY} ${CURL_LIBS})
endif()
endmacro()
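# Editorial example, not part of the original file: each successful probe
# prepends its library to CURL_LIBS, so later probes also link against
# everything found so far (hence "least-to-most-dependent" ordering).
#   check_library_exists_concat("socket" connect     HAVE_LIBSOCKET)
#   check_library_exists_concat("ssl"    SSL_connect HAVE_LIBSSL)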
# Check if header file exists and add it to the list.
# This macro is intended to be called multiple times with a sequence of
# possibly dependent header files. Some headers depend on others to be
# compiled correctly.
macro(check_include_file_concat FILE VARIABLE)
check_include_files("${CURL_INCLUDES};${FILE}" ${VARIABLE})
if(${VARIABLE})
set(CURL_INCLUDES ${CURL_INCLUDES} ${FILE})
set(CURL_TEST_DEFINES "${CURL_TEST_DEFINES} -D${VARIABLE}")
endif()
endmacro()
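# Editorial example, not part of the original file: headers accumulate in
# CURL_INCLUDES, so a header that only compiles after an earlier one (e.g.
# sys/socket.h after sys/types.h) is probed together with what was found:
#   check_include_file_concat("sys/types.h"  HAVE_SYS_TYPES_H)
#   check_include_file_concat("sys/socket.h" HAVE_SYS_SOCKET_H)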
# For other curl specific tests, use this macro.
macro(curl_internal_test CURL_TEST)
if(NOT DEFINED "${CURL_TEST}")
set(MACRO_CHECK_FUNCTION_DEFINITIONS
"-D${CURL_TEST} ${CURL_TEST_DEFINES} ${CMAKE_REQUIRED_FLAGS}")
if(CMAKE_REQUIRED_LIBRARIES)
set(CURL_TEST_ADD_LIBRARIES
"-DLINK_LIBRARIES:STRING=${CMAKE_REQUIRED_LIBRARIES}")
endif()
try_compile(${CURL_TEST}
${CMAKE_BINARY_DIR}
${CMAKE_CURRENT_SOURCE_DIR}/CMake/CurlTests.c
CMAKE_FLAGS -DCOMPILE_DEFINITIONS:STRING=${MACRO_CHECK_FUNCTION_DEFINITIONS}
"${CURL_TEST_ADD_LIBRARIES}"
OUTPUT_VARIABLE OUTPUT)
if(${CURL_TEST})
set(${CURL_TEST} 1 CACHE INTERNAL "Curl test ${FUNCTION}")
file(APPEND ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeOutput.log
"Performing Curl Test ${CURL_TEST} passed with the following output:\n"
"${OUTPUT}\n")
else()
set(${CURL_TEST} "" CACHE INTERNAL "Curl test ${FUNCTION}")
file(APPEND ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeError.log
"Performing Curl Test ${CURL_TEST} failed with the following output:\n"
"${OUTPUT}\n")
endif()
endif()
endmacro()
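# Illustrative usage (invoked this way further below): compiles
# CMake/CurlTests.c with -DHAVE_CLOCK_GETTIME_MONOTONIC so that only the
# matching #ifdef block in that file is built:
#   curl_internal_test(HAVE_CLOCK_GETTIME_MONOTONIC)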
macro(curl_nroff_check)
find_program(NROFF NAMES gnroff nroff)
if(NROFF)
# Need a way to write to stdin; this will do
file(WRITE "${CMAKE_CURRENT_BINARY_DIR}/nroff-input.txt" "test")
# Tests for a valid nroff option to generate a manpage
foreach(_MANOPT "-man" "-mandoc")
execute_process(COMMAND "${NROFF}" ${_MANOPT}
OUTPUT_VARIABLE NROFF_MANOPT_OUTPUT
INPUT_FILE "${CMAKE_CURRENT_BINARY_DIR}/nroff-input.txt"
ERROR_QUIET)
# Save the option if it was valid
if(NROFF_MANOPT_OUTPUT)
set(NROFF_MANOPT ${_MANOPT})
set(NROFF_USEFUL ON)
break()
endif()
endforeach()
# No need for the temporary file
file(REMOVE "${CMAKE_CURRENT_BINARY_DIR}/nroff-input.txt")
if(NOT NROFF_USEFUL)
message(WARNING "Found no *nroff option to get plaintext from man pages")
endif()
else()
message(WARNING "Found no *nroff program")
endif()
endmacro()

View File

@ -0,0 +1,260 @@
include(CheckCSourceCompiles)
# The beginning of the sources (macros and includes)
set(_source_epilogue "#undef inline")
macro(add_header_include check header)
if(${check})
set(_source_epilogue "${_source_epilogue}\n#include <${header}>")
endif()
endmacro()
set(signature_call_conv)
if(HAVE_WINDOWS_H)
add_header_include(HAVE_WINSOCK2_H "winsock2.h")
add_header_include(HAVE_WINDOWS_H "windows.h")
add_header_include(HAVE_WINSOCK_H "winsock.h")
set(_source_epilogue
"${_source_epilogue}\n#ifndef WIN32_LEAN_AND_MEAN\n#define WIN32_LEAN_AND_MEAN\n#endif")
set(signature_call_conv "PASCAL")
if(HAVE_LIBWS2_32)
set(CMAKE_REQUIRED_LIBRARIES ws2_32)
endif()
else()
add_header_include(HAVE_SYS_TYPES_H "sys/types.h")
add_header_include(HAVE_SYS_SOCKET_H "sys/socket.h")
endif()
set(CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY)
check_c_source_compiles("${_source_epilogue}
int main(void) {
recv(0, 0, 0, 0);
return 0;
}" curl_cv_recv)
if(curl_cv_recv)
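# The nested loops below brute-force the platform's exact recv() prototype:
# every combination of candidate return/argument types is test-compiled until
# one succeeds, and the winning combination is cached in curl_cv_func_recv_args.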
if(NOT DEFINED curl_cv_func_recv_args OR "${curl_cv_func_recv_args}" STREQUAL "unknown")
foreach(recv_retv "int" "ssize_t" )
foreach(recv_arg1 "SOCKET" "int" )
foreach(recv_arg2 "char *" "void *" )
foreach(recv_arg3 "int" "size_t" "socklen_t" "unsigned int")
foreach(recv_arg4 "int" "unsigned int")
if(NOT curl_cv_func_recv_done)
unset(curl_cv_func_recv_test CACHE)
check_c_source_compiles("
${_source_epilogue}
extern ${recv_retv} ${signature_call_conv}
recv(${recv_arg1}, ${recv_arg2}, ${recv_arg3}, ${recv_arg4});
int main(void) {
${recv_arg1} s=0;
${recv_arg2} buf=0;
${recv_arg3} len=0;
${recv_arg4} flags=0;
${recv_retv} res = recv(s, buf, len, flags);
(void) res;
return 0;
}"
curl_cv_func_recv_test)
if(curl_cv_func_recv_test)
set(curl_cv_func_recv_args
"${recv_arg1},${recv_arg2},${recv_arg3},${recv_arg4},${recv_retv}")
set(RECV_TYPE_ARG1 "${recv_arg1}")
set(RECV_TYPE_ARG2 "${recv_arg2}")
set(RECV_TYPE_ARG3 "${recv_arg3}")
set(RECV_TYPE_ARG4 "${recv_arg4}")
set(RECV_TYPE_RETV "${recv_retv}")
set(HAVE_RECV 1)
set(curl_cv_func_recv_done 1)
endif()
endif()
endforeach()
endforeach()
endforeach()
endforeach()
endforeach()
else()
string(REGEX REPLACE "^([^,]*),[^,]*,[^,]*,[^,]*,[^,]*$" "\\1" RECV_TYPE_ARG1 "${curl_cv_func_recv_args}")
string(REGEX REPLACE "^[^,]*,([^,]*),[^,]*,[^,]*,[^,]*$" "\\1" RECV_TYPE_ARG2 "${curl_cv_func_recv_args}")
string(REGEX REPLACE "^[^,]*,[^,]*,([^,]*),[^,]*,[^,]*$" "\\1" RECV_TYPE_ARG3 "${curl_cv_func_recv_args}")
string(REGEX REPLACE "^[^,]*,[^,]*,[^,]*,([^,]*),[^,]*$" "\\1" RECV_TYPE_ARG4 "${curl_cv_func_recv_args}")
string(REGEX REPLACE "^[^,]*,[^,]*,[^,]*,[^,]*,([^,]*)$" "\\1" RECV_TYPE_RETV "${curl_cv_func_recv_args}")
endif()
if("${curl_cv_func_recv_args}" STREQUAL "unknown")
message(FATAL_ERROR "Cannot find proper types to use for recv args")
endif()
else()
message(FATAL_ERROR "Unable to link function recv")
endif()
set(curl_cv_func_recv_args "${curl_cv_func_recv_args}" CACHE INTERNAL "Arguments for recv")
set(HAVE_RECV 1)
check_c_source_compiles("${_source_epilogue}
int main(void) {
send(0, 0, 0, 0);
return 0;
}" curl_cv_send)
if(curl_cv_send)
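# Same brute-force signature probing as for recv() above, with one extra step:
# the const qualifier of the buffer argument is split off into SEND_QUAL_ARG2.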
if(NOT DEFINED curl_cv_func_send_args OR "${curl_cv_func_send_args}" STREQUAL "unknown")
foreach(send_retv "int" "ssize_t" )
foreach(send_arg1 "SOCKET" "int" "ssize_t" )
foreach(send_arg2 "const char *" "const void *" "void *" "char *")
foreach(send_arg3 "int" "size_t" "socklen_t" "unsigned int")
foreach(send_arg4 "int" "unsigned int")
if(NOT curl_cv_func_send_done)
unset(curl_cv_func_send_test CACHE)
check_c_source_compiles("
${_source_epilogue}
extern ${send_retv} ${signature_call_conv}
send(${send_arg1}, ${send_arg2}, ${send_arg3}, ${send_arg4});
int main(void) {
${send_arg1} s=0;
${send_arg2} buf=0;
${send_arg3} len=0;
${send_arg4} flags=0;
${send_retv} res = send(s, buf, len, flags);
(void) res;
return 0;
}"
curl_cv_func_send_test)
if(curl_cv_func_send_test)
string(REGEX REPLACE "(const) .*" "\\1" send_qual_arg2 "${send_arg2}")
string(REGEX REPLACE "const (.*)" "\\1" send_arg2 "${send_arg2}")
set(curl_cv_func_send_args
"${send_arg1},${send_arg2},${send_arg3},${send_arg4},${send_retv},${send_qual_arg2}")
set(SEND_TYPE_ARG1 "${send_arg1}")
set(SEND_TYPE_ARG2 "${send_arg2}")
set(SEND_TYPE_ARG3 "${send_arg3}")
set(SEND_TYPE_ARG4 "${send_arg4}")
set(SEND_TYPE_RETV "${send_retv}")
set(HAVE_SEND 1)
set(curl_cv_func_send_done 1)
endif()
endif()
endforeach()
endforeach()
endforeach()
endforeach()
endforeach()
else()
string(REGEX REPLACE "^([^,]*),[^,]*,[^,]*,[^,]*,[^,]*,[^,]*$" "\\1" SEND_TYPE_ARG1 "${curl_cv_func_send_args}")
string(REGEX REPLACE "^[^,]*,([^,]*),[^,]*,[^,]*,[^,]*,[^,]*$" "\\1" SEND_TYPE_ARG2 "${curl_cv_func_send_args}")
string(REGEX REPLACE "^[^,]*,[^,]*,([^,]*),[^,]*,[^,]*,[^,]*$" "\\1" SEND_TYPE_ARG3 "${curl_cv_func_send_args}")
string(REGEX REPLACE "^[^,]*,[^,]*,[^,]*,([^,]*),[^,]*,[^,]*$" "\\1" SEND_TYPE_ARG4 "${curl_cv_func_send_args}")
string(REGEX REPLACE "^[^,]*,[^,]*,[^,]*,[^,]*,([^,]*),[^,]*$" "\\1" SEND_TYPE_RETV "${curl_cv_func_send_args}")
string(REGEX REPLACE "^[^,]*,[^,]*,[^,]*,[^,]*,[^,]*,([^,]*)$" "\\1" SEND_QUAL_ARG2 "${curl_cv_func_send_args}")
endif()
if("${curl_cv_func_send_args}" STREQUAL "unknown")
message(FATAL_ERROR "Cannot find proper types to use for send args")
endif()
set(SEND_QUAL_ARG2 "const")
else()
message(FATAL_ERROR "Unable to link function send")
endif()
set(curl_cv_func_send_args "${curl_cv_func_send_args}" CACHE INTERNAL "Arguments for send")
set(HAVE_SEND 1)
check_c_source_compiles("${_source_epilogue}
int main(void) {
int flag = MSG_NOSIGNAL;
(void)flag;
return 0;
}" HAVE_MSG_NOSIGNAL)
if(NOT HAVE_WINDOWS_H)
add_header_include(HAVE_SYS_TIME_H "sys/time.h")
add_header_include(TIME_WITH_SYS_TIME "time.h")
add_header_include(HAVE_TIME_H "time.h")
endif()
check_c_source_compiles("${_source_epilogue}
int main(void) {
struct timeval ts;
ts.tv_sec = 0;
ts.tv_usec = 0;
(void)ts;
return 0;
}" HAVE_STRUCT_TIMEVAL)
set(HAVE_SIG_ATOMIC_T 1)
set(CMAKE_REQUIRED_FLAGS)
if(HAVE_SIGNAL_H)
set(CMAKE_REQUIRED_FLAGS "-DHAVE_SIGNAL_H")
set(CMAKE_EXTRA_INCLUDE_FILES "signal.h")
endif()
check_type_size("sig_atomic_t" SIZEOF_SIG_ATOMIC_T)
if(HAVE_SIZEOF_SIG_ATOMIC_T)
check_c_source_compiles("
#ifdef HAVE_SIGNAL_H
# include <signal.h>
#endif
int main(void) {
static volatile sig_atomic_t dummy = 0;
(void)dummy;
return 0;
}" HAVE_SIG_ATOMIC_T_NOT_VOLATILE)
if(NOT HAVE_SIG_ATOMIC_T_NOT_VOLATILE)
set(HAVE_SIG_ATOMIC_T_VOLATILE 1)
endif()
endif()
if(HAVE_WINDOWS_H)
set(CMAKE_EXTRA_INCLUDE_FILES winsock2.h)
else()
set(CMAKE_EXTRA_INCLUDE_FILES)
if(HAVE_SYS_SOCKET_H)
set(CMAKE_EXTRA_INCLUDE_FILES sys/socket.h)
endif()
endif()
check_type_size("struct sockaddr_storage" SIZEOF_STRUCT_SOCKADDR_STORAGE)
if(HAVE_SIZEOF_STRUCT_SOCKADDR_STORAGE)
set(HAVE_STRUCT_SOCKADDR_STORAGE 1)
endif()
unset(CMAKE_TRY_COMPILE_TARGET_TYPE)
if(NOT DEFINED CMAKE_TOOLCHAIN_FILE)
# if not cross-compilation...
include(CheckCSourceRuns)
set(CMAKE_REQUIRED_FLAGS "")
if(HAVE_SYS_POLL_H)
set(CMAKE_REQUIRED_FLAGS "-DHAVE_SYS_POLL_H")
elseif(HAVE_POLL_H)
set(CMAKE_REQUIRED_FLAGS "-DHAVE_POLL_H")
endif()
check_c_source_runs("
#include <stdlib.h>
#include <sys/time.h>
#ifdef HAVE_SYS_POLL_H
# include <sys/poll.h>
#elif HAVE_POLL_H
# include <poll.h>
#endif
int main(void)
{
if(0 != poll(0, 0, 10)) {
return 1; /* fail */
}
else {
/* detect the 10.12 poll() breakage */
struct timeval before, after;
int rc;
size_t us;
gettimeofday(&before, NULL);
rc = poll(NULL, 0, 500);
gettimeofday(&after, NULL);
us = (after.tv_sec - before.tv_sec) * 1000000 +
(after.tv_usec - before.tv_usec);
if(us < 400000) {
return 1;
}
}
return 0;
}" HAVE_POLL_FINE)
endif()

View File

@ -0,0 +1,28 @@
#***************************************************************************
# _ _ ____ _
# Project ___| | | | _ \| |
# / __| | | | |_) | |
# | (__| |_| | _ <| |___
# \___|\___/|_| \_\_____|
#
# Copyright (C) 1998 - 2019, Daniel Stenberg, <daniel@haxx.se>, et al.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at https://curl.haxx.se/docs/copyright.html.
#
# You may opt to use, copy, modify, merge, publish, distribute and/or sell
# copies of the Software, and permit persons to whom the Software is
# furnished to do so, under the terms of the COPYING file.
#
# This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
# KIND, either express or implied.
#
###########################################################################
CMake files under this directory were reused from the curl project.
Here are links to the original source files:
https://github.com/curl/curl/blob/master/CMake/CurlSymbolHiding.cmake
https://github.com/curl/curl/blob/master/CMake/CurlTests.c
https://github.com/curl/curl/blob/master/CMake/Macros.cmake
https://github.com/curl/curl/blob/master/CMake/OtherTests.cmake

File diff suppressed because it is too large

View File

@ -0,0 +1,634 @@
#***************************************************************************
# _ _ ____ _
# Project ___| | | | _ \| |
# / __| | | | |_) | |
# | (__| |_| | _ <| |___
# \___|\___/|_| \_\_____|
#
# Copyright (C) 1998 - 2019, Daniel Stenberg, <daniel@haxx.se>, et al.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at https://curl.haxx.se/docs/copyright.html.
#
# You may opt to use, copy, modify, merge, publish, distribute and/or sell
# copies of the Software, and permit persons to whom the Software is
# furnished to do so, under the terms of the COPYING file.
#
# This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
# KIND, either express or implied.
#
###########################################################################
# NOTE:
# This file is a shrunk and reworked version of the original curl CMakeLists.txt.
# Original file link: https://github.com/curl/curl/blob/3b8bbbbd1609c638a3d3d0acb148a33dedb67be3/CMakeLists.txt
# If you need to update the curl build, you can find a patch file in this
# directory and apply it to a fresh copy of the original CMakeLists.txt.
cmake_minimum_required(VERSION 3.0 FATAL_ERROR)
SET(CURL_SOURCE_DIR ${ClickHouse_SOURCE_DIR}/contrib/curl)
SET(CURL_LIBRARY_DIR ${CURL_SOURCE_DIR}/lib)
set(CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/CMake;${CMAKE_MODULE_PATH}")
# Disable status messages when performing checks
set(CMAKE_REQUIRED_QUIET TRUE)
include(Macros)
include(CMakeDependentOption)
include(CheckCCompilerFlag)
file(READ ${CURL_SOURCE_DIR}/include/curl/curlver.h CURL_VERSION_H_CONTENTS)
string(REGEX MATCH "#define LIBCURL_VERSION \"[^\"]*"
CURL_VERSION ${CURL_VERSION_H_CONTENTS})
string(REGEX REPLACE "[^\"]+\"" "" CURL_VERSION ${CURL_VERSION})
string(REGEX MATCH "#define LIBCURL_VERSION_NUM 0x[0-9a-fA-F]+"
CURL_VERSION_NUM ${CURL_VERSION_H_CONTENTS})
string(REGEX REPLACE "[^0]+0x" "" CURL_VERSION_NUM ${CURL_VERSION_NUM})
message(STATUS "Use curl version=[${CURL_VERSION}]")
set(OPERATING_SYSTEM "${CMAKE_SYSTEM_NAME}")
set(OS "\"${CMAKE_SYSTEM_NAME}\"")
option(PICKY_COMPILER "Enable picky compiler options" ON)
option(ENABLE_THREADED_RESOLVER "Set to ON to enable threaded DNS lookup" ON)
if(CMAKE_COMPILER_IS_GNUCC OR CMAKE_COMPILER_IS_CLANG)
if(PICKY_COMPILER)
foreach(_CCOPT -pedantic -Wall -W -Wpointer-arith -Wwrite-strings -Wunused -Wshadow -Winline -Wnested-externs -Wmissing-declarations -Wmissing-prototypes -Wno-long-long -Wfloat-equal -Wno-multichar -Wsign-compare -Wundef -Wno-format-nonliteral -Wendif-labels -Wstrict-prototypes -Wdeclaration-after-statement -Wstrict-aliasing=3 -Wcast-align -Wtype-limits -Wold-style-declaration -Wmissing-parameter-type -Wempty-body -Wclobbered -Wignored-qualifiers -Wconversion -Wno-sign-conversion -Wvla -Wdouble-promotion -Wno-system-headers -Wno-pedantic-ms-format)
# surprisingly, CHECK_C_COMPILER_FLAG needs a new variable to store each new
# test result in.
check_c_compiler_flag(${_CCOPT} OPT${_CCOPT})
if(OPT${_CCOPT})
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${_CCOPT}")
endif()
endforeach()
endif()
endif()
# For debug libs and exes, add "-d" postfix
if(NOT DEFINED CMAKE_DEBUG_POSTFIX)
set(CMAKE_DEBUG_POSTFIX "-d")
endif()
# initialize CURL_LIBS
set(CURL_LIBS "")
include(CurlSymbolHiding)
# HTTP only
set(CURL_DISABLE_FTP ON)
set(CURL_DISABLE_LDAP ON)
set(CURL_DISABLE_LDAPS ON)
set(CURL_DISABLE_TELNET ON)
set(CURL_DISABLE_DICT ON)
set(CURL_DISABLE_FILE ON)
set(CURL_DISABLE_TFTP ON)
set(CURL_DISABLE_RTSP ON)
set(CURL_DISABLE_POP3 ON)
set(CURL_DISABLE_IMAP ON)
set(CURL_DISABLE_SMTP ON)
set(CURL_DISABLE_GOPHER ON)
option(CURL_DISABLE_COOKIES "to disable cookies support" OFF)
mark_as_advanced(CURL_DISABLE_COOKIES)
option(CURL_DISABLE_CRYPTO_AUTH "to disable cryptographic authentication" OFF)
mark_as_advanced(CURL_DISABLE_CRYPTO_AUTH)
option(CURL_DISABLE_VERBOSE_STRINGS "to disable verbose strings" OFF)
mark_as_advanced(CURL_DISABLE_VERBOSE_STRINGS)
option(ENABLE_IPV6 "Define if you want to enable IPv6 support" ON)
mark_as_advanced(ENABLE_IPV6)
if(ENABLE_IPV6 AND NOT WIN32)
include(CheckStructHasMember)
check_struct_has_member("struct sockaddr_in6" sin6_addr "netinet/in.h"
HAVE_SOCKADDR_IN6_SIN6_ADDR)
check_struct_has_member("struct sockaddr_in6" sin6_scope_id "netinet/in.h"
HAVE_SOCKADDR_IN6_SIN6_SCOPE_ID)
if(NOT HAVE_SOCKADDR_IN6_SIN6_ADDR)
message(WARNING "struct sockaddr_in6 not available, disabling IPv6 support")
# Force the feature off, as this name is used as a guard macro...
set(ENABLE_IPV6 OFF
CACHE BOOL "Define if you want to enable IPv6 support" FORCE)
endif()
endif()
curl_nroff_check()
# We need ANSI C flags, especially on HP
set(CMAKE_C_FLAGS "${CMAKE_ANSI_CFLAGS} ${CMAKE_C_FLAGS}")
set(CMAKE_REQUIRED_FLAGS ${CMAKE_ANSI_CFLAGS})
# Include all the necessary files for macros
include(CheckFunctionExists)
include(CheckIncludeFile)
include(CheckIncludeFiles)
include(CheckLibraryExists)
include(CheckSymbolExists)
include(CheckTypeSize)
include(CheckCSourceCompiles)
if(ENABLE_THREADED_RESOLVER)
find_package(Threads REQUIRED)
if(WIN32)
set(USE_THREADS_WIN32 ON)
else()
set(USE_THREADS_POSIX ${CMAKE_USE_PTHREADS_INIT})
set(HAVE_PTHREAD_H ${CMAKE_USE_PTHREADS_INIT})
endif()
set(CURL_LIBS ${CURL_LIBS} ${CMAKE_THREAD_LIBS_INIT})
endif()
# Check for all needed libraries
check_library_exists_concat("${CMAKE_DL_LIBS}" dlopen HAVE_LIBDL)
check_library_exists_concat("socket" connect HAVE_LIBSOCKET)
check_library_exists("c" gethostbyname "" NOT_NEED_LIBNSL)
check_function_exists(gethostname HAVE_GETHOSTNAME)
# From cmake/find/ssl.cmake
if (OPENSSL_FOUND)
set(SSL_ENABLED ON)
set(USE_OPENSSL ON)
list(APPEND CURL_LIBS ${OPENSSL_LIBRARIES})
check_include_file("openssl/crypto.h" HAVE_OPENSSL_CRYPTO_H)
check_include_file("openssl/err.h" HAVE_OPENSSL_ERR_H)
check_include_file("openssl/pem.h" HAVE_OPENSSL_PEM_H)
check_include_file("openssl/rsa.h" HAVE_OPENSSL_RSA_H)
check_include_file("openssl/ssl.h" HAVE_OPENSSL_SSL_H)
check_include_file("openssl/x509.h" HAVE_OPENSSL_X509_H)
check_include_file("openssl/rand.h" HAVE_OPENSSL_RAND_H)
check_symbol_exists(RAND_status "${CURL_INCLUDES}" HAVE_RAND_STATUS)
check_symbol_exists(RAND_screen "${CURL_INCLUDES}" HAVE_RAND_SCREEN)
check_symbol_exists(RAND_egd "${CURL_INCLUDES}" HAVE_RAND_EGD)
endif()
# Check for idn
check_library_exists_concat("idn2" idn2_lookup_ul HAVE_LIBIDN2)
# Check for symbol dlopen (same as HAVE_LIBDL)
check_library_exists("${CURL_LIBS}" dlopen "" HAVE_DLOPEN)
# From cmake/find/zlib.cmake
if (ZLIB_FOUND)
set(HAVE_ZLIB_H ON)
set(HAVE_LIBZ ON)
set(USE_ZLIB ON)
list(APPEND CURL_LIBS ${ZLIB_LIBRARIES})
endif()
option(ENABLE_UNIX_SOCKETS "Define if you want Unix domain sockets support" ON)
if(ENABLE_UNIX_SOCKETS)
include(CheckStructHasMember)
check_struct_has_member("struct sockaddr_un" sun_path "sys/un.h" USE_UNIX_SOCKETS)
else()
unset(USE_UNIX_SOCKETS CACHE)
endif()
# CA handling
# Explicitly set to the most common case
if (OPENSSL_FOUND)
set(CURL_CA_BUNDLE "/etc/ssl/certs/ca-certificates.crt")
set(CURL_CA_BUNDLE_SET TRUE CACHE BOOL "Path to the CA bundle has been set")
set(CURL_CA_PATH "/etc/ssl/certs")
set(CURL_CA_PATH_SET TRUE CACHE BOOL "Path to the CA bundle has been set")
endif()
check_include_file_concat("stdio.h" HAVE_STDIO_H)
check_include_file_concat("inttypes.h" HAVE_INTTYPES_H)
check_include_file_concat("sys/filio.h" HAVE_SYS_FILIO_H)
check_include_file_concat("sys/ioctl.h" HAVE_SYS_IOCTL_H)
check_include_file_concat("sys/param.h" HAVE_SYS_PARAM_H)
check_include_file_concat("sys/poll.h" HAVE_SYS_POLL_H)
check_include_file_concat("sys/resource.h" HAVE_SYS_RESOURCE_H)
check_include_file_concat("sys/select.h" HAVE_SYS_SELECT_H)
check_include_file_concat("sys/socket.h" HAVE_SYS_SOCKET_H)
check_include_file_concat("sys/sockio.h" HAVE_SYS_SOCKIO_H)
check_include_file_concat("sys/stat.h" HAVE_SYS_STAT_H)
check_include_file_concat("sys/time.h" HAVE_SYS_TIME_H)
check_include_file_concat("sys/types.h" HAVE_SYS_TYPES_H)
check_include_file_concat("sys/uio.h" HAVE_SYS_UIO_H)
check_include_file_concat("sys/un.h" HAVE_SYS_UN_H)
check_include_file_concat("sys/utime.h" HAVE_SYS_UTIME_H)
check_include_file_concat("sys/xattr.h" HAVE_SYS_XATTR_H)
check_include_file_concat("alloca.h" HAVE_ALLOCA_H)
check_include_file_concat("arpa/inet.h" HAVE_ARPA_INET_H)
check_include_file_concat("arpa/tftp.h" HAVE_ARPA_TFTP_H)
check_include_file_concat("assert.h" HAVE_ASSERT_H)
check_include_file_concat("crypto.h" HAVE_CRYPTO_H)
check_include_file_concat("des.h" HAVE_DES_H)
check_include_file_concat("err.h" HAVE_ERR_H)
check_include_file_concat("errno.h" HAVE_ERRNO_H)
check_include_file_concat("fcntl.h" HAVE_FCNTL_H)
check_include_file_concat("idn2.h" HAVE_IDN2_H)
check_include_file_concat("ifaddrs.h" HAVE_IFADDRS_H)
check_include_file_concat("io.h" HAVE_IO_H)
check_include_file_concat("krb.h" HAVE_KRB_H)
check_include_file_concat("libgen.h" HAVE_LIBGEN_H)
check_include_file_concat("locale.h" HAVE_LOCALE_H)
check_include_file_concat("net/if.h" HAVE_NET_IF_H)
check_include_file_concat("netdb.h" HAVE_NETDB_H)
check_include_file_concat("netinet/in.h" HAVE_NETINET_IN_H)
check_include_file_concat("netinet/tcp.h" HAVE_NETINET_TCP_H)
check_include_file_concat("pem.h" HAVE_PEM_H)
check_include_file_concat("poll.h" HAVE_POLL_H)
check_include_file_concat("pwd.h" HAVE_PWD_H)
check_include_file_concat("rsa.h" HAVE_RSA_H)
check_include_file_concat("setjmp.h" HAVE_SETJMP_H)
check_include_file_concat("sgtty.h" HAVE_SGTTY_H)
check_include_file_concat("signal.h" HAVE_SIGNAL_H)
check_include_file_concat("ssl.h" HAVE_SSL_H)
check_include_file_concat("stdbool.h" HAVE_STDBOOL_H)
check_include_file_concat("stdint.h" HAVE_STDINT_H)
check_include_file_concat("stdio.h" HAVE_STDIO_H)
check_include_file_concat("stdlib.h" HAVE_STDLIB_H)
check_include_file_concat("string.h" HAVE_STRING_H)
check_include_file_concat("strings.h" HAVE_STRINGS_H)
check_include_file_concat("stropts.h" HAVE_STROPTS_H)
check_include_file_concat("termio.h" HAVE_TERMIO_H)
check_include_file_concat("termios.h" HAVE_TERMIOS_H)
check_include_file_concat("time.h" HAVE_TIME_H)
check_include_file_concat("unistd.h" HAVE_UNISTD_H)
check_include_file_concat("utime.h" HAVE_UTIME_H)
check_include_file_concat("x509.h" HAVE_X509_H)
check_include_file_concat("process.h" HAVE_PROCESS_H)
check_include_file_concat("stddef.h" HAVE_STDDEF_H)
check_include_file_concat("dlfcn.h" HAVE_DLFCN_H)
check_include_file_concat("malloc.h" HAVE_MALLOC_H)
check_include_file_concat("memory.h" HAVE_MEMORY_H)
check_include_file_concat("netinet/if_ether.h" HAVE_NETINET_IF_ETHER_H)
check_include_file_concat("stdint.h" HAVE_STDINT_H)
check_include_file_concat("sockio.h" HAVE_SOCKIO_H)
check_include_file_concat("sys/utsname.h" HAVE_SYS_UTSNAME_H)
check_type_size(size_t SIZEOF_SIZE_T)
check_type_size(ssize_t SIZEOF_SSIZE_T)
check_type_size("long long" SIZEOF_LONG_LONG)
check_type_size("long" SIZEOF_LONG)
check_type_size("short" SIZEOF_SHORT)
check_type_size("int" SIZEOF_INT)
check_type_size("__int64" SIZEOF___INT64)
check_type_size("long double" SIZEOF_LONG_DOUBLE)
check_type_size("time_t" SIZEOF_TIME_T)
if(NOT HAVE_SIZEOF_SSIZE_T)
if(SIZEOF_LONG EQUAL SIZEOF_SIZE_T)
set(ssize_t long)
endif()
if(NOT ssize_t AND SIZEOF___INT64 EQUAL SIZEOF_SIZE_T)
set(ssize_t __int64)
endif()
endif()
# off_t is sized later, after the HAVE_FILE_OFFSET_BITS test
if(HAVE_SIZEOF_LONG_LONG)
set(HAVE_LONGLONG 1)
set(HAVE_LL 1)
endif()
find_file(RANDOM_FILE urandom /dev)
mark_as_advanced(RANDOM_FILE)
# Check for some functions that are used
if(HAVE_LIBWS2_32)
set(CMAKE_REQUIRED_LIBRARIES ws2_32)
elseif(HAVE_LIBSOCKET)
set(CMAKE_REQUIRED_LIBRARIES socket)
endif()
check_symbol_exists(basename "${CURL_INCLUDES}" HAVE_BASENAME)
check_symbol_exists(socket "${CURL_INCLUDES}" HAVE_SOCKET)
check_symbol_exists(select "${CURL_INCLUDES}" HAVE_SELECT)
check_symbol_exists(poll "${CURL_INCLUDES}" HAVE_POLL)
check_symbol_exists(strdup "${CURL_INCLUDES}" HAVE_STRDUP)
check_symbol_exists(strstr "${CURL_INCLUDES}" HAVE_STRSTR)
check_symbol_exists(strtok_r "${CURL_INCLUDES}" HAVE_STRTOK_R)
check_symbol_exists(strftime "${CURL_INCLUDES}" HAVE_STRFTIME)
check_symbol_exists(uname "${CURL_INCLUDES}" HAVE_UNAME)
check_symbol_exists(strcasecmp "${CURL_INCLUDES}" HAVE_STRCASECMP)
check_symbol_exists(stricmp "${CURL_INCLUDES}" HAVE_STRICMP)
check_symbol_exists(strcmpi "${CURL_INCLUDES}" HAVE_STRCMPI)
check_symbol_exists(strncmpi "${CURL_INCLUDES}" HAVE_STRNCMPI)
check_symbol_exists(alarm "${CURL_INCLUDES}" HAVE_ALARM)
if(NOT HAVE_STRNCMPI)
set(HAVE_STRCMPI)
endif()
check_symbol_exists(gethostbyaddr "${CURL_INCLUDES}" HAVE_GETHOSTBYADDR)
check_symbol_exists(gethostbyaddr_r "${CURL_INCLUDES}" HAVE_GETHOSTBYADDR_R)
check_symbol_exists(gettimeofday "${CURL_INCLUDES}" HAVE_GETTIMEOFDAY)
check_symbol_exists(inet_addr "${CURL_INCLUDES}" HAVE_INET_ADDR)
check_symbol_exists(inet_ntoa "${CURL_INCLUDES}" HAVE_INET_NTOA)
check_symbol_exists(inet_ntoa_r "${CURL_INCLUDES}" HAVE_INET_NTOA_R)
check_symbol_exists(tcsetattr "${CURL_INCLUDES}" HAVE_TCSETATTR)
check_symbol_exists(tcgetattr "${CURL_INCLUDES}" HAVE_TCGETATTR)
check_symbol_exists(perror "${CURL_INCLUDES}" HAVE_PERROR)
check_symbol_exists(closesocket "${CURL_INCLUDES}" HAVE_CLOSESOCKET)
check_symbol_exists(setvbuf "${CURL_INCLUDES}" HAVE_SETVBUF)
check_symbol_exists(sigsetjmp "${CURL_INCLUDES}" HAVE_SIGSETJMP)
check_symbol_exists(getpass_r "${CURL_INCLUDES}" HAVE_GETPASS_R)
check_symbol_exists(strlcat "${CURL_INCLUDES}" HAVE_STRLCAT)
check_symbol_exists(getpwuid "${CURL_INCLUDES}" HAVE_GETPWUID)
check_symbol_exists(getpwuid_r "${CURL_INCLUDES}" HAVE_GETPWUID_R)
check_symbol_exists(geteuid "${CURL_INCLUDES}" HAVE_GETEUID)
check_symbol_exists(usleep "${CURL_INCLUDES}" HAVE_USLEEP)
check_symbol_exists(utime "${CURL_INCLUDES}" HAVE_UTIME)
check_symbol_exists(gmtime_r "${CURL_INCLUDES}" HAVE_GMTIME_R)
check_symbol_exists(localtime_r "${CURL_INCLUDES}" HAVE_LOCALTIME_R)
check_symbol_exists(gethostbyname "${CURL_INCLUDES}" HAVE_GETHOSTBYNAME)
check_symbol_exists(gethostbyname_r "${CURL_INCLUDES}" HAVE_GETHOSTBYNAME_R)
check_symbol_exists(signal "${CURL_INCLUDES}" HAVE_SIGNAL_FUNC)
check_symbol_exists(SIGALRM "${CURL_INCLUDES}" HAVE_SIGNAL_MACRO)
if(HAVE_SIGNAL_FUNC AND HAVE_SIGNAL_MACRO)
set(HAVE_SIGNAL 1)
endif()
check_symbol_exists(uname "${CURL_INCLUDES}" HAVE_UNAME)
check_symbol_exists(strtoll "${CURL_INCLUDES}" HAVE_STRTOLL)
check_symbol_exists(_strtoi64 "${CURL_INCLUDES}" HAVE__STRTOI64)
check_symbol_exists(strerror_r "${CURL_INCLUDES}" HAVE_STRERROR_R)
check_symbol_exists(siginterrupt "${CURL_INCLUDES}" HAVE_SIGINTERRUPT)
check_symbol_exists(perror "${CURL_INCLUDES}" HAVE_PERROR)
check_symbol_exists(fork "${CURL_INCLUDES}" HAVE_FORK)
check_symbol_exists(getaddrinfo "${CURL_INCLUDES}" HAVE_GETADDRINFO)
check_symbol_exists(freeaddrinfo "${CURL_INCLUDES}" HAVE_FREEADDRINFO)
check_symbol_exists(freeifaddrs "${CURL_INCLUDES}" HAVE_FREEIFADDRS)
check_symbol_exists(pipe "${CURL_INCLUDES}" HAVE_PIPE)
check_symbol_exists(ftruncate "${CURL_INCLUDES}" HAVE_FTRUNCATE)
check_symbol_exists(getprotobyname "${CURL_INCLUDES}" HAVE_GETPROTOBYNAME)
check_symbol_exists(getpeername "${CURL_INCLUDES}" HAVE_GETPEERNAME)
check_symbol_exists(getsockname "${CURL_INCLUDES}" HAVE_GETSOCKNAME)
check_symbol_exists(if_nametoindex "${CURL_INCLUDES}" HAVE_IF_NAMETOINDEX)
check_symbol_exists(getrlimit "${CURL_INCLUDES}" HAVE_GETRLIMIT)
check_symbol_exists(setlocale "${CURL_INCLUDES}" HAVE_SETLOCALE)
check_symbol_exists(setmode "${CURL_INCLUDES}" HAVE_SETMODE)
check_symbol_exists(setrlimit "${CURL_INCLUDES}" HAVE_SETRLIMIT)
check_symbol_exists(fcntl "${CURL_INCLUDES}" HAVE_FCNTL)
check_symbol_exists(ioctl "${CURL_INCLUDES}" HAVE_IOCTL)
check_symbol_exists(setsockopt "${CURL_INCLUDES}" HAVE_SETSOCKOPT)
check_function_exists(mach_absolute_time HAVE_MACH_ABSOLUTE_TIME)
check_symbol_exists(fsetxattr "${CURL_INCLUDES}" HAVE_FSETXATTR)
if(HAVE_FSETXATTR)
foreach(CURL_TEST HAVE_FSETXATTR_5 HAVE_FSETXATTR_6)
curl_internal_test(${CURL_TEST})
endforeach()
endif()
# sigaction and sigsetjmp are special. Use a special mechanism for
# detecting those, but only if the previous attempt failed.
if(HAVE_SIGNAL_H)
check_symbol_exists(sigaction "signal.h" HAVE_SIGACTION)
endif()
if(NOT HAVE_SIGSETJMP)
if(HAVE_SETJMP_H)
check_symbol_exists(sigsetjmp "setjmp.h" HAVE_MACRO_SIGSETJMP)
if(HAVE_MACRO_SIGSETJMP)
set(HAVE_SIGSETJMP 1)
endif()
endif()
endif()
# If there is no stricmp(), do not allow LDAP to parse URLs
if(NOT HAVE_STRICMP)
set(HAVE_LDAP_URL_PARSE 1)
endif()
# Do curl-specific tests
foreach(CURL_TEST
HAVE_FCNTL_O_NONBLOCK
HAVE_IOCTLSOCKET
HAVE_IOCTLSOCKET_CAMEL
HAVE_IOCTLSOCKET_CAMEL_FIONBIO
HAVE_IOCTLSOCKET_FIONBIO
HAVE_IOCTL_FIONBIO
HAVE_IOCTL_SIOCGIFADDR
HAVE_SETSOCKOPT_SO_NONBLOCK
HAVE_SOCKADDR_IN6_SIN6_SCOPE_ID
TIME_WITH_SYS_TIME
HAVE_O_NONBLOCK
HAVE_GETHOSTBYADDR_R_5
HAVE_GETHOSTBYADDR_R_7
HAVE_GETHOSTBYADDR_R_8
HAVE_GETHOSTBYADDR_R_5_REENTRANT
HAVE_GETHOSTBYADDR_R_7_REENTRANT
HAVE_GETHOSTBYADDR_R_8_REENTRANT
HAVE_GETHOSTBYNAME_R_3
HAVE_GETHOSTBYNAME_R_5
HAVE_GETHOSTBYNAME_R_6
HAVE_GETHOSTBYNAME_R_3_REENTRANT
HAVE_GETHOSTBYNAME_R_5_REENTRANT
HAVE_GETHOSTBYNAME_R_6_REENTRANT
HAVE_IN_ADDR_T
HAVE_BOOL_T
STDC_HEADERS
RETSIGTYPE_TEST
HAVE_INET_NTOA_R_DECL
HAVE_INET_NTOA_R_DECL_REENTRANT
HAVE_GETADDRINFO
HAVE_FILE_OFFSET_BITS
HAVE_VARIADIC_MACROS_C99
HAVE_VARIADIC_MACROS_GCC
)
curl_internal_test(${CURL_TEST})
endforeach()
if(HAVE_FILE_OFFSET_BITS)
set(_FILE_OFFSET_BITS 64)
set(CMAKE_REQUIRED_FLAGS "-D_FILE_OFFSET_BITS=64")
endif()
check_type_size("off_t" SIZEOF_OFF_T)
# include this header to get the type
set(CMAKE_REQUIRED_INCLUDES "${CURL_SOURCE_DIR}/include")
set(CMAKE_EXTRA_INCLUDE_FILES "curl/system.h")
check_type_size("curl_off_t" SIZEOF_CURL_OFF_T)
set(CMAKE_EXTRA_INCLUDE_FILES "")
foreach(CURL_TEST
HAVE_GLIBC_STRERROR_R
HAVE_POSIX_STRERROR_R
)
curl_internal_test(${CURL_TEST})
endforeach()
# Check for reentrant
foreach(CURL_TEST
HAVE_GETHOSTBYADDR_R_5
HAVE_GETHOSTBYADDR_R_7
HAVE_GETHOSTBYADDR_R_8
HAVE_GETHOSTBYNAME_R_3
HAVE_GETHOSTBYNAME_R_5
HAVE_GETHOSTBYNAME_R_6
HAVE_INET_NTOA_R_DECL_REENTRANT)
if(NOT ${CURL_TEST})
if(${CURL_TEST}_REENTRANT)
set(NEED_REENTRANT 1)
endif()
endif()
endforeach()
if(NEED_REENTRANT)
foreach(CURL_TEST
HAVE_GETHOSTBYADDR_R_5
HAVE_GETHOSTBYADDR_R_7
HAVE_GETHOSTBYADDR_R_8
HAVE_GETHOSTBYNAME_R_3
HAVE_GETHOSTBYNAME_R_5
HAVE_GETHOSTBYNAME_R_6)
set(${CURL_TEST} 0)
if(${CURL_TEST}_REENTRANT)
set(${CURL_TEST} 1)
endif()
endforeach()
endif()
if(HAVE_INET_NTOA_R_DECL_REENTRANT)
set(HAVE_INET_NTOA_R_DECL 1)
set(NEED_REENTRANT 1)
endif()
# Check clock_gettime(CLOCK_MONOTONIC, x) support
curl_internal_test(HAVE_CLOCK_GETTIME_MONOTONIC)
# Check compiler support of __builtin_available()
curl_internal_test(HAVE_BUILTIN_AVAILABLE)
# Some other minor tests
if(NOT HAVE_IN_ADDR_T)
set(in_addr_t "unsigned long")
endif()
# Check for nonblocking
set(HAVE_DISABLED_NONBLOCKING 1)
if(HAVE_FIONBIO OR
HAVE_IOCTLSOCKET OR
HAVE_IOCTLSOCKET_CASE OR
HAVE_O_NONBLOCK)
set(HAVE_DISABLED_NONBLOCKING)
endif()
set(CURL_PULL_SYS_TYPES_H ${HAVE_SYS_TYPES_H})
set(CURL_PULL_SYS_SOCKET_H ${HAVE_SYS_SOCKET_H})
set(CURL_PULL_SYS_POLL_H ${HAVE_SYS_POLL_H})
set(CURL_PULL_STDINT_H ${HAVE_STDINT_H})
set(CURL_PULL_INTTYPES_H ${HAVE_INTTYPES_H})
include(CMake/OtherTests.cmake)
SET(LIB_VAUTH_CFILES
"${CURL_LIBRARY_DIR}/vauth/vauth.c" "${CURL_LIBRARY_DIR}/vauth/cleartext.c" "${CURL_LIBRARY_DIR}/vauth/cram.c"
"${CURL_LIBRARY_DIR}/vauth/digest.c" "${CURL_LIBRARY_DIR}/vauth/digest_sspi.c" "${CURL_LIBRARY_DIR}/vauth/krb5_gssapi.c"
"${CURL_LIBRARY_DIR}/vauth/krb5_sspi.c" "${CURL_LIBRARY_DIR}/vauth/ntlm.c" "${CURL_LIBRARY_DIR}/vauth/ntlm_sspi.c" "${CURL_LIBRARY_DIR}/vauth/oauth2.c"
"${CURL_LIBRARY_DIR}/vauth/spnego_gssapi.c" "${CURL_LIBRARY_DIR}/vauth/spnego_sspi.c")
SET(LIB_VAUTH_HFILES "${CURL_LIBRARY_DIR}/vauth/vauth.h" "${CURL_LIBRARY_DIR}/vauth/digest.h" "${CURL_LIBRARY_DIR}/vauth/ntlm.h")
SET(LIB_VTLS_CFILES "${CURL_LIBRARY_DIR}/vtls/openssl.c" "${CURL_LIBRARY_DIR}/vtls/gtls.c" "${CURL_LIBRARY_DIR}/vtls/vtls.c" "${CURL_LIBRARY_DIR}/vtls/nss.c"
"${CURL_LIBRARY_DIR}/vtls/polarssl.c" "${CURL_LIBRARY_DIR}/vtls/polarssl_threadlock.c"
"${CURL_LIBRARY_DIR}/vtls/wolfssl.c" "${CURL_LIBRARY_DIR}/vtls/schannel.c" "${CURL_LIBRARY_DIR}/vtls/schannel_verify.c"
"${CURL_LIBRARY_DIR}/vtls/sectransp.c" "${CURL_LIBRARY_DIR}/vtls/gskit.c" "${CURL_LIBRARY_DIR}/vtls/mbedtls.c" "${CURL_LIBRARY_DIR}/vtls/mesalink.c"
"${CURL_LIBRARY_DIR}/vtls/bearssl.c")
SET(LIB_VTLS_HFILES "${CURL_LIBRARY_DIR}/vtls/openssl.h" "${CURL_LIBRARY_DIR}/vtls/vtls.h" "${CURL_LIBRARY_DIR}/vtls/gtls.h"
"${CURL_LIBRARY_DIR}/vtls/nssg.h" "${CURL_LIBRARY_DIR}/vtls/polarssl.h" "${CURL_LIBRARY_DIR}/vtls/polarssl_threadlock.h"
"${CURL_LIBRARY_DIR}/vtls/wolfssl.h" "${CURL_LIBRARY_DIR}/vtls/schannel.h" "${CURL_LIBRARY_DIR}/vtls/sectransp.h" "${CURL_LIBRARY_DIR}/vtls/gskit.h"
"${CURL_LIBRARY_DIR}/vtls/mbedtls.h" "${CURL_LIBRARY_DIR}/vtls/mesalink.h" "${CURL_LIBRARY_DIR}/vtls/bearssl.h")
SET(LIB_VQUIC_CFILES "${CURL_LIBRARY_DIR}/vquic/ngtcp2.c" "${CURL_LIBRARY_DIR}/vquic/quiche.c")
SET(LIB_VQUIC_HFILES "${CURL_LIBRARY_DIR}/vquic/ngtcp2.h" "${CURL_LIBRARY_DIR}/vquic/quiche.h")
SET(LIB_VSSH_CFILES "${CURL_LIBRARY_DIR}/vssh/libssh2.c" "${CURL_LIBRARY_DIR}/vssh/libssh.c")
SET(LIB_VSSH_HFILES "${CURL_LIBRARY_DIR}/vssh/ssh.h")
SET(LIB_CFILES "${CURL_LIBRARY_DIR}/file.c"
"${CURL_LIBRARY_DIR}/timeval.c" "${CURL_LIBRARY_DIR}/base64.c" "${CURL_LIBRARY_DIR}/hostip.c" "${CURL_LIBRARY_DIR}/progress.c" "${CURL_LIBRARY_DIR}/formdata.c"
"${CURL_LIBRARY_DIR}/cookie.c" "${CURL_LIBRARY_DIR}/http.c" "${CURL_LIBRARY_DIR}/sendf.c" "${CURL_LIBRARY_DIR}/url.c" "${CURL_LIBRARY_DIR}/dict.c" "${CURL_LIBRARY_DIR}/if2ip.c" "${CURL_LIBRARY_DIR}/speedcheck.c"
"${CURL_LIBRARY_DIR}/ldap.c" "${CURL_LIBRARY_DIR}/version.c" "${CURL_LIBRARY_DIR}/getenv.c" "${CURL_LIBRARY_DIR}/escape.c" "${CURL_LIBRARY_DIR}/mprintf.c" "${CURL_LIBRARY_DIR}/telnet.c" "${CURL_LIBRARY_DIR}/netrc.c"
"${CURL_LIBRARY_DIR}/getinfo.c" "${CURL_LIBRARY_DIR}/transfer.c" "${CURL_LIBRARY_DIR}/strcase.c" "${CURL_LIBRARY_DIR}/easy.c" "${CURL_LIBRARY_DIR}/security.c" "${CURL_LIBRARY_DIR}/curl_fnmatch.c"
"${CURL_LIBRARY_DIR}/fileinfo.c" "${CURL_LIBRARY_DIR}/wildcard.c" "${CURL_LIBRARY_DIR}/krb5.c" "${CURL_LIBRARY_DIR}/memdebug.c" "${CURL_LIBRARY_DIR}/http_chunks.c"
"${CURL_LIBRARY_DIR}/strtok.c" "${CURL_LIBRARY_DIR}/connect.c" "${CURL_LIBRARY_DIR}/llist.c" "${CURL_LIBRARY_DIR}/hash.c" "${CURL_LIBRARY_DIR}/multi.c" "${CURL_LIBRARY_DIR}/content_encoding.c" "${CURL_LIBRARY_DIR}/share.c"
"${CURL_LIBRARY_DIR}/http_digest.c" "${CURL_LIBRARY_DIR}/md4.c" "${CURL_LIBRARY_DIR}/md5.c" "${CURL_LIBRARY_DIR}/http_negotiate.c" "${CURL_LIBRARY_DIR}/inet_pton.c" "${CURL_LIBRARY_DIR}/strtoofft.c"
"${CURL_LIBRARY_DIR}/strerror.c" "${CURL_LIBRARY_DIR}/amigaos.c" "${CURL_LIBRARY_DIR}/hostasyn.c" "${CURL_LIBRARY_DIR}/hostip4.c" "${CURL_LIBRARY_DIR}/hostip6.c" "${CURL_LIBRARY_DIR}/hostsyn.c"
"${CURL_LIBRARY_DIR}/inet_ntop.c" "${CURL_LIBRARY_DIR}/parsedate.c" "${CURL_LIBRARY_DIR}/select.c" "${CURL_LIBRARY_DIR}/splay.c" "${CURL_LIBRARY_DIR}/strdup.c" "${CURL_LIBRARY_DIR}/socks.c"
"${CURL_LIBRARY_DIR}/curl_addrinfo.c" "${CURL_LIBRARY_DIR}/socks_gssapi.c" "${CURL_LIBRARY_DIR}/socks_sspi.c"
"${CURL_LIBRARY_DIR}/curl_sspi.c" "${CURL_LIBRARY_DIR}/slist.c" "${CURL_LIBRARY_DIR}/nonblock.c" "${CURL_LIBRARY_DIR}/curl_memrchr.c" "${CURL_LIBRARY_DIR}/imap.c" "${CURL_LIBRARY_DIR}/pop3.c" "${CURL_LIBRARY_DIR}/smtp.c"
"${CURL_LIBRARY_DIR}/pingpong.c" "${CURL_LIBRARY_DIR}/rtsp.c" "${CURL_LIBRARY_DIR}/curl_threads.c" "${CURL_LIBRARY_DIR}/warnless.c" "${CURL_LIBRARY_DIR}/hmac.c" "${CURL_LIBRARY_DIR}/curl_rtmp.c"
"${CURL_LIBRARY_DIR}/openldap.c" "${CURL_LIBRARY_DIR}/curl_gethostname.c" "${CURL_LIBRARY_DIR}/gopher.c" "${CURL_LIBRARY_DIR}/idn_win32.c"
"${CURL_LIBRARY_DIR}/http_proxy.c" "${CURL_LIBRARY_DIR}/non-ascii.c" "${CURL_LIBRARY_DIR}/asyn-ares.c" "${CURL_LIBRARY_DIR}/asyn-thread.c" "${CURL_LIBRARY_DIR}/curl_gssapi.c"
"${CURL_LIBRARY_DIR}/http_ntlm.c" "${CURL_LIBRARY_DIR}/curl_ntlm_wb.c" "${CURL_LIBRARY_DIR}/curl_ntlm_core.c" "${CURL_LIBRARY_DIR}/curl_sasl.c" "${CURL_LIBRARY_DIR}/rand.c"
"${CURL_LIBRARY_DIR}/curl_multibyte.c" "${CURL_LIBRARY_DIR}/hostcheck.c" "${CURL_LIBRARY_DIR}/conncache.c" "${CURL_LIBRARY_DIR}/dotdot.c"
"${CURL_LIBRARY_DIR}/x509asn1.c" "${CURL_LIBRARY_DIR}/http2.c" "${CURL_LIBRARY_DIR}/smb.c" "${CURL_LIBRARY_DIR}/curl_endian.c" "${CURL_LIBRARY_DIR}/curl_des.c" "${CURL_LIBRARY_DIR}/system_win32.c"
"${CURL_LIBRARY_DIR}/mime.c" "${CURL_LIBRARY_DIR}/sha256.c" "${CURL_LIBRARY_DIR}/setopt.c" "${CURL_LIBRARY_DIR}/curl_path.c" "${CURL_LIBRARY_DIR}/curl_ctype.c" "${CURL_LIBRARY_DIR}/curl_range.c" "${CURL_LIBRARY_DIR}/psl.c"
"${CURL_LIBRARY_DIR}/doh.c" "${CURL_LIBRARY_DIR}/urlapi.c" "${CURL_LIBRARY_DIR}/curl_get_line.c" "${CURL_LIBRARY_DIR}/altsvc.c" "${CURL_LIBRARY_DIR}/socketpair.c")
SET(LIB_HFILES "${CURL_LIBRARY_DIR}/arpa_telnet.h" "${CURL_LIBRARY_DIR}/netrc.h" "${CURL_LIBRARY_DIR}/file.h" "${CURL_LIBRARY_DIR}/timeval.h" "${CURL_LIBRARY_DIR}/hostip.h" "${CURL_LIBRARY_DIR}/progress.h"
"${CURL_LIBRARY_DIR}/formdata.h" "${CURL_LIBRARY_DIR}/cookie.h" "${CURL_LIBRARY_DIR}/http.h" "${CURL_LIBRARY_DIR}/sendf.h" "${CURL_LIBRARY_DIR}/url.h" "${CURL_LIBRARY_DIR}/dict.h" "${CURL_LIBRARY_DIR}/if2ip.h"
"${CURL_LIBRARY_DIR}/speedcheck.h" "${CURL_LIBRARY_DIR}/urldata.h" "${CURL_LIBRARY_DIR}/curl_ldap.h" "${CURL_LIBRARY_DIR}/escape.h" "${CURL_LIBRARY_DIR}/telnet.h" "${CURL_LIBRARY_DIR}/getinfo.h"
"${CURL_LIBRARY_DIR}/strcase.h" "${CURL_LIBRARY_DIR}/curl_sec.h" "${CURL_LIBRARY_DIR}/memdebug.h" "${CURL_LIBRARY_DIR}/http_chunks.h" "${CURL_LIBRARY_DIR}/curl_fnmatch.h"
"${CURL_LIBRARY_DIR}/wildcard.h" "${CURL_LIBRARY_DIR}/fileinfo.h" "${CURL_LIBRARY_DIR}/strtok.h" "${CURL_LIBRARY_DIR}/connect.h" "${CURL_LIBRARY_DIR}/llist.h"
"${CURL_LIBRARY_DIR}/hash.h" "${CURL_LIBRARY_DIR}/content_encoding.h" "${CURL_LIBRARY_DIR}/share.h" "${CURL_LIBRARY_DIR}/curl_md4.h" "${CURL_LIBRARY_DIR}/curl_md5.h" "${CURL_LIBRARY_DIR}/http_digest.h"
"${CURL_LIBRARY_DIR}/http_negotiate.h" "${CURL_LIBRARY_DIR}/inet_pton.h" "${CURL_LIBRARY_DIR}/amigaos.h" "${CURL_LIBRARY_DIR}/strtoofft.h" "${CURL_LIBRARY_DIR}/strerror.h"
"${CURL_LIBRARY_DIR}/inet_ntop.h" "${CURL_LIBRARY_DIR}/curlx.h" "${CURL_LIBRARY_DIR}/curl_memory.h" "${CURL_LIBRARY_DIR}/curl_setup.h" "${CURL_LIBRARY_DIR}/transfer.h" "${CURL_LIBRARY_DIR}/select.h"
"${CURL_LIBRARY_DIR}/easyif.h" "${CURL_LIBRARY_DIR}/multiif.h" "${CURL_LIBRARY_DIR}/parsedate.h" "${CURL_LIBRARY_DIR}/sockaddr.h" "${CURL_LIBRARY_DIR}/splay.h" "${CURL_LIBRARY_DIR}/strdup.h"
"${CURL_LIBRARY_DIR}/socks.h" "${CURL_LIBRARY_DIR}/curl_base64.h" "${CURL_LIBRARY_DIR}/curl_addrinfo.h" "${CURL_LIBRARY_DIR}/curl_sspi.h"
"${CURL_LIBRARY_DIR}/slist.h" "${CURL_LIBRARY_DIR}/nonblock.h" "${CURL_LIBRARY_DIR}/curl_memrchr.h" "${CURL_LIBRARY_DIR}/imap.h" "${CURL_LIBRARY_DIR}/pop3.h" "${CURL_LIBRARY_DIR}/smtp.h" "${CURL_LIBRARY_DIR}/pingpong.h"
"${CURL_LIBRARY_DIR}/rtsp.h" "${CURL_LIBRARY_DIR}/curl_threads.h" "${CURL_LIBRARY_DIR}/warnless.h" "${CURL_LIBRARY_DIR}/curl_hmac.h" "${CURL_LIBRARY_DIR}/curl_rtmp.h"
"${CURL_LIBRARY_DIR}/curl_gethostname.h" "${CURL_LIBRARY_DIR}/gopher.h" "${CURL_LIBRARY_DIR}/http_proxy.h" "${CURL_LIBRARY_DIR}/non-ascii.h" "${CURL_LIBRARY_DIR}/asyn.h"
"${CURL_LIBRARY_DIR}/http_ntlm.h" "${CURL_LIBRARY_DIR}/curl_gssapi.h" "${CURL_LIBRARY_DIR}/curl_ntlm_wb.h" "${CURL_LIBRARY_DIR}/curl_ntlm_core.h"
"${CURL_LIBRARY_DIR}/curl_sasl.h" "${CURL_LIBRARY_DIR}/curl_multibyte.h" "${CURL_LIBRARY_DIR}/hostcheck.h" "${CURL_LIBRARY_DIR}/conncache.h"
"${CURL_LIBRARY_DIR}/multihandle.h" "${CURL_LIBRARY_DIR}/setup-vms.h" "${CURL_LIBRARY_DIR}/dotdot.h"
"${CURL_LIBRARY_DIR}/x509asn1.h" "${CURL_LIBRARY_DIR}/http2.h" "${CURL_LIBRARY_DIR}/sigpipe.h" "${CURL_LIBRARY_DIR}/smb.h" "${CURL_LIBRARY_DIR}/curl_endian.h" "${CURL_LIBRARY_DIR}/curl_des.h"
"${CURL_LIBRARY_DIR}/curl_printf.h" "${CURL_LIBRARY_DIR}/system_win32.h" "${CURL_LIBRARY_DIR}/rand.h" "${CURL_LIBRARY_DIR}/mime.h" "${CURL_LIBRARY_DIR}/curl_sha256.h" "${CURL_LIBRARY_DIR}/setopt.h"
"${CURL_LIBRARY_DIR}/curl_path.h" "${CURL_LIBRARY_DIR}/curl_ctype.h" "${CURL_LIBRARY_DIR}/curl_range.h" "${CURL_LIBRARY_DIR}/psl.h" "${CURL_LIBRARY_DIR}/doh.h" "${CURL_LIBRARY_DIR}/urlapi-int.h"
"${CURL_LIBRARY_DIR}/curl_get_line.h" "${CURL_LIBRARY_DIR}/altsvc.h" "${CURL_LIBRARY_DIR}/quic.h" "${CURL_LIBRARY_DIR}/socketpair.h")
SET(LIB_RCFILES "${CURL_LIBRARY_DIR}/libcurl.rc")
SET(CSOURCES ${LIB_CFILES} ${LIB_VAUTH_CFILES} ${LIB_VTLS_CFILES}
${LIB_VQUIC_CFILES} ${LIB_VSSH_CFILES})
SET(HHEADERS ${LIB_HFILES} ${LIB_VAUTH_HFILES} ${LIB_VTLS_HFILES}
${LIB_VQUIC_HFILES} ${LIB_VSSH_HFILES})
configure_file(${CURL_SOURCE_DIR}/lib/curl_config.h.cmake
${CMAKE_CURRENT_BINARY_DIR}/curl/curl_config.h)
list(APPEND HHEADERS
${CMAKE_CURRENT_BINARY_DIR}/curl/curl_config.h
)
add_library(libcurl ${HHEADERS} ${CSOURCES})
if(NOT BUILD_SHARED_LIBS)
set_target_properties(libcurl PROPERTIES INTERFACE_COMPILE_DEFINITIONS CURL_STATICLIB)
endif()
if(HIDES_CURL_PRIVATE_SYMBOLS)
set_property(TARGET libcurl APPEND PROPERTY COMPILE_DEFINITIONS "CURL_HIDDEN_SYMBOLS")
set_property(TARGET libcurl APPEND PROPERTY COMPILE_FLAGS ${CURL_CFLAG_SYMBOLS_HIDE})
endif()
if(OPENSSL_FOUND)
target_include_directories(libcurl PUBLIC ${OPENSSL_INCLUDE_DIR})
message("-- Including openssl ${OPENSSL_INCLUDE_DIR} to curl")
endif()
if(ZLIB_FOUND)
target_include_directories(libcurl PUBLIC ${ZLIB_INCLUDE_DIRS})
message("-- Including zlib ${ZLIB_INCLUDE_DIRS} to curl")
endif()
target_compile_definitions(libcurl PUBLIC -DHAVE_CONFIG_H)
target_compile_definitions(libcurl PUBLIC -DBUILDING_LIBCURL)
target_include_directories(libcurl PUBLIC "${CURL_SOURCE_DIR}/include" "${CURL_LIBRARY_DIR}" "${CMAKE_CURRENT_BINARY_DIR}/curl")
target_link_libraries(libcurl ${CURL_LIBS})

1
contrib/icu vendored Submodule

@ -0,0 +1 @@
Subproject commit faa2f9f9e1fe74c5ed00eba371d2830134cdbea1

View File

@ -0,0 +1,459 @@
set(ICU_SOURCE_DIR ${ClickHouse_SOURCE_DIR}/contrib/icu/icu4c/source)
set(ICUDATA_SOURCE_DIR ${ClickHouse_SOURCE_DIR}/contrib/icudata/)
# These lists of sources were generated from the build log of the original ICU build system (configure + make).
set(ICUUC_SOURCES
${ICU_SOURCE_DIR}/common/errorcode.cpp
${ICU_SOURCE_DIR}/common/putil.cpp
${ICU_SOURCE_DIR}/common/umath.cpp
${ICU_SOURCE_DIR}/common/utypes.cpp
${ICU_SOURCE_DIR}/common/uinvchar.cpp
${ICU_SOURCE_DIR}/common/umutex.cpp
${ICU_SOURCE_DIR}/common/ucln_cmn.cpp
${ICU_SOURCE_DIR}/common/uinit.cpp
${ICU_SOURCE_DIR}/common/uobject.cpp
${ICU_SOURCE_DIR}/common/cmemory.cpp
${ICU_SOURCE_DIR}/common/charstr.cpp
${ICU_SOURCE_DIR}/common/cstr.cpp
${ICU_SOURCE_DIR}/common/udata.cpp
${ICU_SOURCE_DIR}/common/ucmndata.cpp
${ICU_SOURCE_DIR}/common/udatamem.cpp
${ICU_SOURCE_DIR}/common/umapfile.cpp
${ICU_SOURCE_DIR}/common/udataswp.cpp
${ICU_SOURCE_DIR}/common/utrie_swap.cpp
${ICU_SOURCE_DIR}/common/ucol_swp.cpp
${ICU_SOURCE_DIR}/common/utrace.cpp
${ICU_SOURCE_DIR}/common/uhash.cpp
${ICU_SOURCE_DIR}/common/uhash_us.cpp
${ICU_SOURCE_DIR}/common/uenum.cpp
${ICU_SOURCE_DIR}/common/ustrenum.cpp
${ICU_SOURCE_DIR}/common/uvector.cpp
${ICU_SOURCE_DIR}/common/ustack.cpp
${ICU_SOURCE_DIR}/common/uvectr32.cpp
${ICU_SOURCE_DIR}/common/uvectr64.cpp
${ICU_SOURCE_DIR}/common/ucnv.cpp
${ICU_SOURCE_DIR}/common/ucnv_bld.cpp
${ICU_SOURCE_DIR}/common/ucnv_cnv.cpp
${ICU_SOURCE_DIR}/common/ucnv_io.cpp
${ICU_SOURCE_DIR}/common/ucnv_cb.cpp
${ICU_SOURCE_DIR}/common/ucnv_err.cpp
${ICU_SOURCE_DIR}/common/ucnvlat1.cpp
${ICU_SOURCE_DIR}/common/ucnv_u7.cpp
${ICU_SOURCE_DIR}/common/ucnv_u8.cpp
${ICU_SOURCE_DIR}/common/ucnv_u16.cpp
${ICU_SOURCE_DIR}/common/ucnv_u32.cpp
${ICU_SOURCE_DIR}/common/ucnvscsu.cpp
${ICU_SOURCE_DIR}/common/ucnvbocu.cpp
${ICU_SOURCE_DIR}/common/ucnv_ext.cpp
${ICU_SOURCE_DIR}/common/ucnvmbcs.cpp
${ICU_SOURCE_DIR}/common/ucnv2022.cpp
${ICU_SOURCE_DIR}/common/ucnvhz.cpp
${ICU_SOURCE_DIR}/common/ucnv_lmb.cpp
${ICU_SOURCE_DIR}/common/ucnvisci.cpp
${ICU_SOURCE_DIR}/common/ucnvdisp.cpp
${ICU_SOURCE_DIR}/common/ucnv_set.cpp
${ICU_SOURCE_DIR}/common/ucnv_ct.cpp
${ICU_SOURCE_DIR}/common/resource.cpp
${ICU_SOURCE_DIR}/common/uresbund.cpp
${ICU_SOURCE_DIR}/common/ures_cnv.cpp
${ICU_SOURCE_DIR}/common/uresdata.cpp
${ICU_SOURCE_DIR}/common/resbund.cpp
${ICU_SOURCE_DIR}/common/resbund_cnv.cpp
${ICU_SOURCE_DIR}/common/ucurr.cpp
${ICU_SOURCE_DIR}/common/localebuilder.cpp
${ICU_SOURCE_DIR}/common/localeprioritylist.cpp
${ICU_SOURCE_DIR}/common/messagepattern.cpp
${ICU_SOURCE_DIR}/common/ucat.cpp
${ICU_SOURCE_DIR}/common/locmap.cpp
${ICU_SOURCE_DIR}/common/uloc.cpp
${ICU_SOURCE_DIR}/common/locid.cpp
${ICU_SOURCE_DIR}/common/locutil.cpp
${ICU_SOURCE_DIR}/common/locavailable.cpp
${ICU_SOURCE_DIR}/common/locdispnames.cpp
${ICU_SOURCE_DIR}/common/locdspnm.cpp
${ICU_SOURCE_DIR}/common/loclikely.cpp
${ICU_SOURCE_DIR}/common/locresdata.cpp
${ICU_SOURCE_DIR}/common/lsr.cpp
${ICU_SOURCE_DIR}/common/loclikelysubtags.cpp
${ICU_SOURCE_DIR}/common/locdistance.cpp
${ICU_SOURCE_DIR}/common/localematcher.cpp
${ICU_SOURCE_DIR}/common/bytestream.cpp
${ICU_SOURCE_DIR}/common/stringpiece.cpp
${ICU_SOURCE_DIR}/common/bytesinkutil.cpp
${ICU_SOURCE_DIR}/common/stringtriebuilder.cpp
${ICU_SOURCE_DIR}/common/bytestriebuilder.cpp
${ICU_SOURCE_DIR}/common/bytestrie.cpp
${ICU_SOURCE_DIR}/common/bytestrieiterator.cpp
${ICU_SOURCE_DIR}/common/ucharstrie.cpp
${ICU_SOURCE_DIR}/common/ucharstriebuilder.cpp
${ICU_SOURCE_DIR}/common/ucharstrieiterator.cpp
${ICU_SOURCE_DIR}/common/dictionarydata.cpp
${ICU_SOURCE_DIR}/common/edits.cpp
${ICU_SOURCE_DIR}/common/appendable.cpp
${ICU_SOURCE_DIR}/common/ustr_cnv.cpp
${ICU_SOURCE_DIR}/common/unistr_cnv.cpp
${ICU_SOURCE_DIR}/common/unistr.cpp
${ICU_SOURCE_DIR}/common/unistr_case.cpp
${ICU_SOURCE_DIR}/common/unistr_props.cpp
${ICU_SOURCE_DIR}/common/utf_impl.cpp
${ICU_SOURCE_DIR}/common/ustring.cpp
${ICU_SOURCE_DIR}/common/ustrcase.cpp
${ICU_SOURCE_DIR}/common/ucasemap.cpp
${ICU_SOURCE_DIR}/common/ucasemap_titlecase_brkiter.cpp
${ICU_SOURCE_DIR}/common/cstring.cpp
${ICU_SOURCE_DIR}/common/ustrfmt.cpp
${ICU_SOURCE_DIR}/common/ustrtrns.cpp
${ICU_SOURCE_DIR}/common/ustr_wcs.cpp
${ICU_SOURCE_DIR}/common/utext.cpp
${ICU_SOURCE_DIR}/common/unistr_case_locale.cpp
${ICU_SOURCE_DIR}/common/ustrcase_locale.cpp
${ICU_SOURCE_DIR}/common/unistr_titlecase_brkiter.cpp
${ICU_SOURCE_DIR}/common/ustr_titlecase_brkiter.cpp
${ICU_SOURCE_DIR}/common/normalizer2impl.cpp
${ICU_SOURCE_DIR}/common/normalizer2.cpp
${ICU_SOURCE_DIR}/common/filterednormalizer2.cpp
${ICU_SOURCE_DIR}/common/normlzr.cpp
${ICU_SOURCE_DIR}/common/unorm.cpp
${ICU_SOURCE_DIR}/common/unormcmp.cpp
${ICU_SOURCE_DIR}/common/loadednormalizer2impl.cpp
${ICU_SOURCE_DIR}/common/chariter.cpp
${ICU_SOURCE_DIR}/common/schriter.cpp
${ICU_SOURCE_DIR}/common/uchriter.cpp
${ICU_SOURCE_DIR}/common/uiter.cpp
${ICU_SOURCE_DIR}/common/patternprops.cpp
${ICU_SOURCE_DIR}/common/uchar.cpp
${ICU_SOURCE_DIR}/common/uprops.cpp
${ICU_SOURCE_DIR}/common/ucase.cpp
${ICU_SOURCE_DIR}/common/propname.cpp
${ICU_SOURCE_DIR}/common/ubidi_props.cpp
${ICU_SOURCE_DIR}/common/characterproperties.cpp
${ICU_SOURCE_DIR}/common/ubidi.cpp
${ICU_SOURCE_DIR}/common/ubidiwrt.cpp
${ICU_SOURCE_DIR}/common/ubidiln.cpp
${ICU_SOURCE_DIR}/common/ushape.cpp
${ICU_SOURCE_DIR}/common/uscript.cpp
${ICU_SOURCE_DIR}/common/uscript_props.cpp
${ICU_SOURCE_DIR}/common/usc_impl.cpp
${ICU_SOURCE_DIR}/common/unames.cpp
${ICU_SOURCE_DIR}/common/utrie.cpp
${ICU_SOURCE_DIR}/common/utrie2.cpp
${ICU_SOURCE_DIR}/common/utrie2_builder.cpp
${ICU_SOURCE_DIR}/common/ucptrie.cpp
${ICU_SOURCE_DIR}/common/umutablecptrie.cpp
${ICU_SOURCE_DIR}/common/bmpset.cpp
${ICU_SOURCE_DIR}/common/unisetspan.cpp
${ICU_SOURCE_DIR}/common/uset_props.cpp
${ICU_SOURCE_DIR}/common/uniset_props.cpp
${ICU_SOURCE_DIR}/common/uniset_closure.cpp
${ICU_SOURCE_DIR}/common/uset.cpp
${ICU_SOURCE_DIR}/common/uniset.cpp
${ICU_SOURCE_DIR}/common/usetiter.cpp
${ICU_SOURCE_DIR}/common/ruleiter.cpp
${ICU_SOURCE_DIR}/common/caniter.cpp
${ICU_SOURCE_DIR}/common/unifilt.cpp
${ICU_SOURCE_DIR}/common/unifunct.cpp
${ICU_SOURCE_DIR}/common/uarrsort.cpp
${ICU_SOURCE_DIR}/common/brkiter.cpp
${ICU_SOURCE_DIR}/common/ubrk.cpp
${ICU_SOURCE_DIR}/common/brkeng.cpp
${ICU_SOURCE_DIR}/common/dictbe.cpp
${ICU_SOURCE_DIR}/common/filteredbrk.cpp
${ICU_SOURCE_DIR}/common/rbbi.cpp
${ICU_SOURCE_DIR}/common/rbbidata.cpp
${ICU_SOURCE_DIR}/common/rbbinode.cpp
${ICU_SOURCE_DIR}/common/rbbirb.cpp
${ICU_SOURCE_DIR}/common/rbbiscan.cpp
${ICU_SOURCE_DIR}/common/rbbisetb.cpp
${ICU_SOURCE_DIR}/common/rbbistbl.cpp
${ICU_SOURCE_DIR}/common/rbbitblb.cpp
${ICU_SOURCE_DIR}/common/rbbi_cache.cpp
${ICU_SOURCE_DIR}/common/serv.cpp
${ICU_SOURCE_DIR}/common/servnotf.cpp
${ICU_SOURCE_DIR}/common/servls.cpp
${ICU_SOURCE_DIR}/common/servlk.cpp
${ICU_SOURCE_DIR}/common/servlkf.cpp
${ICU_SOURCE_DIR}/common/servrbf.cpp
${ICU_SOURCE_DIR}/common/servslkf.cpp
${ICU_SOURCE_DIR}/common/uidna.cpp
${ICU_SOURCE_DIR}/common/usprep.cpp
${ICU_SOURCE_DIR}/common/uts46.cpp
${ICU_SOURCE_DIR}/common/punycode.cpp
${ICU_SOURCE_DIR}/common/util.cpp
${ICU_SOURCE_DIR}/common/util_props.cpp
${ICU_SOURCE_DIR}/common/parsepos.cpp
${ICU_SOURCE_DIR}/common/locbased.cpp
${ICU_SOURCE_DIR}/common/cwchar.cpp
${ICU_SOURCE_DIR}/common/wintz.cpp
${ICU_SOURCE_DIR}/common/dtintrv.cpp
${ICU_SOURCE_DIR}/common/ucnvsel.cpp
${ICU_SOURCE_DIR}/common/propsvec.cpp
${ICU_SOURCE_DIR}/common/ulist.cpp
${ICU_SOURCE_DIR}/common/uloc_tag.cpp
${ICU_SOURCE_DIR}/common/icudataver.cpp
${ICU_SOURCE_DIR}/common/icuplug.cpp
${ICU_SOURCE_DIR}/common/sharedobject.cpp
${ICU_SOURCE_DIR}/common/simpleformatter.cpp
${ICU_SOURCE_DIR}/common/unifiedcache.cpp
${ICU_SOURCE_DIR}/common/uloc_keytype.cpp
${ICU_SOURCE_DIR}/common/ubiditransform.cpp
${ICU_SOURCE_DIR}/common/pluralmap.cpp
${ICU_SOURCE_DIR}/common/static_unicode_sets.cpp
${ICU_SOURCE_DIR}/common/restrace.cpp)
set(ICUI18N_SOURCES
${ICU_SOURCE_DIR}/i18n/ucln_in.cpp
${ICU_SOURCE_DIR}/i18n/fmtable.cpp
${ICU_SOURCE_DIR}/i18n/format.cpp
${ICU_SOURCE_DIR}/i18n/msgfmt.cpp
${ICU_SOURCE_DIR}/i18n/umsg.cpp
${ICU_SOURCE_DIR}/i18n/numfmt.cpp
${ICU_SOURCE_DIR}/i18n/unum.cpp
${ICU_SOURCE_DIR}/i18n/decimfmt.cpp
${ICU_SOURCE_DIR}/i18n/dcfmtsym.cpp
${ICU_SOURCE_DIR}/i18n/fmtable_cnv.cpp
${ICU_SOURCE_DIR}/i18n/choicfmt.cpp
${ICU_SOURCE_DIR}/i18n/datefmt.cpp
${ICU_SOURCE_DIR}/i18n/smpdtfmt.cpp
${ICU_SOURCE_DIR}/i18n/reldtfmt.cpp
${ICU_SOURCE_DIR}/i18n/dtfmtsym.cpp
${ICU_SOURCE_DIR}/i18n/udat.cpp
${ICU_SOURCE_DIR}/i18n/dtptngen.cpp
${ICU_SOURCE_DIR}/i18n/udatpg.cpp
${ICU_SOURCE_DIR}/i18n/nfrs.cpp
${ICU_SOURCE_DIR}/i18n/nfrule.cpp
${ICU_SOURCE_DIR}/i18n/nfsubs.cpp
${ICU_SOURCE_DIR}/i18n/rbnf.cpp
${ICU_SOURCE_DIR}/i18n/numsys.cpp
${ICU_SOURCE_DIR}/i18n/unumsys.cpp
${ICU_SOURCE_DIR}/i18n/ucsdet.cpp
${ICU_SOURCE_DIR}/i18n/ucal.cpp
${ICU_SOURCE_DIR}/i18n/calendar.cpp
${ICU_SOURCE_DIR}/i18n/gregocal.cpp
${ICU_SOURCE_DIR}/i18n/timezone.cpp
${ICU_SOURCE_DIR}/i18n/simpletz.cpp
${ICU_SOURCE_DIR}/i18n/olsontz.cpp
${ICU_SOURCE_DIR}/i18n/astro.cpp
${ICU_SOURCE_DIR}/i18n/taiwncal.cpp
${ICU_SOURCE_DIR}/i18n/buddhcal.cpp
${ICU_SOURCE_DIR}/i18n/persncal.cpp
${ICU_SOURCE_DIR}/i18n/islamcal.cpp
${ICU_SOURCE_DIR}/i18n/japancal.cpp
${ICU_SOURCE_DIR}/i18n/gregoimp.cpp
${ICU_SOURCE_DIR}/i18n/hebrwcal.cpp
${ICU_SOURCE_DIR}/i18n/indiancal.cpp
${ICU_SOURCE_DIR}/i18n/chnsecal.cpp
${ICU_SOURCE_DIR}/i18n/cecal.cpp
${ICU_SOURCE_DIR}/i18n/coptccal.cpp
${ICU_SOURCE_DIR}/i18n/dangical.cpp
${ICU_SOURCE_DIR}/i18n/ethpccal.cpp
${ICU_SOURCE_DIR}/i18n/coleitr.cpp
${ICU_SOURCE_DIR}/i18n/coll.cpp
${ICU_SOURCE_DIR}/i18n/sortkey.cpp
${ICU_SOURCE_DIR}/i18n/bocsu.cpp
${ICU_SOURCE_DIR}/i18n/ucoleitr.cpp
${ICU_SOURCE_DIR}/i18n/ucol.cpp
${ICU_SOURCE_DIR}/i18n/ucol_res.cpp
${ICU_SOURCE_DIR}/i18n/ucol_sit.cpp
${ICU_SOURCE_DIR}/i18n/collation.cpp
${ICU_SOURCE_DIR}/i18n/collationsettings.cpp
${ICU_SOURCE_DIR}/i18n/collationdata.cpp
${ICU_SOURCE_DIR}/i18n/collationtailoring.cpp
${ICU_SOURCE_DIR}/i18n/collationdatareader.cpp
${ICU_SOURCE_DIR}/i18n/collationdatawriter.cpp
${ICU_SOURCE_DIR}/i18n/collationfcd.cpp
${ICU_SOURCE_DIR}/i18n/collationiterator.cpp
${ICU_SOURCE_DIR}/i18n/utf16collationiterator.cpp
${ICU_SOURCE_DIR}/i18n/utf8collationiterator.cpp
${ICU_SOURCE_DIR}/i18n/uitercollationiterator.cpp
${ICU_SOURCE_DIR}/i18n/collationsets.cpp
${ICU_SOURCE_DIR}/i18n/collationcompare.cpp
${ICU_SOURCE_DIR}/i18n/collationfastlatin.cpp
${ICU_SOURCE_DIR}/i18n/collationkeys.cpp
${ICU_SOURCE_DIR}/i18n/rulebasedcollator.cpp
${ICU_SOURCE_DIR}/i18n/collationroot.cpp
${ICU_SOURCE_DIR}/i18n/collationrootelements.cpp
${ICU_SOURCE_DIR}/i18n/collationdatabuilder.cpp
${ICU_SOURCE_DIR}/i18n/collationweights.cpp
${ICU_SOURCE_DIR}/i18n/collationruleparser.cpp
${ICU_SOURCE_DIR}/i18n/collationbuilder.cpp
${ICU_SOURCE_DIR}/i18n/collationfastlatinbuilder.cpp
${ICU_SOURCE_DIR}/i18n/listformatter.cpp
${ICU_SOURCE_DIR}/i18n/ulistformatter.cpp
${ICU_SOURCE_DIR}/i18n/strmatch.cpp
${ICU_SOURCE_DIR}/i18n/usearch.cpp
${ICU_SOURCE_DIR}/i18n/search.cpp
${ICU_SOURCE_DIR}/i18n/stsearch.cpp
${ICU_SOURCE_DIR}/i18n/translit.cpp
${ICU_SOURCE_DIR}/i18n/utrans.cpp
${ICU_SOURCE_DIR}/i18n/esctrn.cpp
${ICU_SOURCE_DIR}/i18n/unesctrn.cpp
${ICU_SOURCE_DIR}/i18n/funcrepl.cpp
${ICU_SOURCE_DIR}/i18n/strrepl.cpp
${ICU_SOURCE_DIR}/i18n/tridpars.cpp
${ICU_SOURCE_DIR}/i18n/cpdtrans.cpp
${ICU_SOURCE_DIR}/i18n/rbt.cpp
${ICU_SOURCE_DIR}/i18n/rbt_data.cpp
${ICU_SOURCE_DIR}/i18n/rbt_pars.cpp
${ICU_SOURCE_DIR}/i18n/rbt_rule.cpp
${ICU_SOURCE_DIR}/i18n/rbt_set.cpp
${ICU_SOURCE_DIR}/i18n/nultrans.cpp
${ICU_SOURCE_DIR}/i18n/remtrans.cpp
${ICU_SOURCE_DIR}/i18n/casetrn.cpp
${ICU_SOURCE_DIR}/i18n/titletrn.cpp
${ICU_SOURCE_DIR}/i18n/tolowtrn.cpp
${ICU_SOURCE_DIR}/i18n/toupptrn.cpp
${ICU_SOURCE_DIR}/i18n/anytrans.cpp
${ICU_SOURCE_DIR}/i18n/name2uni.cpp
${ICU_SOURCE_DIR}/i18n/uni2name.cpp
${ICU_SOURCE_DIR}/i18n/nortrans.cpp
${ICU_SOURCE_DIR}/i18n/quant.cpp
${ICU_SOURCE_DIR}/i18n/transreg.cpp
${ICU_SOURCE_DIR}/i18n/brktrans.cpp
${ICU_SOURCE_DIR}/i18n/regexcmp.cpp
${ICU_SOURCE_DIR}/i18n/rematch.cpp
${ICU_SOURCE_DIR}/i18n/repattrn.cpp
${ICU_SOURCE_DIR}/i18n/regexst.cpp
${ICU_SOURCE_DIR}/i18n/regextxt.cpp
${ICU_SOURCE_DIR}/i18n/regeximp.cpp
${ICU_SOURCE_DIR}/i18n/uregex.cpp
${ICU_SOURCE_DIR}/i18n/uregexc.cpp
${ICU_SOURCE_DIR}/i18n/ulocdata.cpp
${ICU_SOURCE_DIR}/i18n/measfmt.cpp
${ICU_SOURCE_DIR}/i18n/currfmt.cpp
${ICU_SOURCE_DIR}/i18n/curramt.cpp
${ICU_SOURCE_DIR}/i18n/currunit.cpp
${ICU_SOURCE_DIR}/i18n/measure.cpp
${ICU_SOURCE_DIR}/i18n/utmscale.cpp
${ICU_SOURCE_DIR}/i18n/csdetect.cpp
${ICU_SOURCE_DIR}/i18n/csmatch.cpp
${ICU_SOURCE_DIR}/i18n/csr2022.cpp
${ICU_SOURCE_DIR}/i18n/csrecog.cpp
${ICU_SOURCE_DIR}/i18n/csrmbcs.cpp
${ICU_SOURCE_DIR}/i18n/csrsbcs.cpp
${ICU_SOURCE_DIR}/i18n/csrucode.cpp
${ICU_SOURCE_DIR}/i18n/csrutf8.cpp
${ICU_SOURCE_DIR}/i18n/inputext.cpp
${ICU_SOURCE_DIR}/i18n/wintzimpl.cpp
${ICU_SOURCE_DIR}/i18n/windtfmt.cpp
${ICU_SOURCE_DIR}/i18n/winnmfmt.cpp
${ICU_SOURCE_DIR}/i18n/basictz.cpp
${ICU_SOURCE_DIR}/i18n/dtrule.cpp
${ICU_SOURCE_DIR}/i18n/rbtz.cpp
${ICU_SOURCE_DIR}/i18n/tzrule.cpp
${ICU_SOURCE_DIR}/i18n/tztrans.cpp
${ICU_SOURCE_DIR}/i18n/vtzone.cpp
${ICU_SOURCE_DIR}/i18n/zonemeta.cpp
${ICU_SOURCE_DIR}/i18n/standardplural.cpp
${ICU_SOURCE_DIR}/i18n/upluralrules.cpp
${ICU_SOURCE_DIR}/i18n/plurrule.cpp
${ICU_SOURCE_DIR}/i18n/plurfmt.cpp
${ICU_SOURCE_DIR}/i18n/selfmt.cpp
${ICU_SOURCE_DIR}/i18n/dtitvfmt.cpp
${ICU_SOURCE_DIR}/i18n/dtitvinf.cpp
${ICU_SOURCE_DIR}/i18n/udateintervalformat.cpp
${ICU_SOURCE_DIR}/i18n/tmunit.cpp
${ICU_SOURCE_DIR}/i18n/tmutamt.cpp
${ICU_SOURCE_DIR}/i18n/tmutfmt.cpp
${ICU_SOURCE_DIR}/i18n/currpinf.cpp
${ICU_SOURCE_DIR}/i18n/uspoof.cpp
${ICU_SOURCE_DIR}/i18n/uspoof_impl.cpp
${ICU_SOURCE_DIR}/i18n/uspoof_build.cpp
${ICU_SOURCE_DIR}/i18n/uspoof_conf.cpp
${ICU_SOURCE_DIR}/i18n/smpdtfst.cpp
${ICU_SOURCE_DIR}/i18n/ztrans.cpp
${ICU_SOURCE_DIR}/i18n/zrule.cpp
${ICU_SOURCE_DIR}/i18n/vzone.cpp
${ICU_SOURCE_DIR}/i18n/fphdlimp.cpp
${ICU_SOURCE_DIR}/i18n/fpositer.cpp
${ICU_SOURCE_DIR}/i18n/ufieldpositer.cpp
${ICU_SOURCE_DIR}/i18n/decNumber.cpp
${ICU_SOURCE_DIR}/i18n/decContext.cpp
${ICU_SOURCE_DIR}/i18n/alphaindex.cpp
${ICU_SOURCE_DIR}/i18n/tznames.cpp
${ICU_SOURCE_DIR}/i18n/tznames_impl.cpp
${ICU_SOURCE_DIR}/i18n/tzgnames.cpp
${ICU_SOURCE_DIR}/i18n/tzfmt.cpp
${ICU_SOURCE_DIR}/i18n/compactdecimalformat.cpp
${ICU_SOURCE_DIR}/i18n/gender.cpp
${ICU_SOURCE_DIR}/i18n/region.cpp
${ICU_SOURCE_DIR}/i18n/scriptset.cpp
${ICU_SOURCE_DIR}/i18n/uregion.cpp
${ICU_SOURCE_DIR}/i18n/reldatefmt.cpp
${ICU_SOURCE_DIR}/i18n/quantityformatter.cpp
${ICU_SOURCE_DIR}/i18n/measunit.cpp
${ICU_SOURCE_DIR}/i18n/sharedbreakiterator.cpp
${ICU_SOURCE_DIR}/i18n/scientificnumberformatter.cpp
${ICU_SOURCE_DIR}/i18n/dayperiodrules.cpp
${ICU_SOURCE_DIR}/i18n/nounit.cpp
${ICU_SOURCE_DIR}/i18n/number_affixutils.cpp
${ICU_SOURCE_DIR}/i18n/number_compact.cpp
${ICU_SOURCE_DIR}/i18n/number_decimalquantity.cpp
${ICU_SOURCE_DIR}/i18n/number_decimfmtprops.cpp
${ICU_SOURCE_DIR}/i18n/number_fluent.cpp
${ICU_SOURCE_DIR}/i18n/number_formatimpl.cpp
${ICU_SOURCE_DIR}/i18n/number_grouping.cpp
${ICU_SOURCE_DIR}/i18n/number_integerwidth.cpp
${ICU_SOURCE_DIR}/i18n/number_longnames.cpp
${ICU_SOURCE_DIR}/i18n/number_modifiers.cpp
${ICU_SOURCE_DIR}/i18n/number_notation.cpp
${ICU_SOURCE_DIR}/i18n/number_output.cpp
${ICU_SOURCE_DIR}/i18n/number_padding.cpp
${ICU_SOURCE_DIR}/i18n/number_patternmodifier.cpp
${ICU_SOURCE_DIR}/i18n/number_patternstring.cpp
${ICU_SOURCE_DIR}/i18n/number_rounding.cpp
${ICU_SOURCE_DIR}/i18n/number_scientific.cpp
${ICU_SOURCE_DIR}/i18n/number_utils.cpp
${ICU_SOURCE_DIR}/i18n/number_asformat.cpp
${ICU_SOURCE_DIR}/i18n/number_mapper.cpp
${ICU_SOURCE_DIR}/i18n/number_multiplier.cpp
${ICU_SOURCE_DIR}/i18n/number_currencysymbols.cpp
${ICU_SOURCE_DIR}/i18n/number_skeletons.cpp
${ICU_SOURCE_DIR}/i18n/number_capi.cpp
${ICU_SOURCE_DIR}/i18n/double-conversion-string-to-double.cpp
${ICU_SOURCE_DIR}/i18n/double-conversion-double-to-string.cpp
${ICU_SOURCE_DIR}/i18n/double-conversion-bignum-dtoa.cpp
${ICU_SOURCE_DIR}/i18n/double-conversion-bignum.cpp
${ICU_SOURCE_DIR}/i18n/double-conversion-cached-powers.cpp
${ICU_SOURCE_DIR}/i18n/double-conversion-fast-dtoa.cpp
${ICU_SOURCE_DIR}/i18n/double-conversion-strtod.cpp
${ICU_SOURCE_DIR}/i18n/string_segment.cpp
${ICU_SOURCE_DIR}/i18n/numparse_parsednumber.cpp
${ICU_SOURCE_DIR}/i18n/numparse_impl.cpp
${ICU_SOURCE_DIR}/i18n/numparse_symbols.cpp
${ICU_SOURCE_DIR}/i18n/numparse_decimal.cpp
${ICU_SOURCE_DIR}/i18n/numparse_scientific.cpp
${ICU_SOURCE_DIR}/i18n/numparse_currency.cpp
${ICU_SOURCE_DIR}/i18n/numparse_affixes.cpp
${ICU_SOURCE_DIR}/i18n/numparse_compositions.cpp
${ICU_SOURCE_DIR}/i18n/numparse_validators.cpp
${ICU_SOURCE_DIR}/i18n/numrange_fluent.cpp
${ICU_SOURCE_DIR}/i18n/numrange_impl.cpp
${ICU_SOURCE_DIR}/i18n/erarules.cpp
${ICU_SOURCE_DIR}/i18n/formattedvalue.cpp
${ICU_SOURCE_DIR}/i18n/formattedval_iterimpl.cpp
${ICU_SOURCE_DIR}/i18n/formattedval_sbimpl.cpp
${ICU_SOURCE_DIR}/i18n/formatted_string_builder.cpp)
enable_language(ASM)
set(ICUDATA_SOURCES ${ICUDATA_SOURCE_DIR}/icudt66l_dat.S)
# Note that we don't like any kind of binary plugins (because of runtime dependencies, vulnerabilities, ABI incompatibilities).
add_definitions(-D_REENTRANT -DU_HAVE_ELF_H=1 -DU_HAVE_STRTOD_L=1 -DU_HAVE_XLOCALE_H=0 -DDEFAULT_ICU_PLUGINS="/dev/null")
add_library(icuuc ${ICUUC_SOURCES})
add_library(icui18n ${ICUI18N_SOURCES})
add_library(icudata ${ICUDATA_SOURCES})
target_link_libraries(icuuc icudata)
target_link_libraries(icui18n icuuc)
target_include_directories(icuuc SYSTEM PUBLIC ${ICU_SOURCE_DIR}/common/)
target_include_directories(icui18n SYSTEM PUBLIC ${ICU_SOURCE_DIR}/i18n/)
target_compile_definitions(icuuc PRIVATE -DU_COMMON_IMPLEMENTATION)
target_compile_definitions(icui18n PRIVATE -DU_I18N_IMPLEMENTATION)
if (COMPILER_CLANG)
target_compile_options(icudata PRIVATE -Wno-unused-command-line-argument)
endif ()
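
The block above links ICU statically: the locale data is compiled in as a prebuilt assembly blob (icudt66l_dat.S) rather than loaded from an external .dat file, and plugin loading is disabled outright via DEFAULT_ICU_PLUGINS="/dev/null". A consumer therefore only needs to link icui18n to get working collation. A minimal sketch using the standard ICU C API (not part of this commit, and assuming the icudt66l data built above):

    #include <unicode/ucol.h>
    #include <cassert>

    int main()
    {
        UErrorCode status = U_ZERO_ERROR;
        // No ICU_DATA environment variable is needed: the icudt66l archive
        // is linked straight into the binary by the icudata target above.
        UCollator * collator = ucol_open("en_US", &status);
        assert(U_SUCCESS(status));
        assert(ucol_strcoll(collator, u"apple", -1, u"banana", -1) == UCOL_LESS);
        ucol_close(collator);
    }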

1 contrib/icudata vendored Submodule

@@ -0,0 +1 @@
+Subproject commit f020820388e3faafb44cc643574a2d563dfde572

View File

@@ -297,7 +297,7 @@
  * MADV_FREE, though typically with higher
  * system overhead.
  */
-#define JEMALLOC_PURGE_MADVISE_FREE
+// #define JEMALLOC_PURGE_MADVISE_FREE
 #define JEMALLOC_PURGE_MADVISE_DONTNEED
 #define JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS
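
This hunk flips jemalloc's purging from MADV_FREE to MADV_DONTNEED. MADV_FREE only marks pages as reclaimable, so a process's resident set size stays inflated until the kernel actually needs the memory, which makes memory accounting misleading; MADV_DONTNEED drops the pages immediately. A sketch of the distinction (helper names are illustrative, not jemalloc's):

    #include <sys/mman.h>
    #include <cstddef>

    void purge_eager(void * addr, std::size_t len)
    {
        // Pages are released at once; the next access faults in fresh
        // zero-filled pages, and RSS drops immediately.
        madvise(addr, len, MADV_DONTNEED);
    }

    void purge_lazy(void * addr, std::size_t len)
    {
    #ifdef MADV_FREE
        // Pages are merely tagged as disposable; RSS shrinks only under
        // memory pressure, so metrics keep showing the memory as used.
        madvise(addr, len, MADV_FREE);
    #endif
    }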

View File

@@ -3,9 +3,4 @@ add_library(btrie
     include/btrie.h
 )
-target_include_directories (btrie PUBLIC include)
+target_include_directories (btrie SYSTEM PUBLIC include)
-if (ENABLE_TESTS)
-    add_executable(test_btrie test/test_btrie.c)
-    target_link_libraries(test_btrie btrie)
-endif ()

1 contrib/libc-headers vendored Submodule

@@ -0,0 +1 @@
+Subproject commit cd82fd9d8eefe50a47a0adf7c617c3ea7d558d11

2 contrib/libcxx vendored

@@ -1 +1 @@
-Subproject commit 9807685d51db467e097ad5eb8d5c2c16922794b2
+Subproject commit f7c63235238a71b7e0563fab8c7c5ec1b54831f6

View File

@@ -1,41 +1,45 @@
+include(CheckCXXCompilerFlag)
+
 set(LIBCXX_SOURCE_DIR ${ClickHouse_SOURCE_DIR}/contrib/libcxx)
 set(SRCS
-${LIBCXX_SOURCE_DIR}/src/optional.cpp
-${LIBCXX_SOURCE_DIR}/src/variant.cpp
-${LIBCXX_SOURCE_DIR}/src/chrono.cpp
-${LIBCXX_SOURCE_DIR}/src/thread.cpp
-${LIBCXX_SOURCE_DIR}/src/experimental/memory_resource.cpp
-${LIBCXX_SOURCE_DIR}/src/iostream.cpp
-${LIBCXX_SOURCE_DIR}/src/strstream.cpp
-${LIBCXX_SOURCE_DIR}/src/ios.cpp
-${LIBCXX_SOURCE_DIR}/src/future.cpp
-${LIBCXX_SOURCE_DIR}/src/shared_mutex.cpp
-${LIBCXX_SOURCE_DIR}/src/condition_variable.cpp
-${LIBCXX_SOURCE_DIR}/src/hash.cpp
-${LIBCXX_SOURCE_DIR}/src/string.cpp
-${LIBCXX_SOURCE_DIR}/src/debug.cpp
-${LIBCXX_SOURCE_DIR}/src/stdexcept.cpp
-${LIBCXX_SOURCE_DIR}/src/utility.cpp
-${LIBCXX_SOURCE_DIR}/src/any.cpp
-${LIBCXX_SOURCE_DIR}/src/exception.cpp
-${LIBCXX_SOURCE_DIR}/src/memory.cpp
-${LIBCXX_SOURCE_DIR}/src/new.cpp
-${LIBCXX_SOURCE_DIR}/src/valarray.cpp
-${LIBCXX_SOURCE_DIR}/src/vector.cpp
 ${LIBCXX_SOURCE_DIR}/src/algorithm.cpp
-${LIBCXX_SOURCE_DIR}/src/functional.cpp
-${LIBCXX_SOURCE_DIR}/src/regex.cpp
+${LIBCXX_SOURCE_DIR}/src/any.cpp
 ${LIBCXX_SOURCE_DIR}/src/bind.cpp
-${LIBCXX_SOURCE_DIR}/src/mutex.cpp
 ${LIBCXX_SOURCE_DIR}/src/charconv.cpp
-${LIBCXX_SOURCE_DIR}/src/typeinfo.cpp
-${LIBCXX_SOURCE_DIR}/src/locale.cpp
-${LIBCXX_SOURCE_DIR}/src/filesystem/operations.cpp
-${LIBCXX_SOURCE_DIR}/src/filesystem/int128_builtins.cpp
+${LIBCXX_SOURCE_DIR}/src/chrono.cpp
+${LIBCXX_SOURCE_DIR}/src/condition_variable.cpp
+${LIBCXX_SOURCE_DIR}/src/condition_variable_destructor.cpp
+${LIBCXX_SOURCE_DIR}/src/debug.cpp
+${LIBCXX_SOURCE_DIR}/src/exception.cpp
+${LIBCXX_SOURCE_DIR}/src/experimental/memory_resource.cpp
 ${LIBCXX_SOURCE_DIR}/src/filesystem/directory_iterator.cpp
-${LIBCXX_SOURCE_DIR}/src/system_error.cpp
+${LIBCXX_SOURCE_DIR}/src/filesystem/int128_builtins.cpp
+${LIBCXX_SOURCE_DIR}/src/filesystem/operations.cpp
+${LIBCXX_SOURCE_DIR}/src/functional.cpp
+${LIBCXX_SOURCE_DIR}/src/future.cpp
+${LIBCXX_SOURCE_DIR}/src/hash.cpp
+${LIBCXX_SOURCE_DIR}/src/ios.cpp
+${LIBCXX_SOURCE_DIR}/src/iostream.cpp
+${LIBCXX_SOURCE_DIR}/src/locale.cpp
+${LIBCXX_SOURCE_DIR}/src/memory.cpp
+${LIBCXX_SOURCE_DIR}/src/mutex.cpp
+${LIBCXX_SOURCE_DIR}/src/mutex_destructor.cpp
+${LIBCXX_SOURCE_DIR}/src/new.cpp
+${LIBCXX_SOURCE_DIR}/src/optional.cpp
 ${LIBCXX_SOURCE_DIR}/src/random.cpp
+${LIBCXX_SOURCE_DIR}/src/regex.cpp
+${LIBCXX_SOURCE_DIR}/src/shared_mutex.cpp
+${LIBCXX_SOURCE_DIR}/src/stdexcept.cpp
+${LIBCXX_SOURCE_DIR}/src/string.cpp
+${LIBCXX_SOURCE_DIR}/src/strstream.cpp
+${LIBCXX_SOURCE_DIR}/src/system_error.cpp
+${LIBCXX_SOURCE_DIR}/src/thread.cpp
+${LIBCXX_SOURCE_DIR}/src/typeinfo.cpp
+${LIBCXX_SOURCE_DIR}/src/utility.cpp
+${LIBCXX_SOURCE_DIR}/src/valarray.cpp
+${LIBCXX_SOURCE_DIR}/src/variant.cpp
+${LIBCXX_SOURCE_DIR}/src/vector.cpp
 )
 add_library(cxx ${SRCS})
@@ -43,8 +47,15 @@ add_library(cxx ${SRCS})
 target_include_directories(cxx SYSTEM BEFORE PUBLIC $<BUILD_INTERFACE:${LIBCXX_SOURCE_DIR}/include>)
 target_compile_definitions(cxx PRIVATE -D_LIBCPP_BUILDING_LIBRARY -DLIBCXX_BUILDING_LIBCXXABI)
-target_compile_options(cxx PUBLIC -nostdinc++ -Wno-reserved-id-macro)
+target_compile_options(cxx PUBLIC $<$<COMPILE_LANGUAGE:CXX>:-nostdinc++>)
+if (OS_DARWIN AND (NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 9) AND (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 11))
+    check_cxx_compiler_flag(-Wreserved-id-macro HAVE_WARNING_RESERVED_ID_MACRO)
+    if (HAVE_WARNING_RESERVED_ID_MACRO)
+        target_compile_options(cxx PUBLIC -Wno-reserved-id-macro)
+    endif ()
+endif ()
+check_cxx_compiler_flag(-Wctad-maybe-unsupported HAVE_WARNING_CTAD_MAYBE_UNSUPPORTED)
+if (HAVE_WARNING_CTAD_MAYBE_UNSUPPORTED)
     target_compile_options(cxx PUBLIC -Wno-ctad-maybe-unsupported)
 endif ()

2 contrib/libcxxabi vendored

@@ -1 +1 @@
-Subproject commit d56efcc7a52739518dbe7df9e743073e00951fa1
+Subproject commit c26cf36f8387c5edf2cabb4a630f0975c35aa9fb

View File

@@ -1,2 +0,0 @@
google-perftools@googlegroups.com

View File

@@ -1,80 +0,0 @@
message (STATUS "Building: tcmalloc_minimal_internal")
add_library (tcmalloc_minimal_internal
./src/malloc_hook.cc
./src/base/spinlock_internal.cc
./src/base/spinlock.cc
./src/base/dynamic_annotations.c
./src/base/linuxthreads.cc
./src/base/elf_mem_image.cc
./src/base/vdso_support.cc
./src/base/sysinfo.cc
./src/base/low_level_alloc.cc
./src/base/thread_lister.c
./src/base/logging.cc
./src/base/atomicops-internals-x86.cc
./src/memfs_malloc.cc
./src/tcmalloc.cc
./src/malloc_extension.cc
./src/thread_cache.cc
./src/symbolize.cc
./src/page_heap.cc
./src/maybe_threads.cc
./src/central_freelist.cc
./src/static_vars.cc
./src/sampler.cc
./src/internal_logging.cc
./src/system-alloc.cc
./src/span.cc
./src/common.cc
./src/stacktrace.cc
./src/stack_trace_table.cc
./src/heap-checker.cc
./src/heap-checker-bcad.cc
./src/heap-profile-table.cc
./src/raw_printer.cc
./src/memory_region_map.cc
)
target_compile_options (tcmalloc_minimal_internal
PRIVATE
-DNO_TCMALLOC_SAMPLES
-DNDEBUG
-DNO_FRAME_POINTER
-Wwrite-strings
-Wno-sign-compare
-Wno-unused-result
-Wno-deprecated-declarations
-Wno-unused-function
-Wno-unused-private-field
PUBLIC
-fno-builtin-malloc
-fno-builtin-free
-fno-builtin-realloc
-fno-builtin-calloc
-fno-builtin-cfree
-fno-builtin-memalign
-fno-builtin-posix_memalign
-fno-builtin-valloc
-fno-builtin-pvalloc
)
if (CMAKE_CXX_COMPILER_ID STREQUAL "Clang" AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 3.9.1)
target_compile_options(tcmalloc_minimal_internal PUBLIC -Wno-dynamic-exception-spec )
endif ()
if (CMAKE_SYSTEM MATCHES "FreeBSD" AND CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
target_compile_options(tcmalloc_minimal_internal PUBLIC -Wno-unused-but-set-variable)
endif ()
if (CMAKE_SYSTEM MATCHES "FreeBSD")
target_compile_definitions(tcmalloc_minimal_internal PUBLIC _GNU_SOURCE)
endif ()
target_include_directories (tcmalloc_minimal_internal PUBLIC include)
target_include_directories (tcmalloc_minimal_internal PRIVATE src)
find_package (Threads)
target_link_libraries (tcmalloc_minimal_internal ${CMAKE_THREAD_LIBS_INIT})

View File

@@ -1,28 +0,0 @@
Copyright (c) 2005, Google Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@@ -1,8 +0,0 @@
https://github.com/gperftools/gperftools/commit/dde32f8bbc95312379f9f5a651799815bb6327c5
Several modifications:
1. Disabled TCMALLOC_AGGRESSIVE_DECOMMIT by default. It is important.
2. Using only files for tcmalloc_minimal build (./configure --enable-minimal).
3. Using some compiler flags from project.
4. Removed warning about unused variable when built with NDEBUG (by default).
5. Including config.h with relative path.

View File

@@ -1,422 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2005, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
// Author: Maxim Lifantsev (with design ideas by Sanjay Ghemawat)
//
//
// Module for detecting heap (memory) leaks.
//
// For full(er) information, see doc/heap_checker.html
//
// This module can be linked into programs with
// no slowdown caused by this unless you activate the leak-checker:
//
// 1. Set the environment variable HEAPCHECK to _type_ before
// running the program.
//
// _type_ is usually "normal" but can also be "minimal", "strict", or
// "draconian". (See the html file for other options, like 'local'.)
//
// After that, just run your binary. If the heap-checker detects
// a memory leak at program-exit, it will print instructions on how
// to track down the leak.
#ifndef BASE_HEAP_CHECKER_H_
#define BASE_HEAP_CHECKER_H_
#include <sys/types.h> // for size_t
// I can't #include config.h in this public API file, but I should
// really use configure (and make malloc_extension.h a .in file) to
// figure out if the system has stdint.h or not. But I'm lazy, so
// for now I'm assuming it's a problem only with MSVC.
#ifndef _MSC_VER
#include <stdint.h> // for uintptr_t
#endif
#include <stdarg.h> // for va_list
#include <vector>
// Annoying stuff for windows -- makes sure clients can import these functions
#ifndef PERFTOOLS_DLL_DECL
# ifdef _WIN32
# define PERFTOOLS_DLL_DECL __declspec(dllimport)
# else
# define PERFTOOLS_DLL_DECL
# endif
#endif
// The class is thread-safe with respect to all the provided static methods,
// as well as HeapLeakChecker objects: they can be accessed by multiple threads.
class PERFTOOLS_DLL_DECL HeapLeakChecker {
public:
// ----------------------------------------------------------------------- //
// Static functions for working with (whole-program) leak checking.
// If heap leak checking is currently active in some mode
// e.g. if leak checking was started (and is still active now)
// due to HEAPCHECK=... defined in the environment.
// The return value reflects iff HeapLeakChecker objects manually
// constructed right now will be doing leak checking or nothing.
// Note that we can go from active to inactive state during InitGoogle()
// if FLAGS_heap_check gets set to "" by some code before/during InitGoogle().
static bool IsActive();
// Return pointer to the whole-program checker if it has been created
// and NULL otherwise.
// Once GlobalChecker() returns non-NULL that object will not disappear and
// will be returned by all later GlobalChecker calls.
// This is mainly to access BytesLeaked() and ObjectsLeaked() (see below)
// for the whole-program checker after one calls NoGlobalLeaks()
// or similar and gets false.
static HeapLeakChecker* GlobalChecker();
// Do whole-program leak check now (if it was activated for this binary);
// return false only if it was activated and has failed.
// The mode of the check is controlled by the command-line flags.
// This method can be called repeatedly.
// Things like GlobalChecker()->SameHeap() can also be called explicitly
// to do the desired flavor of the check.
static bool NoGlobalLeaks();
// If the whole-program checker is active,
// cancel its automatic execution after main() exits.
// This requires that some leak check (e.g. NoGlobalLeaks())
// has been called at least once on the whole-program checker.
static void CancelGlobalCheck();
// ----------------------------------------------------------------------- //
// Non-static functions for starting and doing leak checking.
// Start checking and name the leak check performed.
// The name is used in naming dumped profiles
// and needs to be unique only within your binary.
// It must also be a string that can be a part of a file name,
// in particular not contain path expressions.
explicit HeapLeakChecker(const char *name);
// Destructor (verifies that some *NoLeaks or *SameHeap method
// has been called at least once).
~HeapLeakChecker();
// These used to be different but are all the same now: they return
// true iff all memory allocated since this HeapLeakChecker object
// was constructed is still reachable from global state.
//
// Because we fork to convert addresses to symbol-names, and forking
// is not thread-safe, and we may be called in a threaded context,
// we do not try to symbolize addresses when called manually.
bool NoLeaks() { return DoNoLeaks(DO_NOT_SYMBOLIZE); }
// These forms are obsolete; use NoLeaks() instead.
// TODO(csilvers): mark as DEPRECATED.
bool QuickNoLeaks() { return NoLeaks(); }
bool BriefNoLeaks() { return NoLeaks(); }
bool SameHeap() { return NoLeaks(); }
bool QuickSameHeap() { return NoLeaks(); }
bool BriefSameHeap() { return NoLeaks(); }
// Detailed information about the number of leaked bytes and objects
// (both of these can be negative as well).
// These are available only after a *SameHeap or *NoLeaks
// method has been called.
// Note that it's possible for both of these to be zero
// while SameHeap() or NoLeaks() returned false in case
// of a heap state change that is significant
// but preserves the byte and object counts.
ssize_t BytesLeaked() const;
ssize_t ObjectsLeaked() const;
// ----------------------------------------------------------------------- //
// Static helpers to make us ignore certain leaks.
// Scoped helper class. Should be allocated on the stack inside a
// block of code. Any heap allocations done in the code block
// covered by the scoped object (including in nested function calls
// done by the code block) will not be reported as leaks. This is
// the recommended replacement for the GetDisableChecksStart() and
// DisableChecksToHereFrom() routines below.
//
// Example:
// void Foo() {
// HeapLeakChecker::Disabler disabler;
// ... code that allocates objects whose leaks should be ignored ...
// }
//
// REQUIRES: Destructor runs in same thread as constructor
class Disabler {
public:
Disabler();
~Disabler();
private:
Disabler(const Disabler&); // disallow copy
void operator=(const Disabler&); // and assign
};
// Ignore an object located at 'ptr' (can go at the start or into the object)
// as well as all heap objects (transitively) referenced from it for the
// purposes of heap leak checking. Returns 'ptr' so that one can write
// static T* obj = IgnoreObject(new T(...));
//
// If 'ptr' does not point to an active allocated object at the time of this
// call, it is ignored; but if it does, the object must not get deleted from
// the heap later on.
//
// See also HiddenPointer, below, if you need to prevent a pointer from
// being traversed by the heap checker but do not wish to transitively
// whitelist objects referenced through it.
template <typename T>
static T* IgnoreObject(T* ptr) {
DoIgnoreObject(static_cast<const void*>(const_cast<const T*>(ptr)));
return ptr;
}
// Undo what an earlier IgnoreObject() call promised and asked to do.
// At the time of this call 'ptr' must point at or inside of an active
// allocated object which was previously registered with IgnoreObject().
static void UnIgnoreObject(const void* ptr);
// ----------------------------------------------------------------------- //
// Internal types defined in .cc
class Allocator;
struct RangeValue;
private:
// ----------------------------------------------------------------------- //
// Various helpers
// Create the name of the heap profile file.
// Should be deleted via Allocator::Free().
char* MakeProfileNameLocked();
// Helper for constructors
void Create(const char *name, bool make_start_snapshot);
enum ShouldSymbolize { SYMBOLIZE, DO_NOT_SYMBOLIZE };
// Helper for *NoLeaks and *SameHeap
bool DoNoLeaks(ShouldSymbolize should_symbolize);
// Helper for NoGlobalLeaks, also called by the global destructor.
static bool NoGlobalLeaksMaybeSymbolize(ShouldSymbolize should_symbolize);
// These used to be public, but they are now deprecated.
// Will remove entirely when all internal uses are fixed.
// In the meantime, use friendship so the unittest can still test them.
static void* GetDisableChecksStart();
static void DisableChecksToHereFrom(const void* start_address);
static void DisableChecksIn(const char* pattern);
friend void RangeDisabledLeaks();
friend void NamedTwoDisabledLeaks();
friend void* RunNamedDisabledLeaks(void*);
friend void TestHeapLeakCheckerNamedDisabling();
// Actually implements IgnoreObject().
static void DoIgnoreObject(const void* ptr);
// Disable checks based on stack trace entry at a depth <=
// max_depth. Used to hide allocations done inside some special
// libraries.
static void DisableChecksFromToLocked(const void* start_address,
const void* end_address,
int max_depth);
// Helper for DoNoLeaks to ignore all objects reachable from all live data
static void IgnoreAllLiveObjectsLocked(const void* self_stack_top);
// Callback we pass to TCMalloc_ListAllProcessThreads (see thread_lister.h)
// that is invoked when all threads of our process are found and stopped.
// The call back does the things needed to ignore live data reachable from
// thread stacks and registers for all our threads
// as well as do other global-live-data ignoring
// (via IgnoreNonThreadLiveObjectsLocked)
// during the quiet state of all threads being stopped.
// For the argument meaning see the comment by TCMalloc_ListAllProcessThreads.
// Here we only use num_threads and thread_pids, that TCMalloc_ListAllProcessThreads
// fills for us with the number and pids of all the threads of our process
// it found and attached to.
static int IgnoreLiveThreadsLocked(void* parameter,
int num_threads,
pid_t* thread_pids,
va_list ap);
// Helper for IgnoreAllLiveObjectsLocked and IgnoreLiveThreadsLocked
// that we prefer to execute from IgnoreLiveThreadsLocked
// while all threads are stopped.
// This helper does live object discovery and ignoring
// for all objects that are reachable from everything
// not related to thread stacks and registers.
static void IgnoreNonThreadLiveObjectsLocked();
// Helper for IgnoreNonThreadLiveObjectsLocked and IgnoreLiveThreadsLocked
// to discover and ignore all heap objects
// reachable from currently considered live objects
// (live_objects static global variable in our .cc file).
// "name", "name2" are two strings that we print one after another
// in a debug message to describe what kind of live object sources
// are being used.
static void IgnoreLiveObjectsLocked(const char* name, const char* name2);
// Do the overall whole-program heap leak check if needed;
// returns true when did the leak check.
static bool DoMainHeapCheck();
// Type of task for UseProcMapsLocked
enum ProcMapsTask {
RECORD_GLOBAL_DATA,
DISABLE_LIBRARY_ALLOCS
};
// Success/Error Return codes for UseProcMapsLocked.
enum ProcMapsResult {
PROC_MAPS_USED,
CANT_OPEN_PROC_MAPS,
NO_SHARED_LIBS_IN_PROC_MAPS
};
// Read /proc/self/maps, parse it, and do the 'proc_maps_task' for each line.
static ProcMapsResult UseProcMapsLocked(ProcMapsTask proc_maps_task);
// A ProcMapsTask to disable allocations from 'library'
// that is mapped to [start_address..end_address)
// (only if library is a certain system library).
static void DisableLibraryAllocsLocked(const char* library,
uintptr_t start_address,
uintptr_t end_address);
// Return true iff "*ptr" points to a heap object
// ("*ptr" can point at the start or inside of a heap object
// so that this works e.g. for pointers to C++ arrays, C++ strings,
// multiple-inherited objects, or pointers to members).
// We also fill *object_size for this object then
// and we move "*ptr" to point to the very start of the heap object.
static inline bool HaveOnHeapLocked(const void** ptr, size_t* object_size);
// Helper to shutdown heap leak checker when it's not needed
// or can't function properly.
static void TurnItselfOffLocked();
// Internally-used c-tor to start whole-executable checking.
HeapLeakChecker();
// ----------------------------------------------------------------------- //
// Friends and externally accessed helpers.
// Helper for VerifyHeapProfileTableStackGet in the unittest
// to get the recorded allocation caller for ptr,
// which must be a heap object.
static const void* GetAllocCaller(void* ptr);
friend void VerifyHeapProfileTableStackGet();
// This gets to execute before constructors for all global objects
static void BeforeConstructorsLocked();
friend void HeapLeakChecker_BeforeConstructors();
// This gets to execute after destructors for all global objects
friend void HeapLeakChecker_AfterDestructors();
// Full starting of recommended whole-program checking.
friend void HeapLeakChecker_InternalInitStart();
// Runs REGISTER_HEAPCHECK_CLEANUP cleanups and potentially
// calls DoMainHeapCheck
friend void HeapLeakChecker_RunHeapCleanups();
// ----------------------------------------------------------------------- //
// Member data.
class SpinLock* lock_; // to make HeapLeakChecker objects thread-safe
const char* name_; // our remembered name (we own it)
// NULL means this leak checker is a noop
// Snapshot taken when the checker was created. May be NULL
// for the global heap checker object. We use void* instead of
// HeapProfileTable::Snapshot* to avoid including heap-profile-table.h.
void* start_snapshot_;
bool has_checked_; // if we have done the leak check, so these are ready:
ssize_t inuse_bytes_increase_; // bytes-in-use increase for this checker
ssize_t inuse_allocs_increase_; // allocations-in-use increase
// for this checker
bool keep_profiles_; // iff we should keep the heap profiles we've made
// ----------------------------------------------------------------------- //
// Disallow "evil" constructors.
HeapLeakChecker(const HeapLeakChecker&);
void operator=(const HeapLeakChecker&);
};
// Holds a pointer that will not be traversed by the heap checker.
// Contrast with HeapLeakChecker::IgnoreObject(o), in which o and
// all objects reachable from o are ignored by the heap checker.
template <class T>
class HiddenPointer {
public:
explicit HiddenPointer(T* t)
: masked_t_(reinterpret_cast<uintptr_t>(t) ^ kHideMask) {
}
// Returns unhidden pointer. Be careful where you save the result.
T* get() const { return reinterpret_cast<T*>(masked_t_ ^ kHideMask); }
private:
// Arbitrary value, but not such that xor'ing with it is likely
// to map one valid pointer to another valid pointer:
static const uintptr_t kHideMask =
static_cast<uintptr_t>(0xF03A5F7BF03A5F7Bll);
uintptr_t masked_t_;
};
// A class that exists solely to run its destructor. This class should not be
// used directly, but instead by the REGISTER_HEAPCHECK_CLEANUP macro below.
class PERFTOOLS_DLL_DECL HeapCleaner {
public:
typedef void (*void_function)(void);
HeapCleaner(void_function f);
static void RunHeapCleanups();
private:
static std::vector<void_function>* heap_cleanups_;
};
// A macro to declare module heap check cleanup tasks
// (they run only if we are doing heap leak checking.)
// 'body' should be the cleanup code to run. 'name' doesn't matter,
// but must be unique amongst all REGISTER_HEAPCHECK_CLEANUP calls.
#define REGISTER_HEAPCHECK_CLEANUP(name, body) \
namespace { \
void heapcheck_cleanup_##name() { body; } \
static HeapCleaner heapcheck_cleaner_##name(&heapcheck_cleanup_##name); \
}
#endif // BASE_HEAP_CHECKER_H_
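
For context on what is being dropped here: the class supports both the env-driven whole-program check and explicit scoped checks. A sketch of typical explicit usage, based purely on the API above (the binary must run with HEAPCHECK set, e.g. HEAPCHECK=local, for manual checkers to be active):

    #include "heap-checker.h"
    #include <cassert>

    void check_some_work()
    {
        HeapLeakChecker checker("some_work");    // name is used for dumped profiles
        {
            HeapLeakChecker::Disabler disabler;  // allocations in this scope are ignored
            // ... set up caches that intentionally live forever ...
        }
        // ... run the code under test ...
        assert(checker.NoLeaks());               // fails if new memory became unreachable
    }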

View File

@@ -1,105 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2005, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ---
* Author: Sanjay Ghemawat
*
* Module for heap-profiling.
*
* For full(er) information, see doc/heapprofile.html
*
* This module can be linked into your program with
* no slowdown caused by this unless you activate the profiler
* using one of the following methods:
*
* 1. Before starting the program, set the environment variable
* "HEAPPROFILE" to be the name of the file to which the profile
* data should be written.
*
* 2. Programmatically, start and stop the profiler using the
* routines "HeapProfilerStart(filename)" and "HeapProfilerStop()".
*
*/
#ifndef BASE_HEAP_PROFILER_H_
#define BASE_HEAP_PROFILER_H_
#include <stddef.h>
/* Annoying stuff for windows; makes sure clients can import these functions */
#ifndef PERFTOOLS_DLL_DECL
# ifdef _WIN32
# define PERFTOOLS_DLL_DECL __declspec(dllimport)
# else
# define PERFTOOLS_DLL_DECL
# endif
#endif
/* All this code should be usable from within C apps. */
#ifdef __cplusplus
extern "C" {
#endif
/* Start profiling and arrange to write profile data to file names
* of the form: "prefix.0000", "prefix.0001", ...
*/
PERFTOOLS_DLL_DECL void HeapProfilerStart(const char* prefix);
/* Returns non-zero if we are currently profiling the heap. (Returns
* an int rather than a bool so it's usable from C.) This is true
* between calls to HeapProfilerStart() and HeapProfilerStop(), and
// also if the program has been run with HEAPPROFILE, or some other
* way to turn on whole-program profiling.
*/
int IsHeapProfilerRunning();
/* Stop heap profiling. Can be restarted again with HeapProfilerStart(),
* but the currently accumulated profiling information will be cleared.
*/
PERFTOOLS_DLL_DECL void HeapProfilerStop();
/* Dump a profile now - can be used for dumping at a hopefully
* quiescent state in your program, in order to more easily track down
* memory leaks. Will include the reason in the logged message
*/
PERFTOOLS_DLL_DECL void HeapProfilerDump(const char *reason);
/* Generate current heap profiling information.
* Returns an empty string when heap profiling is not active.
* The returned pointer is a '\0'-terminated string allocated using malloc()
* and should be free()-ed as soon as the caller does not need it anymore.
*/
PERFTOOLS_DLL_DECL char* GetHeapProfile();
#ifdef __cplusplus
} // extern "C"
#endif
#endif /* BASE_HEAP_PROFILER_H_ */
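
A sketch of the start/dump/stop lifecycle this header describes (the file prefix is arbitrary):

    #include "heap-profiler.h"
    #include <cstdio>
    #include <cstdlib>

    int main()
    {
        HeapProfilerStart("/tmp/myprog");     // profiles go to /tmp/myprog.0000, ...
        void * big = std::malloc(1 << 20);
        HeapProfilerDump("after big alloc");  // the reason string ends up in the log
        if (IsHeapProfilerRunning())
        {
            char * profile = GetHeapProfile();  // malloc()-ed; caller must free
            std::puts(profile);
            std::free(profile);
        }
        std::free(big);
        HeapProfilerStop();
    }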

View File

@@ -1,434 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2005, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
// Author: Sanjay Ghemawat <opensource@google.com>
//
// Extra extensions exported by some malloc implementations. These
// extensions are accessed through a virtual base class so an
// application can link against a malloc that does not implement these
// extensions, and it will get default versions that do nothing.
//
// NOTE FOR C USERS: If you wish to use this functionality from within
// a C program, see malloc_extension_c.h.
#ifndef BASE_MALLOC_EXTENSION_H_
#define BASE_MALLOC_EXTENSION_H_
#include <stddef.h>
// I can't #include config.h in this public API file, but I should
// really use configure (and make malloc_extension.h a .in file) to
// figure out if the system has stdint.h or not. But I'm lazy, so
// for now I'm assuming it's a problem only with MSVC.
#ifndef _MSC_VER
#include <stdint.h>
#endif
#include <string>
#include <vector>
// Annoying stuff for windows -- makes sure clients can import these functions
#ifndef PERFTOOLS_DLL_DECL
# ifdef _WIN32
# define PERFTOOLS_DLL_DECL __declspec(dllimport)
# else
# define PERFTOOLS_DLL_DECL
# endif
#endif
static const int kMallocHistogramSize = 64;
// One day, we could support other types of writers (perhaps for C?)
typedef std::string MallocExtensionWriter;
namespace base {
struct MallocRange;
}
// Interface to a pluggable system allocator.
class PERFTOOLS_DLL_DECL SysAllocator {
public:
SysAllocator() {
}
virtual ~SysAllocator();
// Allocates "size"-byte of memory from system aligned with "alignment".
// Returns NULL if failed. Otherwise, the returned pointer p up to and
// including (p + actual_size -1) have been allocated.
virtual void* Alloc(size_t size, size_t *actual_size, size_t alignment) = 0;
};
// The default implementations of the following routines do nothing.
// All implementations should be thread-safe; the current one
// (TCMallocImplementation) is.
class PERFTOOLS_DLL_DECL MallocExtension {
public:
virtual ~MallocExtension();
// Call this very early in the program execution -- say, in a global
// constructor -- to set up parameters and state needed by all
// instrumented malloc implementations. One example: this routine
// sets environment variables to tell STL to use libc's malloc()
// instead of doing its own memory management. This is safe to call
// multiple times, as long as each time is before threads start up.
static void Initialize();
// See "verify_memory.h" to see what these routines do
virtual bool VerifyAllMemory();
virtual bool VerifyNewMemory(const void* p);
virtual bool VerifyArrayNewMemory(const void* p);
virtual bool VerifyMallocMemory(const void* p);
virtual bool MallocMemoryStats(int* blocks, size_t* total,
int histogram[kMallocHistogramSize]);
// Get a human readable description of the following malloc data structures.
// - Total inuse memory by application.
// - Free memory(thread, central and page heap),
// - Freelist of central cache, each class.
// - Page heap freelist.
// The state is stored as a null-terminated string
// in a prefix of "buffer[0,buffer_length-1]".
// REQUIRES: buffer_length > 0.
virtual void GetStats(char* buffer, int buffer_length);
// Outputs to "writer" a sample of live objects and the stack traces
// that allocated these objects. The format of the returned output
// is equivalent to the output of the heap profiler and can
// therefore be passed to "pprof". This function is equivalent to
// ReadStackTraces. The main difference is that this function returns
// serialized data appropriately formatted for use by the pprof tool.
// NOTE: by default, tcmalloc does not do any heap sampling, and this
// function will always return an empty sample. To get useful
// data from GetHeapSample, you must also set the environment
// variable TCMALLOC_SAMPLE_PARAMETER to a value such as 524288.
virtual void GetHeapSample(MallocExtensionWriter* writer);
// Outputs to "writer" the stack traces that caused growth in the
// address space size. The format of the returned output is
// equivalent to the output of the heap profiler and can therefore
// be passed to "pprof". This function is equivalent to
// ReadHeapGrowthStackTraces. The main difference is that this function
// returns serialized data appropriately formatted for use by the
// pprof tool. (This does not depend on, or require,
// TCMALLOC_SAMPLE_PARAMETER.)
virtual void GetHeapGrowthStacks(MallocExtensionWriter* writer);
// Invokes func(arg, range) for every controlled memory
// range. *range is filled in with information about the range.
//
// This is a best-effort interface useful only for performance
// analysis. The implementation may not call func at all.
typedef void (RangeFunction)(void*, const base::MallocRange*);
virtual void Ranges(void* arg, RangeFunction func);
// -------------------------------------------------------------------
// Control operations for getting and setting malloc implementation
// specific parameters. Some currently useful properties:
//
// generic
// -------
// "generic.current_allocated_bytes"
// Number of bytes currently allocated by application
// This property is not writable.
//
// "generic.heap_size"
// Number of bytes in the heap ==
// current_allocated_bytes +
// fragmentation +
// freed memory regions
// This property is not writable.
//
// tcmalloc
// --------
// "tcmalloc.max_total_thread_cache_bytes"
// Upper limit on total number of bytes stored across all
// per-thread caches. Default: 16MB.
//
// "tcmalloc.current_total_thread_cache_bytes"
// Number of bytes used across all thread caches.
// This property is not writable.
//
// "tcmalloc.central_cache_free_bytes"
// Number of free bytes in the central cache that have been
// assigned to size classes. They always count towards virtual
// memory usage, and unless the underlying memory is swapped out
// by the OS, they also count towards physical memory usage.
// This property is not writable.
//
// "tcmalloc.transfer_cache_free_bytes"
// Number of free bytes that are waiting to be transferred between
// the central cache and a thread cache. They always count
// towards virtual memory usage, and unless the underlying memory
// is swapped out by the OS, they also count towards physical
// memory usage. This property is not writable.
//
// "tcmalloc.thread_cache_free_bytes"
// Number of free bytes in thread caches. They always count
// towards virtual memory usage, and unless the underlying memory
// is swapped out by the OS, they also count towards physical
// memory usage. This property is not writable.
//
// "tcmalloc.pageheap_free_bytes"
// Number of bytes in free, mapped pages in page heap. These
// bytes can be used to fulfill allocation requests. They
// always count towards virtual memory usage, and unless the
// underlying memory is swapped out by the OS, they also count
// towards physical memory usage. This property is not writable.
//
// "tcmalloc.pageheap_unmapped_bytes"
// Number of bytes in free, unmapped pages in page heap.
// These are bytes that have been released back to the OS,
// possibly by one of the MallocExtension "Release" calls.
// They can be used to fulfill allocation requests, but
// typically incur a page fault. They always count towards
// virtual memory usage, and depending on the OS, typically
// do not count towards physical memory usage. This property
// is not writable.
// -------------------------------------------------------------------
// Get the named "property"'s value. Returns true if the property
// is known. Returns false if the property is not a valid property
// name for the current malloc implementation.
// REQUIRES: property != NULL; value != NULL
virtual bool GetNumericProperty(const char* property, size_t* value);
// Set the named "property"'s value. Returns true if the property
// is known and writable. Returns false if the property is not a
// valid property name for the current malloc implementation, or
// is not writable.
// REQUIRES: property != NULL
virtual bool SetNumericProperty(const char* property, size_t value);
// Mark the current thread as "idle". This routine may optionally
// be called by threads as a hint to the malloc implementation that
// any thread-specific resources should be released. Note: this may
// be an expensive routine, so it should not be called too often.
//
// Also, if the code that calls this routine will go to sleep for
// a while, it should take care to not allocate anything between
// the call to this routine and the beginning of the sleep.
//
// Most malloc implementations ignore this routine.
virtual void MarkThreadIdle();
// Mark the current thread as "busy". This routine should be
// called after MarkThreadIdle() if the thread will now do more
// work. If this method is not called, performance may suffer.
//
// Most malloc implementations ignore this routine.
virtual void MarkThreadBusy();
// Gets the system allocator used by the malloc extension instance. Returns
// NULL for malloc implementations that do not support pluggable system
// allocators.
virtual SysAllocator* GetSystemAllocator();
// Sets the system allocator to the specified.
//
// Users could register their own system allocators for malloc implementation
// that supports pluggable system allocators, such as TCMalloc, by doing:
// alloc = new MyOwnSysAllocator();
// MallocExtension::instance()->SetSystemAllocator(alloc);
// It's up to users whether to fall back (recommended) to the default
// system allocator (use GetSystemAllocator() above) or not. The caller is
// responsible for any necessary locking.
// See tcmalloc/system-alloc.h for the interface and
// tcmalloc/memfs_malloc.cc for the examples.
//
// It's a no-op for malloc implementations that do not support pluggable
// system allocators.
virtual void SetSystemAllocator(SysAllocator *a);
// Try to release num_bytes of free memory back to the operating
// system for reuse. Use this extension with caution -- to get this
// memory back may require faulting pages back in by the OS, and
// that may be slow. (Currently only implemented in tcmalloc.)
virtual void ReleaseToSystem(size_t num_bytes);
// Same as ReleaseToSystem() but release as much memory as possible.
virtual void ReleaseFreeMemory();
// Sets the rate at which we release unused memory to the system.
// Zero means we never release memory back to the system. Increase
// this flag to return memory faster; decrease it to return memory
// slower. Reasonable rates are in the range [0,10]. (Currently
// only implemented in tcmalloc).
virtual void SetMemoryReleaseRate(double rate);
// Gets the release rate. Returns a value < 0 if unknown.
virtual double GetMemoryReleaseRate();
// Returns the estimated number of bytes that will be allocated for
// a request of "size" bytes. This is an estimate: an allocation of
// SIZE bytes may reserve more bytes, but will never reserve less.
// (Currently only implemented in tcmalloc, other implementations
// always return SIZE.)
// This is equivalent to malloc_good_size() in OS X.
virtual size_t GetEstimatedAllocatedSize(size_t size);
// Returns the actual number N of bytes reserved by tcmalloc for the
// pointer p. The client is allowed to use the range of bytes
// [p, p+N) in any way it wishes (i.e. N is the "usable size" of this
// allocation). This number may be equal to or greater than the number
// of bytes requested when p was allocated.
// p must have been allocated by this malloc implementation,
// must not be an interior pointer -- that is, must be exactly
// the pointer returned to by malloc() et al., not some offset
// from that -- and should not have been freed yet. p may be NULL.
// (Currently only implemented in tcmalloc; other implementations
// will return 0.)
// This is equivalent to malloc_size() in OS X, malloc_usable_size()
// in glibc, and _msize() for windows.
virtual size_t GetAllocatedSize(const void* p);
// Returns kOwned if this malloc implementation allocated the memory
// pointed to by p, or kNotOwned if some other malloc implementation
// allocated it or p is NULL. May also return kUnknownOwnership if
// the malloc implementation does not keep track of ownership.
// REQUIRES: p must be a value returned from a previous call to
// malloc(), calloc(), realloc(), memalign(), posix_memalign(),
// valloc(), pvalloc(), new, or new[], and must refer to memory that
// is currently allocated (so, for instance, you should not pass in
// a pointer after having called free() on it).
enum Ownership {
// NOTE: Enum values MUST be kept in sync with the version in
// malloc_extension_c.h
kUnknownOwnership = 0,
kOwned,
kNotOwned
};
virtual Ownership GetOwnership(const void* p);
// The current malloc implementation. Always non-NULL.
static MallocExtension* instance();
// Change the malloc implementation. Typically called by the
// malloc implementation during initialization.
static void Register(MallocExtension* implementation);
// Returns detailed information about malloc's freelists. For each list,
// return a FreeListInfo:
struct FreeListInfo {
size_t min_object_size;
size_t max_object_size;
size_t total_bytes_free;
const char* type;
};
// Each item in the vector refers to a different freelist. The lists
// are identified by the range of allocations that objects in the
// list can satisfy ([min_object_size, max_object_size]) and the
// type of freelist (see below). The current size of the list is
// returned in total_bytes_free (which counts against a process's
// resident and virtual size).
//
// Currently supported types are:
//
// "tcmalloc.page{_unmapped}" - tcmalloc's page heap. An entry for each size
// class in the page heap is returned. Bytes in "page_unmapped"
// are no longer backed by physical memory and do not count against
// the resident size of a process.
//
// "tcmalloc.large{_unmapped}" - tcmalloc's list of objects larger
// than the largest page heap size class. Only one "large"
// entry is returned. There is no upper-bound on the size
// of objects in the large free list; this call returns
// kint64max for max_object_size. Bytes in
// "large_unmapped" are no longer backed by physical memory
// and do not count against the resident size of a process.
//
// "tcmalloc.central" - tcmalloc's central free-list. One entry per
// size-class is returned. Never unmapped.
//
// "debug.free_queue" - free objects queued by the debug allocator
// and not returned to tcmalloc.
//
// "tcmalloc.thread" - tcmalloc's per-thread caches. Never unmapped.
virtual void GetFreeListSizes(std::vector<FreeListInfo>* v);
// Get a list of stack traces of sampled allocation points. Returns
// a pointer to a "new[]-ed" result array, and stores the sample
// period in "sample_period".
//
// The state is stored as a sequence of adjacent entries
// in the returned array. Each entry has the following form:
// uintptr_t count; // Number of objects with following trace
// uintptr_t size; // Total size of objects with following trace
// uintptr_t depth; // Number of PC values in stack trace
// void* stack[depth]; // PC values that form the stack trace
//
// The list of entries is terminated by a "count" of 0.
//
// It is the responsibility of the caller to "delete[]" the returned array.
//
// May return NULL to indicate no results.
//
// This is an internal extension. Callers should use the more
// convenient "GetHeapSample(string*)" method defined above.
virtual void** ReadStackTraces(int* sample_period);
// Like ReadStackTraces(), but returns stack traces that caused growth
// in the address space size.
virtual void** ReadHeapGrowthStackTraces();
// Returns the size in bytes of the calling threads cache.
virtual size_t GetThreadCacheSize();
// Like MarkThreadIdle, but does not destroy the internal data
// structures of the thread cache. When the thread resumes, it will
// have an empty cache but will not need to pay to reconstruct the
// cache data structures.
virtual void MarkThreadTemporarilyIdle();
};
namespace base {
// Information passed per range. More fields may be added later.
struct MallocRange {
enum Type {
INUSE, // Application is using this range
FREE, // Range is currently free
UNMAPPED, // Backing physical memory has been returned to the OS
UNKNOWN
// More enum values may be added in the future
};
uintptr_t address; // Address of range
size_t length; // Byte length of range
Type type; // Type of this range
double fraction; // Fraction of range that is being used (0 if !INUSE)
// Perhaps add the following:
// - stack trace if this range was sampled
// - heap growth stack trace if applicable to this range
// - age when allocated (for inuse) or freed (if not in use)
};
} // namespace base
#endif // BASE_MALLOC_EXTENSION_H_
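
A sketch of how these extensions are reached in practice; the property names are the ones documented above:

    #include "malloc_extension.h"
    #include <cstdio>

    int main()
    {
        MallocExtension * ext = MallocExtension::instance();  // always non-NULL

        size_t allocated = 0;
        if (ext->GetNumericProperty("generic.current_allocated_bytes", &allocated))
            std::printf("application is using %zu bytes\n", allocated);

        ext->MarkThreadIdle();      // hint: this thread is about to go quiet
        ext->ReleaseFreeMemory();   // hand free pages back to the OS (tcmalloc only)
    }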

View File

@@ -1,101 +0,0 @@
/* Copyright (c) 2008, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* --
* Author: Craig Silverstein
*
* C shims for the C++ malloc_extension.h. See malloc_extension.h for
* details. Note these C shims always work on
* MallocExtension::instance(); it is not possible to have more than
* one MallocExtension object in C applications.
*/
#ifndef _MALLOC_EXTENSION_C_H_
#define _MALLOC_EXTENSION_C_H_
#include <stddef.h>
#include <sys/types.h>
/* Annoying stuff for windows -- makes sure clients can import these fns */
#ifndef PERFTOOLS_DLL_DECL
# ifdef _WIN32
# define PERFTOOLS_DLL_DECL __declspec(dllimport)
# else
# define PERFTOOLS_DLL_DECL
# endif
#endif
#ifdef __cplusplus
extern "C" {
#endif
#define kMallocExtensionHistogramSize 64
PERFTOOLS_DLL_DECL int MallocExtension_VerifyAllMemory(void);
PERFTOOLS_DLL_DECL int MallocExtension_VerifyNewMemory(const void* p);
PERFTOOLS_DLL_DECL int MallocExtension_VerifyArrayNewMemory(const void* p);
PERFTOOLS_DLL_DECL int MallocExtension_VerifyMallocMemory(const void* p);
PERFTOOLS_DLL_DECL int MallocExtension_MallocMemoryStats(int* blocks, size_t* total,
int histogram[kMallocExtensionHistogramSize]);
PERFTOOLS_DLL_DECL void MallocExtension_GetStats(char* buffer, int buffer_length);
/* TODO(csilvers): write a C version of these routines, that perhaps
* takes a function ptr and a void *.
*/
/* void MallocExtension_GetHeapSample(string* result); */
/* void MallocExtension_GetHeapGrowthStacks(string* result); */
PERFTOOLS_DLL_DECL int MallocExtension_GetNumericProperty(const char* property, size_t* value);
PERFTOOLS_DLL_DECL int MallocExtension_SetNumericProperty(const char* property, size_t value);
PERFTOOLS_DLL_DECL void MallocExtension_MarkThreadIdle(void);
PERFTOOLS_DLL_DECL void MallocExtension_MarkThreadBusy(void);
PERFTOOLS_DLL_DECL void MallocExtension_ReleaseToSystem(size_t num_bytes);
PERFTOOLS_DLL_DECL void MallocExtension_ReleaseFreeMemory(void);
PERFTOOLS_DLL_DECL size_t MallocExtension_GetEstimatedAllocatedSize(size_t size);
PERFTOOLS_DLL_DECL size_t MallocExtension_GetAllocatedSize(const void* p);
PERFTOOLS_DLL_DECL size_t MallocExtension_GetThreadCacheSize(void);
PERFTOOLS_DLL_DECL void MallocExtension_MarkThreadTemporarilyIdle(void);
/*
* NOTE: These enum values MUST be kept in sync with the version in
* malloc_extension.h
*/
typedef enum {
MallocExtension_kUnknownOwnership = 0,
MallocExtension_kOwned,
MallocExtension_kNotOwned
} MallocExtension_Ownership;
PERFTOOLS_DLL_DECL MallocExtension_Ownership MallocExtension_GetOwnership(const void* p);
#ifdef __cplusplus
} /* extern "C" */
#endif
#endif /* _MALLOC_EXTENSION_C_H_ */
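
The shims mirror the C++ interface one-to-one and always act on MallocExtension::instance(); a sketch:

    #include "malloc_extension_c.h"
    #include <stdio.h>

    int main(void)
    {
        size_t heap_size = 0;
        if (MallocExtension_GetNumericProperty("generic.heap_size", &heap_size))
            printf("heap size: %zu bytes\n", heap_size);
        MallocExtension_ReleaseFreeMemory();  /* same as the C++ ReleaseFreeMemory() */
        return 0;
    }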

View File

@@ -1,359 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2005, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
// Author: Sanjay Ghemawat
//
// Some of our malloc implementations can invoke the following hooks whenever
// memory is allocated or deallocated. MallocHook is thread-safe, and things
// you do before calling AddFooHook(MyHook) are visible to any resulting calls
// to MyHook. Hooks must be thread-safe. If you write:
//
// CHECK(MallocHook::AddNewHook(&MyNewHook));
//
// MyNewHook will be invoked in subsequent calls in the current thread, but
// there are no guarantees on when it might be invoked in other threads.
//
// There are a limited number of slots available for each hook type. Add*Hook
// will return false if there are no slots available. Remove*Hook will return
// false if the given hook was not already installed.
//
// The order in which individual hooks are called in Invoke*Hook is undefined.
//
// It is safe for a hook to remove itself within Invoke*Hook and add other
// hooks. Any hooks added inside a hook invocation (for the same hook type)
// will not be invoked for the current invocation.
//
// One important user of these hooks is the heap profiler.
//
// CAVEAT: If you add new MallocHook::Invoke* calls then those calls must be
// directly in the code of the (de)allocation function that is provided to the
// user and that function must have an ATTRIBUTE_SECTION(malloc_hook) attribute.
//
// Note: the Invoke*Hook() functions are defined in malloc_hook-inl.h. If you
// need to invoke a hook (which you shouldn't unless you're part of tcmalloc),
// be sure to #include malloc_hook-inl.h in addition to malloc_hook.h.
//
// NOTE FOR C USERS: If you want to use malloc_hook functionality from
// a C program, #include malloc_hook_c.h instead of this file.
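//
// A sketch of a matching hook pair (names are illustrative, not from this
// file; requires <atomic>):
//
//   static std::atomic<long> g_live_allocs(0);
//   static void MyNewHook(const void* ptr, size_t /*size*/) { if (ptr) ++g_live_allocs; }
//   static void MyDeleteHook(const void* ptr)               { if (ptr) --g_live_allocs; }
//   ...
//   CHECK(MallocHook::AddNewHook(&MyNewHook));
//   CHECK(MallocHook::AddDeleteHook(&MyDeleteHook));
//   ...
//   CHECK(MallocHook::RemoveNewHook(&MyNewHook));
//   CHECK(MallocHook::RemoveDeleteHook(&MyDeleteHook));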
#ifndef _MALLOC_HOOK_H_
#define _MALLOC_HOOK_H_
#include <stddef.h>
#include <sys/types.h>
extern "C" {
#include "malloc_hook_c.h" // a C version of the malloc_hook interface
}
// Annoying stuff for windows -- makes sure clients can import these functions
#ifndef PERFTOOLS_DLL_DECL
# ifdef _WIN32
# define PERFTOOLS_DLL_DECL __declspec(dllimport)
# else
# define PERFTOOLS_DLL_DECL
# endif
#endif
// The C++ methods below call the C version (MallocHook_*), and thus
// convert between an int and a bool. Windows complains about this
// (a "performance warning") which we don't care about, so we suppress.
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable:4800)
#endif
// Note: malloc_hook_c.h defines MallocHook_*Hook and
// MallocHook_{Add,Remove}*Hook. The version of these inside the MallocHook
// class are defined in terms of the malloc_hook_c version. See malloc_hook_c.h
// for details of these types/functions.
class PERFTOOLS_DLL_DECL MallocHook {
public:
// The NewHook is invoked whenever an object is allocated.
// It may be passed NULL if the allocator returned NULL.
typedef MallocHook_NewHook NewHook;
inline static bool AddNewHook(NewHook hook) {
return MallocHook_AddNewHook(hook);
}
inline static bool RemoveNewHook(NewHook hook) {
return MallocHook_RemoveNewHook(hook);
}
inline static void InvokeNewHook(const void* p, size_t s);
// The DeleteHook is invoked whenever an object is deallocated.
// It may be passed NULL if the caller is trying to delete NULL.
typedef MallocHook_DeleteHook DeleteHook;
inline static bool AddDeleteHook(DeleteHook hook) {
return MallocHook_AddDeleteHook(hook);
}
inline static bool RemoveDeleteHook(DeleteHook hook) {
return MallocHook_RemoveDeleteHook(hook);
}
inline static void InvokeDeleteHook(const void* p);
// The PreMmapHook is invoked with mmap or mmap64 arguments just
// before the call is actually made. Such a hook may be useful
// in memory limited contexts, to catch allocations that will exceed
// a memory limit, and take outside actions to increase that limit.
typedef MallocHook_PreMmapHook PreMmapHook;
inline static bool AddPreMmapHook(PreMmapHook hook) {
return MallocHook_AddPreMmapHook(hook);
}
inline static bool RemovePreMmapHook(PreMmapHook hook) {
return MallocHook_RemovePreMmapHook(hook);
}
inline static void InvokePreMmapHook(const void* start,
size_t size,
int protection,
int flags,
int fd,
off_t offset);
// The MmapReplacement is invoked after the PreMmapHook but before
// the call is actually made. The MmapReplacement should return true
// if it handled the call, or false if it is still necessary to
// call mmap/mmap64.
// This should be used only by experts, and users must be
// extremely careful to avoid recursive calls to mmap. The replacement
// should be async signal safe.
// Only one MmapReplacement is supported. After setting an MmapReplacement
// you must call RemoveMmapReplacement before calling SetMmapReplacement
// again.
typedef MallocHook_MmapReplacement MmapReplacement;
inline static bool SetMmapReplacement(MmapReplacement hook) {
return MallocHook_SetMmapReplacement(hook);
}
inline static bool RemoveMmapReplacement(MmapReplacement hook) {
return MallocHook_RemoveMmapReplacement(hook);
}
inline static bool InvokeMmapReplacement(const void* start,
size_t size,
int protection,
int flags,
int fd,
off_t offset,
void** result);
// The MmapHook is invoked whenever a region of memory is mapped.
// It may be passed MAP_FAILED if the mmap failed.
typedef MallocHook_MmapHook MmapHook;
inline static bool AddMmapHook(MmapHook hook) {
return MallocHook_AddMmapHook(hook);
}
inline static bool RemoveMmapHook(MmapHook hook) {
return MallocHook_RemoveMmapHook(hook);
}
inline static void InvokeMmapHook(const void* result,
const void* start,
size_t size,
int protection,
int flags,
int fd,
off_t offset);
// The MunmapReplacement is invoked with munmap arguments just before
// the call is actually made. The MunmapReplacement should return true
// if it handled the call, or false if it is still necessary to
// call munmap.
// This should be used only by experts. The replacement should be
// async signal safe.
// Only one MunmapReplacement is supported. After setting an
// MunmapReplacement you must call RemoveMunmapReplacement before
// calling SetMunmapReplacement again.
typedef MallocHook_MunmapReplacement MunmapReplacement;
inline static bool SetMunmapReplacement(MunmapReplacement hook) {
return MallocHook_SetMunmapReplacement(hook);
}
inline static bool RemoveMunmapReplacement(MunmapReplacement hook) {
return MallocHook_RemoveMunmapReplacement(hook);
}
inline static bool InvokeMunmapReplacement(const void* p,
size_t size,
int* result);
// The MunmapHook is invoked whenever a region of memory is unmapped.
typedef MallocHook_MunmapHook MunmapHook;
inline static bool AddMunmapHook(MunmapHook hook) {
return MallocHook_AddMunmapHook(hook);
}
inline static bool RemoveMunmapHook(MunmapHook hook) {
return MallocHook_RemoveMunmapHook(hook);
}
inline static void InvokeMunmapHook(const void* p, size_t size);
// The MremapHook is invoked whenever a region of memory is remapped.
typedef MallocHook_MremapHook MremapHook;
inline static bool AddMremapHook(MremapHook hook) {
return MallocHook_AddMremapHook(hook);
}
inline static bool RemoveMremapHook(MremapHook hook) {
return MallocHook_RemoveMremapHook(hook);
}
inline static void InvokeMremapHook(const void* result,
const void* old_addr,
size_t old_size,
size_t new_size,
int flags,
const void* new_addr);
// The PreSbrkHook is invoked just before sbrk is called -- except when
// the increment is 0. This is because sbrk(0) is often called
// to query the current program break (the top of the heap), and is not actually a
// memory-allocation call. It may be useful in memory-limited contexts,
// to catch allocations that will exceed the limit and take outside
// actions to increase such a limit.
typedef MallocHook_PreSbrkHook PreSbrkHook;
inline static bool AddPreSbrkHook(PreSbrkHook hook) {
return MallocHook_AddPreSbrkHook(hook);
}
inline static bool RemovePreSbrkHook(PreSbrkHook hook) {
return MallocHook_RemovePreSbrkHook(hook);
}
inline static void InvokePreSbrkHook(ptrdiff_t increment);
// The SbrkHook is invoked whenever sbrk is called -- except when
// the increment is 0. This is because sbrk(0) is often called
// to query the current program break (the top of the heap), and is not actually a
// memory-allocation call.
typedef MallocHook_SbrkHook SbrkHook;
inline static bool AddSbrkHook(SbrkHook hook) {
return MallocHook_AddSbrkHook(hook);
}
inline static bool RemoveSbrkHook(SbrkHook hook) {
return MallocHook_RemoveSbrkHook(hook);
}
inline static void InvokeSbrkHook(const void* result, ptrdiff_t increment);
// Get the current stack trace. Try to skip all routines up to
// and including the caller of MallocHook::Invoke*.
// Use "skip_count" (similarly to GetStackTrace from stacktrace.h)
// as a hint about how many routines to skip if better information
// is not available.
inline static int GetCallerStackTrace(void** result, int max_depth,
int skip_count) {
return MallocHook_GetCallerStackTrace(result, max_depth, skip_count);
}
// Unhooked versions of mmap() and munmap(). These should be used
// only by experts, since they bypass heapchecking, etc.
// Note: These do not run hooks, but they still use the MmapReplacement
// and MunmapReplacement.
static void* UnhookedMMap(void *start, size_t length, int prot, int flags,
int fd, off_t offset);
static int UnhookedMUnmap(void *start, size_t length);
// The following are DEPRECATED.
inline static NewHook GetNewHook();
inline static NewHook SetNewHook(NewHook hook) {
return MallocHook_SetNewHook(hook);
}
inline static DeleteHook GetDeleteHook();
inline static DeleteHook SetDeleteHook(DeleteHook hook) {
return MallocHook_SetDeleteHook(hook);
}
inline static PreMmapHook GetPreMmapHook();
inline static PreMmapHook SetPreMmapHook(PreMmapHook hook) {
return MallocHook_SetPreMmapHook(hook);
}
inline static MmapHook GetMmapHook();
inline static MmapHook SetMmapHook(MmapHook hook) {
return MallocHook_SetMmapHook(hook);
}
inline static MunmapHook GetMunmapHook();
inline static MunmapHook SetMunmapHook(MunmapHook hook) {
return MallocHook_SetMunmapHook(hook);
}
inline static MremapHook GetMremapHook();
inline static MremapHook SetMremapHook(MremapHook hook) {
return MallocHook_SetMremapHook(hook);
}
inline static PreSbrkHook GetPreSbrkHook();
inline static PreSbrkHook SetPreSbrkHook(PreSbrkHook hook) {
return MallocHook_SetPreSbrkHook(hook);
}
inline static SbrkHook GetSbrkHook();
inline static SbrkHook SetSbrkHook(SbrkHook hook) {
return MallocHook_SetSbrkHook(hook);
}
// End of DEPRECATED methods.
private:
// Slow path versions of Invoke*Hook.
static void InvokeNewHookSlow(const void* p, size_t s);
static void InvokeDeleteHookSlow(const void* p);
static void InvokePreMmapHookSlow(const void* start,
size_t size,
int protection,
int flags,
int fd,
off_t offset);
static void InvokeMmapHookSlow(const void* result,
const void* start,
size_t size,
int protection,
int flags,
int fd,
off_t offset);
static bool InvokeMmapReplacementSlow(const void* start,
size_t size,
int protection,
int flags,
int fd,
off_t offset,
void** result);
static void InvokeMunmapHookSlow(const void* p, size_t size);
static bool InvokeMunmapReplacementSlow(const void* p,
size_t size,
int* result);
static void InvokeMremapHookSlow(const void* result,
const void* old_addr,
size_t old_size,
size_t new_size,
int flags,
const void* new_addr);
static void InvokePreSbrkHookSlow(ptrdiff_t increment);
static void InvokeSbrkHookSlow(const void* result, ptrdiff_t increment);
};
#ifdef _MSC_VER
#pragma warning(pop)
#endif
#endif /* _MALLOC_HOOK_H_ */
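
For context, a minimal sketch of registering one of these hooks, assuming the header is installed as <gperftools/malloc_hook.h> and the program links against tcmalloc; the counter and hook names are illustrative, not part of the API:

#include <atomic>
#include <cstdio>
#include <cstdlib>
#include <gperftools/malloc_hook.h>

static std::atomic<size_t> g_alloc_count{0};  // illustrative counter

// Hooks must be thread-safe; a relaxed atomic increment satisfies that.
static void CountingNewHook(const void* /*ptr*/, size_t /*size*/) {
  g_alloc_count.fetch_add(1, std::memory_order_relaxed);
}

int main() {
  if (!MallocHook::AddNewHook(&CountingNewHook))  // false if no slot is free
    return 1;
  void* p = malloc(128);                          // CountingNewHook fires here
  free(p);
  MallocHook::RemoveNewHook(&CountingNewHook);
  std::printf("allocations observed: %zu\n", g_alloc_count.load());
  return 0;
}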

View File

@ -1,173 +0,0 @@
/* Copyright (c) 2008, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* --
* Author: Craig Silverstein
*
* C shims for the C++ malloc_hook.h. See malloc_hook.h for details
* on how to use these.
*/
#ifndef _MALLOC_HOOK_C_H_
#define _MALLOC_HOOK_C_H_
#include <stddef.h>
#include <sys/types.h>
/* Annoying stuff for windows; makes sure clients can import these functions */
#ifndef PERFTOOLS_DLL_DECL
# ifdef _WIN32
# define PERFTOOLS_DLL_DECL __declspec(dllimport)
# else
# define PERFTOOLS_DLL_DECL
# endif
#endif
#ifdef __cplusplus
extern "C" {
#endif
/* Get the current stack trace. Try to skip all routines up to
* and including the caller of MallocHook::Invoke*.
* Use "skip_count" (similarly to GetStackTrace from stacktrace.h)
* as a hint about how many routines to skip if better information
* is not available.
*/
PERFTOOLS_DLL_DECL
int MallocHook_GetCallerStackTrace(void** result, int max_depth,
int skip_count);
/* The MallocHook_{Add,Remove}*Hook functions return 1 on success and 0 on
* failure.
*/
typedef void (*MallocHook_NewHook)(const void* ptr, size_t size);
PERFTOOLS_DLL_DECL
int MallocHook_AddNewHook(MallocHook_NewHook hook);
PERFTOOLS_DLL_DECL
int MallocHook_RemoveNewHook(MallocHook_NewHook hook);
typedef void (*MallocHook_DeleteHook)(const void* ptr);
PERFTOOLS_DLL_DECL
int MallocHook_AddDeleteHook(MallocHook_DeleteHook hook);
PERFTOOLS_DLL_DECL
int MallocHook_RemoveDeleteHook(MallocHook_DeleteHook hook);
typedef void (*MallocHook_PreMmapHook)(const void *start,
size_t size,
int protection,
int flags,
int fd,
off_t offset);
PERFTOOLS_DLL_DECL
int MallocHook_AddPreMmapHook(MallocHook_PreMmapHook hook);
PERFTOOLS_DLL_DECL
int MallocHook_RemovePreMmapHook(MallocHook_PreMmapHook hook);
typedef void (*MallocHook_MmapHook)(const void* result,
const void* start,
size_t size,
int protection,
int flags,
int fd,
off_t offset);
PERFTOOLS_DLL_DECL
int MallocHook_AddMmapHook(MallocHook_MmapHook hook);
PERFTOOLS_DLL_DECL
int MallocHook_RemoveMmapHook(MallocHook_MmapHook hook);
typedef int (*MallocHook_MmapReplacement)(const void* start,
size_t size,
int protection,
int flags,
int fd,
off_t offset,
void** result);
int MallocHook_SetMmapReplacement(MallocHook_MmapReplacement hook);
int MallocHook_RemoveMmapReplacement(MallocHook_MmapReplacement hook);
typedef void (*MallocHook_MunmapHook)(const void* ptr, size_t size);
PERFTOOLS_DLL_DECL
int MallocHook_AddMunmapHook(MallocHook_MunmapHook hook);
PERFTOOLS_DLL_DECL
int MallocHook_RemoveMunmapHook(MallocHook_MunmapHook hook);
typedef int (*MallocHook_MunmapReplacement)(const void* ptr,
size_t size,
int* result);
int MallocHook_SetMunmapReplacement(MallocHook_MunmapReplacement hook);
int MallocHook_RemoveMunmapReplacement(MallocHook_MunmapReplacement hook);
typedef void (*MallocHook_MremapHook)(const void* result,
const void* old_addr,
size_t old_size,
size_t new_size,
int flags,
const void* new_addr);
PERFTOOLS_DLL_DECL
int MallocHook_AddMremapHook(MallocHook_MremapHook hook);
PERFTOOLS_DLL_DECL
int MallocHook_RemoveMremapHook(MallocHook_MremapHook hook);
typedef void (*MallocHook_PreSbrkHook)(ptrdiff_t increment);
PERFTOOLS_DLL_DECL
int MallocHook_AddPreSbrkHook(MallocHook_PreSbrkHook hook);
PERFTOOLS_DLL_DECL
int MallocHook_RemovePreSbrkHook(MallocHook_PreSbrkHook hook);
typedef void (*MallocHook_SbrkHook)(const void* result, ptrdiff_t increment);
PERFTOOLS_DLL_DECL
int MallocHook_AddSbrkHook(MallocHook_SbrkHook hook);
PERFTOOLS_DLL_DECL
int MallocHook_RemoveSbrkHook(MallocHook_SbrkHook hook);
/* The following are DEPRECATED. */
PERFTOOLS_DLL_DECL
MallocHook_NewHook MallocHook_SetNewHook(MallocHook_NewHook hook);
PERFTOOLS_DLL_DECL
MallocHook_DeleteHook MallocHook_SetDeleteHook(MallocHook_DeleteHook hook);
PERFTOOLS_DLL_DECL
MallocHook_PreMmapHook MallocHook_SetPreMmapHook(MallocHook_PreMmapHook hook);
PERFTOOLS_DLL_DECL
MallocHook_MmapHook MallocHook_SetMmapHook(MallocHook_MmapHook hook);
PERFTOOLS_DLL_DECL
MallocHook_MunmapHook MallocHook_SetMunmapHook(MallocHook_MunmapHook hook);
PERFTOOLS_DLL_DECL
MallocHook_MremapHook MallocHook_SetMremapHook(MallocHook_MremapHook hook);
PERFTOOLS_DLL_DECL
MallocHook_PreSbrkHook MallocHook_SetPreSbrkHook(MallocHook_PreSbrkHook hook);
PERFTOOLS_DLL_DECL
MallocHook_SbrkHook MallocHook_SetSbrkHook(MallocHook_SbrkHook hook);
/* End of DEPRECATED functions. */
#ifdef __cplusplus
} // extern "C"
#endif
#endif /* _MALLOC_HOOK_C_H_ */
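
The shim can be exercised without any C++; below is a hedged sketch of a new-hook that samples the allocating call stack through MallocHook_GetCallerStackTrace (the depth of 16 and the hook name are arbitrary illustrations):

#include <stddef.h>
#include <gperftools/malloc_hook_c.h>

/* Invoked on every allocation; samples up to 16 frames of the caller's stack. */
static void StackSamplingNewHook(const void* ptr, size_t size) {
  void* stack[16];
  int depth = MallocHook_GetCallerStackTrace(stack, 16, /* skip_count = */ 0);
  (void)ptr; (void)size; (void)depth;  /* a real tool would record these */
}

int main(void) {
  MallocHook_AddNewHook(&StackSamplingNewHook);   /* returns 1 on success */
  /* ... run the workload being observed ... */
  MallocHook_RemoveNewHook(&StackSamplingNewHook);
  return 0;
}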

View File

@ -1,169 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2005, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ---
* Author: Sanjay Ghemawat
*
* Module for CPU profiling based on periodic pc-sampling.
*
* For full(er) information, see doc/cpuprofile.html
*
* This module is linked into your program and causes no slowdown
* unless you activate the profiler
* using one of the following methods:
*
* 1. Before starting the program, set the environment variable
* "CPUPROFILE" to be the name of the file to which the profile
* data should be written.
*
* 2. Programmatically, start and stop the profiler using the
* routines "ProfilerStart(filename)" and "ProfilerStop()".
*
*
* (Note: if using Linux 2.4 or earlier, only the main thread may be
* profiled.)
*
* Use pprof to view the resulting profile output.
* % pprof <path_to_executable> <profile_file_name>
* % pprof --gv <path_to_executable> <profile_file_name>
*
* These functions are thread-safe.
*/
#ifndef BASE_PROFILER_H_
#define BASE_PROFILER_H_
#include <time.h> /* For time_t */
/* Annoying stuff for windows; makes sure clients can import these functions */
#ifndef PERFTOOLS_DLL_DECL
# ifdef _WIN32
# define PERFTOOLS_DLL_DECL __declspec(dllimport)
# else
# define PERFTOOLS_DLL_DECL
# endif
#endif
/* All this code should be usable from within C apps. */
#ifdef __cplusplus
extern "C" {
#endif
/* Profiler options, for use with ProfilerStartWithOptions. To use:
*
* struct ProfilerOptions options;
* memset(&options, 0, sizeof options);
*
* then fill in fields as needed.
*
* This structure is intended to be usable from C code, so no constructor
* is provided to initialize it. (Use memset as described above).
*/
struct ProfilerOptions {
/* Filter function and argument.
*
* If filter_in_thread is not NULL, when a profiling tick is delivered
* the profiler will call:
*
* (*filter_in_thread)(filter_in_thread_arg)
*
* If it returns nonzero, the sample will be included in the profile.
* Note that filter_in_thread runs in a signal handler, so must be
* async-signal-safe.
*
* A typical use would be to set up filter results for each thread
* in the system before starting the profiler, then to make
* filter_in_thread be a very simple function which retrieves those
* results in an async-signal-safe way. Retrieval could be done
* using thread-specific data, or using a shared data structure that
* supports async-signal-safe lookups.
*/
int (*filter_in_thread)(void *arg);
void *filter_in_thread_arg;
};
/* Start profiling and write profile info into fname, discarding any
* existing profiling data in that file.
*
* This is equivalent to calling ProfilerStartWithOptions(fname, NULL).
*/
PERFTOOLS_DLL_DECL int ProfilerStart(const char* fname);
/* Start profiling and write profile into fname, discarding any
* existing profiling data in that file.
*
* The profiler is configured using the options given by 'options'.
* Options which are not specified are given default values.
*
* 'options' may be NULL, in which case all are given default values.
*
* Returns nonzero if profiling was started successfully, and zero otherwise.
*/
PERFTOOLS_DLL_DECL int ProfilerStartWithOptions(
const char *fname, const struct ProfilerOptions *options);
/* Stop profiling. Can be started again with ProfilerStart(), but
* the currently accumulated profiling data will be cleared.
*/
PERFTOOLS_DLL_DECL void ProfilerStop(void);
/* Flush any currently buffered profiling state to the profile file.
* Has no effect if the profiler has not been started.
*/
PERFTOOLS_DLL_DECL void ProfilerFlush(void);
/* DEPRECATED: these functions were used to enable/disable profiling
* in the current thread, but no longer do anything.
*/
PERFTOOLS_DLL_DECL void ProfilerEnable(void);
PERFTOOLS_DLL_DECL void ProfilerDisable(void);
/* Returns nonzero if profiling is currently enabled, zero if it's not. */
PERFTOOLS_DLL_DECL int ProfilingIsEnabledForAllThreads(void);
/* Routine for registering new threads with the profiler.
*/
PERFTOOLS_DLL_DECL void ProfilerRegisterThread(void);
/* Stores state about profiler's current status into "*state". */
struct ProfilerState {
int enabled; /* Is profiling currently enabled? */
time_t start_time; /* If enabled, when was profiling started? */
char profile_name[1024]; /* Name of profile file being written, or '\0' */
int samples_gathered; /* Number of samples gathered so far (or 0) */
};
PERFTOOLS_DLL_DECL void ProfilerGetCurrentState(struct ProfilerState* state);
#ifdef __cplusplus
} // extern "C"
#endif
#endif /* BASE_PROFILER_H_ */
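
A usage sketch; the output path and the workload function are hypothetical, and only the Profiler* calls come from this header. The resulting file is then viewed with pprof as shown in the header comment:

#include <gperftools/profiler.h>

static void heavy_work() {  // hypothetical workload
  volatile double x = 0;
  for (long i = 0; i < 100000000L; ++i) x += i * 0.5;
}

int main() {
  if (ProfilerStart("/tmp/myprog.prof")) {  // returns nonzero on success
    heavy_work();
    ProfilerFlush();                        // optional: persist buffered samples
    ProfilerStop();
  }
  return 0;
}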

View File

@ -1,117 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2005, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
// Author: Sanjay Ghemawat
//
// Routines to extract the current stack trace. These functions are
// thread-safe.
#ifndef GOOGLE_STACKTRACE_H_
#define GOOGLE_STACKTRACE_H_
// Annoying stuff for windows -- makes sure clients can import these functions
#ifndef PERFTOOLS_DLL_DECL
# ifdef _WIN32
# define PERFTOOLS_DLL_DECL __declspec(dllimport)
# else
# define PERFTOOLS_DLL_DECL
# endif
#endif
// Skips the most recent "skip_count" stack frames (also skips the
// frame generated for the "GetStackFrames" routine itself), and then
// records the pc values for up to the next "max_depth" frames in
// "result", and the corresponding stack frame sizes in "sizes".
// Returns the number of values recorded in "result"/"sizes".
//
// Example:
// main() { foo(); }
// foo() { bar(); }
// bar() {
// void* result[10];
// int sizes[10];
// int depth = GetStackFrames(result, sizes, 10, 1);
// }
//
// The GetStackFrames call will skip the frame for "bar". It will
// return 2 and will produce pc values that map to the following
// procedures:
// result[0] foo
// result[1] main
// (Actually, there may be a few more entries after "main" to account for
// startup procedures.)
// And corresponding stack frame sizes will also be recorded:
// sizes[0] 16
// sizes[1] 16
// (Stack frame sizes of 16 above are just for illustration purposes.)
// Stack frame sizes of 0 or less indicate that those frame sizes couldn't
// be identified.
//
// This routine may return fewer stack frame entries than are
// available. Also note that "result" and "sizes" must both be non-NULL.
extern PERFTOOLS_DLL_DECL int GetStackFrames(void** result, int* sizes, int max_depth,
int skip_count);
// Same as above, but to be used from a signal handler. The "uc" parameter
// should be the pointer to ucontext_t which was passed as the 3rd parameter
// to the sa_sigaction signal handler. It may help the unwinder to get a
// better stack trace under certain conditions. The "uc" may safely be NULL.
extern PERFTOOLS_DLL_DECL int GetStackFramesWithContext(void** result, int* sizes, int max_depth,
int skip_count, const void *uc);
// This is similar to the GetStackFrames routine, except that it returns
// the stack trace only, and not the stack frame sizes as well.
// Example:
// main() { foo(); }
// foo() { bar(); }
// bar() {
// void* result[10];
// int depth = GetStackTrace(result, 10, 1);
// }
//
// This produces:
// result[0] foo
// result[1] main
// .... ...
//
// "result" must not be NULL.
extern PERFTOOLS_DLL_DECL int GetStackTrace(void** result, int max_depth,
int skip_count);
// Same as above, but to be used from a signal handler. The "uc" parameter
// should be the pointer to ucontext_t which was passed as the 3rd parameter
// to the sa_sigaction signal handler. It may help the unwinder to get a
// better stack trace under certain conditions. The "uc" may safely be NULL.
extern PERFTOOLS_DLL_DECL int GetStackTraceWithContext(void** result, int max_depth,
int skip_count, const void *uc);
#endif /* GOOGLE_STACKTRACE_H_ */
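
A compilable variant of the bar() example from the comments above; the reported depths and frame sizes depend on the host platform and unwinder:

#include <cstdio>
#include <gperftools/stacktrace.h>

static void bar() {
  void* result[10];
  int sizes[10];
  int depth = GetStackFrames(result, sizes, 10, /* skip_count = */ 1);  // skip bar's frame
  for (int i = 0; i < depth; ++i)
    std::printf("frame %d: pc=%p size=%d\n", i, result[i], sizes[i]);
}

static void foo() { bar(); }

int main() { foo(); return 0; }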

View File

@ -1,160 +0,0 @@
// -*- Mode: C; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2003, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ---
* Author: Sanjay Ghemawat <opensource@google.com>
* .h file by Craig Silverstein <opensource@google.com>
*/
#ifndef TCMALLOC_TCMALLOC_H_
#define TCMALLOC_TCMALLOC_H_
#include <stddef.h> /* for size_t */
/* Define the version number so folks can check against it */
#define TC_VERSION_MAJOR 2
#define TC_VERSION_MINOR 5
#define TC_VERSION_PATCH ""
#define TC_VERSION_STRING "gperftools 2.5"
/* For struct mallinfo, if it's defined. */
#if !defined(__APPLE__) && !defined(__FreeBSD__)
# include <malloc.h>
#else
struct mallinfo {
size_t arena; /* non-mmapped space allocated from system */
size_t ordblks; /* number of free chunks */
size_t smblks; /* always 0 */
size_t hblks; /* always 0 */
size_t hblkhd; /* space in mmapped regions */
size_t usmblks; /* maximum total allocated space */
size_t fsmblks; /* always 0 */
size_t uordblks; /* total allocated space */
size_t fordblks; /* total free space */
size_t keepcost; /* releasable (via malloc_trim) space */
};
#endif
#ifdef __cplusplus
#define PERFTOOLS_THROW throw()
#else
# ifdef __GNUC__
# define PERFTOOLS_THROW __attribute__((__nothrow__))
# else
# define PERFTOOLS_THROW
# endif
#endif
#ifndef PERFTOOLS_DLL_DECL
#define PERFTOOLS_DLL_DECL_DEFINED
# ifdef _WIN32
# define PERFTOOLS_DLL_DECL __declspec(dllimport)
# else
# define PERFTOOLS_DLL_DECL
# endif
#endif
#ifdef __cplusplus
namespace std {
struct nothrow_t;
}
extern "C" {
#endif
/*
* Returns a human-readable version string. If major, minor,
* and/or patch are not NULL, they are set to the major version,
* minor version, and patch-code (a string, usually "").
*/
PERFTOOLS_DLL_DECL const char* tc_version(int* major, int* minor,
const char** patch) PERFTOOLS_THROW;
PERFTOOLS_DLL_DECL void* tc_malloc(size_t size) PERFTOOLS_THROW;
PERFTOOLS_DLL_DECL void* tc_malloc_skip_new_handler(size_t size) PERFTOOLS_THROW;
PERFTOOLS_DLL_DECL void tc_free(void* ptr) PERFTOOLS_THROW;
PERFTOOLS_DLL_DECL void tc_free_sized(void *ptr, size_t size) PERFTOOLS_THROW;
PERFTOOLS_DLL_DECL void* tc_realloc(void* ptr, size_t size) PERFTOOLS_THROW;
PERFTOOLS_DLL_DECL void* tc_calloc(size_t nmemb, size_t size) PERFTOOLS_THROW;
PERFTOOLS_DLL_DECL void tc_cfree(void* ptr) PERFTOOLS_THROW;
PERFTOOLS_DLL_DECL void* tc_memalign(size_t __alignment,
size_t __size) PERFTOOLS_THROW;
PERFTOOLS_DLL_DECL int tc_posix_memalign(void** ptr,
size_t align, size_t size) PERFTOOLS_THROW;
PERFTOOLS_DLL_DECL void* tc_valloc(size_t __size) PERFTOOLS_THROW;
PERFTOOLS_DLL_DECL void* tc_pvalloc(size_t __size) PERFTOOLS_THROW;
PERFTOOLS_DLL_DECL void tc_malloc_stats(void) PERFTOOLS_THROW;
PERFTOOLS_DLL_DECL int tc_mallopt(int cmd, int value) PERFTOOLS_THROW;
#if 1
PERFTOOLS_DLL_DECL struct mallinfo tc_mallinfo(void) PERFTOOLS_THROW;
#endif
/*
* This is an alias for MallocExtension::instance()->GetAllocatedSize().
* It is equivalent to
* OS X: malloc_size()
* glibc: malloc_usable_size()
* Windows: _msize()
*/
PERFTOOLS_DLL_DECL size_t tc_malloc_size(void* ptr) PERFTOOLS_THROW;
#ifdef __cplusplus
PERFTOOLS_DLL_DECL int tc_set_new_mode(int flag) PERFTOOLS_THROW;
PERFTOOLS_DLL_DECL void* tc_new(size_t size);
PERFTOOLS_DLL_DECL void* tc_new_nothrow(size_t size,
const std::nothrow_t&) PERFTOOLS_THROW;
PERFTOOLS_DLL_DECL void tc_delete(void* p) PERFTOOLS_THROW;
PERFTOOLS_DLL_DECL void tc_delete_sized(void* p, size_t size) throw();
PERFTOOLS_DLL_DECL void tc_delete_nothrow(void* p,
const std::nothrow_t&) PERFTOOLS_THROW;
PERFTOOLS_DLL_DECL void* tc_newarray(size_t size);
PERFTOOLS_DLL_DECL void* tc_newarray_nothrow(size_t size,
const std::nothrow_t&) PERFTOOLS_THROW;
PERFTOOLS_DLL_DECL void tc_deletearray(void* p) PERFTOOLS_THROW;
PERFTOOLS_DLL_DECL void tc_deletearray_sized(void* p, size_t size) throw();
PERFTOOLS_DLL_DECL void tc_deletearray_nothrow(void* p,
const std::nothrow_t&) PERFTOOLS_THROW;
}
#endif
/* We only un-define these for public (non-internal) use. */
#if !defined(GPERFTOOLS_CONFIG_H_)
#undef PERFTOOLS_THROW
#ifdef PERFTOOLS_DLL_DECL_DEFINED
#undef PERFTOOLS_DLL_DECL
#undef PERFTOOLS_DLL_DECL_DEFINED
#endif
#endif /* GPERFTOOLS_CONFIG_H_ */
#endif /* #ifndef TCMALLOC_TCMALLOC_H_ */
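
Linking tcmalloc normally replaces malloc/free outright, so the tc_* aliases are rarely called by name; still, a sketch of the explicit entry points (header path assumed to be <gperftools/tcmalloc.h>):

#include <cstdio>
#include <gperftools/tcmalloc.h>

int main() {
  int major, minor;
  const char* patch;
  std::printf("version: %s\n", tc_version(&major, &minor, &patch));
  void* p = tc_malloc(64);
  // Alias for MallocExtension::instance()->GetAllocatedSize(); may exceed 64.
  std::printf("allocated size: %zu\n", tc_malloc_size(p));
  tc_free(p);
  return 0;
}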

View File

@ -1,422 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2005, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
// Author: Sanjay Ghemawat
//
// A fast map from addresses to values. Assumes that addresses are
// clustered. The main use is intended to be for heap-profiling.
// May be too memory-hungry for other uses.
//
// We use a user-defined allocator/de-allocator so that we can use
// this data structure during heap-profiling.
//
// IMPLEMENTATION DETAIL:
//
// Some default definitions/parameters:
// * Block -- aligned 128-byte region of the address space
// * Cluster -- aligned 1-MB region of the address space
// * Block-ID -- block-number within a cluster
// * Cluster-ID -- Starting address of cluster divided by cluster size
//
// We use a three-level map to represent the state:
// 1. A hash-table maps from a cluster-ID to the data for that cluster.
// 2. For each non-empty cluster we keep an array indexed by
// block-ID that points to the first entry in the linked-list
// for the block.
// 3. At the bottom, we keep a singly-linked list of all
// entries in a block (for non-empty blocks).
//
// hash table
// +-------------+
// | id->cluster |---> ...
// | ... |
// | id->cluster |---> Cluster
// +-------------+ +-------+ Data for one block
// | nil | +------------------------------------+
// | ----+---|->[addr/value]-->[addr/value]-->... |
// | nil | +------------------------------------+
// | ----+--> ...
// | nil |
// | ... |
// +-------+
//
// Note that we require zero-bytes of overhead for completely empty
// clusters. The minimum space requirement for a cluster is the size
// of the hash-table entry plus a pointer value for each block in
// the cluster. Empty blocks impose no extra space requirement.
//
// The cost of a lookup is:
// a. A hash-table lookup to find the cluster
// b. An array access in the cluster structure
// c. A traversal over the linked-list for a block
#ifndef BASE_ADDRESSMAP_INL_H_
#define BASE_ADDRESSMAP_INL_H_
#include "config.h"
#include <stddef.h>
#include <string.h>
#if defined HAVE_STDINT_H
#include <stdint.h> // to get uint16_t (ISO naming madness)
#elif defined HAVE_INTTYPES_H
#include <inttypes.h> // another place uint16_t might be defined
#else
#include <sys/types.h> // our last best hope
#endif
// This class is thread-unsafe -- that is, instances of this class can
// not be accessed concurrently by multiple threads -- because the
// callback function for Iterate() may mutate contained values. If the
// callback functions you pass do not mutate their Value* argument,
// AddressMap can be treated as thread-compatible -- that is, it's
// safe for multiple threads to call "const" methods on this class,
// but not safe for one thread to call const methods on this class
// while another thread is calling non-const methods on the class.
template <class Value>
class AddressMap {
public:
typedef void* (*Allocator)(size_t size);
typedef void (*DeAllocator)(void* ptr);
typedef const void* Key;
// Create an AddressMap that uses the specified allocator/deallocator.
// The allocator/deallocator should behave like malloc/free.
// For instance, the allocator does not need to return initialized memory.
AddressMap(Allocator alloc, DeAllocator dealloc);
~AddressMap();
// If the map contains an entry for "key", return it. Else return NULL.
inline const Value* Find(Key key) const;
inline Value* FindMutable(Key key);
// Insert <key,value> into the map. Any old value associated
// with key is forgotten.
void Insert(Key key, Value value);
// Remove any entry for key in the map. If an entry was found
// and removed, stores the associated value in "*removed_value"
// and returns true. Else returns false.
bool FindAndRemove(Key key, Value* removed_value);
// Similar to Find but we assume that keys are addresses of non-overlapping
// memory ranges whose sizes are given by size_func.
// If the map contains a range into which "key" points
// (at its start or inside of it, but not at the end),
// return the address of the associated value
// and store its key in "*res_key".
// Else return NULL.
// max_size specifies largest range size possibly in existence now.
typedef size_t (*ValueSizeFunc)(const Value& v);
const Value* FindInside(ValueSizeFunc size_func, size_t max_size,
Key key, Key* res_key);
// Iterate over the address map calling 'callback'
// for all stored key-value pairs and passing 'arg' to it.
// We don't use the full Closure/Callback machinery, to avoid adding
// unnecessary dependencies to this class from its low-level uses.
template<class Type>
inline void Iterate(void (*callback)(Key, Value*, Type), Type arg) const;
private:
typedef uintptr_t Number;
// The implementation assumes that addresses inserted into the map
// will be clustered. We take advantage of this fact by splitting
// up the address-space into blocks and using a linked-list entry
// for each block.
// Size of each block. There is one linked-list for each block, so
// do not make the block-size too big. Otherwise, a lot of time
// will be spent traversing linked lists.
static const int kBlockBits = 7;
static const int kBlockSize = 1 << kBlockBits;
// Entry kept in per-block linked-list
struct Entry {
Entry* next;
Key key;
Value value;
};
// We further group a sequence of consecutive blocks into a cluster.
// The data for a cluster is represented as a dense array of
// linked-lists, one list per contained block.
static const int kClusterBits = 13;
static const Number kClusterSize = 1 << (kBlockBits + kClusterBits);
static const int kClusterBlocks = 1 << kClusterBits;
// We use a simple chaining hash-table to represent the clusters.
struct Cluster {
Cluster* next; // Next cluster in hash table chain
Number id; // Cluster ID
Entry* blocks[kClusterBlocks]; // Per-block linked-lists
};
// Number of hash-table entries. With the block-size/cluster-size
// defined above, each cluster covers 1 MB, so a 4K-entry
// hash-table will give an average hash-chain length of 1 for 4GB of
// in-use memory.
static const int kHashBits = 12;
static const int kHashSize = 1 << kHashBits;
// Number of entry objects allocated at a time
static const int ALLOC_COUNT = 64;
Cluster** hashtable_; // The hash-table
Entry* free_; // Free list of unused Entry objects
// Multiplicative hash function:
// The value "kHashMultiplier" is the bottom 32 bits of
// int((sqrt(5)-1)/2 * 2^32)
// This is a good multiplier as suggested in CLR, Knuth. The hash
// value is taken to be the top "k" bits of the bottom 32 bits
// of the multiplied value.
static const uint32_t kHashMultiplier = 2654435769u;
static int HashInt(Number x) {
// Multiply by a constant and take the top bits of the result.
const uint32_t m = static_cast<uint32_t>(x) * kHashMultiplier;
return static_cast<int>(m >> (32 - kHashBits));
}
// Find cluster object for specified address. If not found
// and "create" is true, create the object. If not found
// and "create" is false, return NULL.
//
// This method is bitwise-const if create is false.
Cluster* FindCluster(Number address, bool create) {
// Look in hashtable
const Number cluster_id = address >> (kBlockBits + kClusterBits);
const int h = HashInt(cluster_id);
for (Cluster* c = hashtable_[h]; c != NULL; c = c->next) {
if (c->id == cluster_id) {
return c;
}
}
// Create cluster if necessary
if (create) {
Cluster* c = New<Cluster>(1);
c->id = cluster_id;
c->next = hashtable_[h];
hashtable_[h] = c;
return c;
}
return NULL;
}
// Return the block ID for an address within its cluster
static int BlockID(Number address) {
return (address >> kBlockBits) & (kClusterBlocks - 1);
}
//--------------------------------------------------------------
// Memory management -- we keep all objects we allocate linked
// together in a singly linked list so we can get rid of them
// when we are all done. Furthermore, we allow the client to
// pass in custom memory allocator/deallocator routines.
//--------------------------------------------------------------
struct Object {
Object* next;
// The real data starts here
};
Allocator alloc_; // The allocator
DeAllocator dealloc_; // The deallocator
Object* allocated_; // List of allocated objects
// Allocates a zeroed array of T with length "num". Also inserts
// the allocated block into a linked list so it can be deallocated
// when we are all done.
template <class T> T* New(int num) {
void* ptr = (*alloc_)(sizeof(Object) + num*sizeof(T));
memset(ptr, 0, sizeof(Object) + num*sizeof(T));
Object* obj = reinterpret_cast<Object*>(ptr);
obj->next = allocated_;
allocated_ = obj;
return reinterpret_cast<T*>(reinterpret_cast<Object*>(ptr) + 1);
}
};
// More implementation details follow:
template <class Value>
AddressMap<Value>::AddressMap(Allocator alloc, DeAllocator dealloc)
: free_(NULL),
alloc_(alloc),
dealloc_(dealloc),
allocated_(NULL) {
hashtable_ = New<Cluster*>(kHashSize);
}
template <class Value>
AddressMap<Value>::~AddressMap() {
// De-allocate all of the objects we allocated
for (Object* obj = allocated_; obj != NULL; /**/) {
Object* next = obj->next;
(*dealloc_)(obj);
obj = next;
}
}
template <class Value>
inline const Value* AddressMap<Value>::Find(Key key) const {
return const_cast<AddressMap*>(this)->FindMutable(key);
}
template <class Value>
inline Value* AddressMap<Value>::FindMutable(Key key) {
const Number num = reinterpret_cast<Number>(key);
const Cluster* const c = FindCluster(num, false/*do not create*/);
if (c != NULL) {
for (Entry* e = c->blocks[BlockID(num)]; e != NULL; e = e->next) {
if (e->key == key) {
return &e->value;
}
}
}
return NULL;
}
template <class Value>
void AddressMap<Value>::Insert(Key key, Value value) {
const Number num = reinterpret_cast<Number>(key);
Cluster* const c = FindCluster(num, true/*create*/);
// Look in linked-list for this block
const int block = BlockID(num);
for (Entry* e = c->blocks[block]; e != NULL; e = e->next) {
if (e->key == key) {
e->value = value;
return;
}
}
// Create entry
if (free_ == NULL) {
// Allocate a new batch of entries and add to free-list
Entry* array = New<Entry>(ALLOC_COUNT);
for (int i = 0; i < ALLOC_COUNT-1; i++) {
array[i].next = &array[i+1];
}
array[ALLOC_COUNT-1].next = free_;
free_ = &array[0];
}
Entry* e = free_;
free_ = e->next;
e->key = key;
e->value = value;
e->next = c->blocks[block];
c->blocks[block] = e;
}
template <class Value>
bool AddressMap<Value>::FindAndRemove(Key key, Value* removed_value) {
const Number num = reinterpret_cast<Number>(key);
Cluster* const c = FindCluster(num, false/*do not create*/);
if (c != NULL) {
for (Entry** p = &c->blocks[BlockID(num)]; *p != NULL; p = &(*p)->next) {
Entry* e = *p;
if (e->key == key) {
*removed_value = e->value;
*p = e->next; // Remove e from linked-list
e->next = free_; // Add e to free-list
free_ = e;
return true;
}
}
}
return false;
}
template <class Value>
const Value* AddressMap<Value>::FindInside(ValueSizeFunc size_func,
size_t max_size,
Key key,
Key* res_key) {
const Number key_num = reinterpret_cast<Number>(key);
Number num = key_num; // we'll decrement this to walk back through the clusters
while (1) {
const Cluster* c = FindCluster(num, false/*do not create*/);
if (c != NULL) {
while (1) {
const int block = BlockID(num);
bool had_smaller_key = false;
for (const Entry* e = c->blocks[block]; e != NULL; e = e->next) {
const Number e_num = reinterpret_cast<Number>(e->key);
if (e_num <= key_num) {
if (e_num == key_num || // to handle 0-sized ranges
key_num < e_num + (*size_func)(e->value)) {
*res_key = e->key;
return &e->value;
}
had_smaller_key = true;
}
}
if (had_smaller_key) return NULL; // got a range before 'key'
// and it did not contain 'key'
if (block == 0) break;
// try address-wise previous block
num |= kBlockSize - 1; // start at the last addr of prev block
num -= kBlockSize;
if (key_num - num > max_size) return NULL;
}
}
if (num < kClusterSize) return NULL; // first cluster
// go to address-wise previous cluster to try
num |= kClusterSize - 1; // start at the last block of previous cluster
num -= kClusterSize;
if (key_num - num > max_size) return NULL;
// Having max_size to limit the search is crucial: else
// we have to traverse a lot of empty clusters (or blocks).
// We can avoid needing max_size if we put clusters into
// a search tree, but performance suffers considerably
// if we implement this approach using std::set.
}
}
template <class Value>
template <class Type>
inline void AddressMap<Value>::Iterate(void (*callback)(Key, Value*, Type),
Type arg) const {
// We could optimize this by traversing only non-empty clusters and/or blocks
// but it does not speed up heap-checker noticeably.
for (int h = 0; h < kHashSize; ++h) {
for (const Cluster* c = hashtable_[h]; c != NULL; c = c->next) {
for (int b = 0; b < kClusterBlocks; ++b) {
for (Entry* e = c->blocks[b]; e != NULL; e = e->next) {
callback(e->key, &e->value, arg);
}
}
}
}
}
#endif // BASE_ADDRESSMAP_INL_H_
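
A sketch of the intended use, injecting plain malloc/free as the allocator pair (any malloc-like pair satisfies the constructor contract above); keying on a stack variable's address is purely illustrative:

#include <cstdlib>
#include "addressmap-inl.h"  // include path is illustrative

int main() {
  AddressMap<int> map(&malloc, &free);
  int object;                                      // any address works as a key
  map.Insert(&object, 42);
  const int* found = map.Find(&object);            // pointer to the stored 42
  int removed = 0;
  bool ok = map.FindAndRemove(&object, &removed);  // removed == 42, ok == true
  (void)found; (void)ok;
  return 0;
}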

View File

@ -1,84 +0,0 @@
// Copyright (c) 2011, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Author: Alexander Levitskiy
//
// Generalizes the plethora of available ARM flavors into an easier-to-manage set.
// Defs reference is at https://wiki.edubuntu.org/ARM/Thumb2PortingHowto
#ifndef ARM_INSTRUCTION_SET_SELECT_H_
#define ARM_INSTRUCTION_SET_SELECT_H_
#if defined(__ARM_ARCH_8A__)
# define ARMV8 1
#endif
#if defined(ARMV8) || \
defined(__ARM_ARCH_7__) || \
defined(__ARM_ARCH_7R__) || \
defined(__ARM_ARCH_7A__)
# define ARMV7 1
#endif
#if defined(ARMV7) || \
defined(__ARM_ARCH_6__) || \
defined(__ARM_ARCH_6J__) || \
defined(__ARM_ARCH_6K__) || \
defined(__ARM_ARCH_6Z__) || \
defined(__ARM_ARCH_6T2__) || \
defined(__ARM_ARCH_6ZK__)
# define ARMV6 1
#endif
#if defined(ARMV6) || \
defined(__ARM_ARCH_5T__) || \
defined(__ARM_ARCH_5E__) || \
defined(__ARM_ARCH_5TE__) || \
defined(__ARM_ARCH_5TEJ__)
# define ARMV5 1
#endif
#if defined(ARMV5) || \
defined(__ARM_ARCH_4__) || \
defined(__ARM_ARCH_4T__)
# define ARMV4 1
#endif
#if defined(ARMV4) || \
defined(__ARM_ARCH_3__) || \
defined(__ARM_ARCH_3M__)
# define ARMV3 1
#endif
#if defined(ARMV3) || \
defined(__ARM_ARCH_2__)
# define ARMV2 1
#endif
#endif // ARM_INSTRUCTION_SET_SELECT_H_
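
A sketch of how downstream code keys off the derived ARMV* macros; MY_BARRIER is a hypothetical name, and the per-architecture choices mirror the atomicops implementations that follow:

#include "arm_instruction_set_select.h"

#if defined(ARMV7)
// v7+: dedicated data memory barrier instruction.
# define MY_BARRIER() __asm__ __volatile__("dmb" ::: "memory")
#elif defined(ARMV6)
// v6: CP15 barrier, as in atomicops-internals-arm-v6plus.h.
# define MY_BARRIER() __asm__ __volatile__("mcr p15,0,%0,c7,c10,5" :: "r"(0) : "memory")
#else
// pre-v6: kernel helper at a fixed address (see atomicops-internals-arm-generic.h).
# define MY_BARRIER() ((void (*)(void))0xffff0fa0)()
#endif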

View File

@ -1,228 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2003, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
//
// Author: Lei Zhang, Sasha Levitskiy
//
// This file is an internal atomic implementation, use base/atomicops.h instead.
//
// LinuxKernelCmpxchg is from Google Gears.
#ifndef BASE_ATOMICOPS_INTERNALS_ARM_GENERIC_H_
#define BASE_ATOMICOPS_INTERNALS_ARM_GENERIC_H_
#include <stdio.h>
#include <stdlib.h>
#include "base/basictypes.h"
typedef int32_t Atomic32;
namespace base {
namespace subtle {
typedef int64_t Atomic64;
// 0xffff0fc0 is the hard coded address of a function provided by
// the kernel which implements an atomic compare-exchange. On older
// ARM architecture revisions (pre-v6) this may be implemented using
// a syscall. This address is stable, and in active use (hard coded)
// by at least glibc-2.7 and the Android C library.
// pLinuxKernelCmpxchg has both acquire and release barrier semantics.
typedef Atomic32 (*LinuxKernelCmpxchgFunc)(Atomic32 old_value,
Atomic32 new_value,
volatile Atomic32* ptr);
LinuxKernelCmpxchgFunc pLinuxKernelCmpxchg ATTRIBUTE_WEAK =
(LinuxKernelCmpxchgFunc) 0xffff0fc0;
typedef void (*LinuxKernelMemoryBarrierFunc)(void);
LinuxKernelMemoryBarrierFunc pLinuxKernelMemoryBarrier ATTRIBUTE_WEAK =
(LinuxKernelMemoryBarrierFunc) 0xffff0fa0;
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 prev_value = *ptr;
do {
if (!pLinuxKernelCmpxchg(old_value, new_value,
const_cast<Atomic32*>(ptr))) {
return old_value;
}
prev_value = *ptr;
} while (prev_value == old_value);
return prev_value;
}
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
Atomic32 old_value;
do {
old_value = *ptr;
} while (pLinuxKernelCmpxchg(old_value, new_value,
const_cast<Atomic32*>(ptr)));
return old_value;
}
inline Atomic32 Acquire_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
// pLinuxKernelCmpxchg already has acquire and release barrier semantics.
return NoBarrier_AtomicExchange(ptr, new_value);
}
inline Atomic32 Release_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
// pLinuxKernelCmpxchg already has acquire and release barrier semantics.
return NoBarrier_AtomicExchange(ptr, new_value);
}
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
}
inline void MemoryBarrier() {
pLinuxKernelMemoryBarrier();
}
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
MemoryBarrier();
}
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
MemoryBarrier();
*ptr = value;
}
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
return *ptr;
}
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
Atomic32 value = *ptr;
MemoryBarrier();
return value;
}
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
MemoryBarrier();
return *ptr;
}
// 64-bit versions are not implemented yet.
inline void NotImplementedFatalError(const char *function_name) {
fprintf(stderr, "64-bit %s() not implemented on this platform\n",
function_name);
abort();
}
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
NotImplementedFatalError("NoBarrier_CompareAndSwap");
return 0;
}
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
NotImplementedFatalError("NoBarrier_AtomicExchange");
return 0;
}
inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
// pLinuxKernelCmpxchg already has acquire and release barrier semantics.
return NoBarrier_AtomicExchange(ptr, new_value);
}
inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
// pLinuxKernelCmpxchg already has acquire and release barrier semantics.
return NoBarrier_AtomicExchange(ptr, new_value);
}
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
NotImplementedFatalError("NoBarrier_Store");
}
inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
NotImplementedFatalError("Acquire_Store64");
}
inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
NotImplementedFatalError("Release_Store");
}
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
NotImplementedFatalError("NoBarrier_Load");
return 0;
}
inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
NotImplementedFatalError("Atomic64 Acquire_Load");
return 0;
}
inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
NotImplementedFatalError("Atomic64 Release_Load");
return 0;
}
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
NotImplementedFatalError("Atomic64 Acquire_CompareAndSwap");
return 0;
}
inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
NotImplementedFatalError("Atomic64 Release_CompareAndSwap");
return 0;
}
} // namespace base::subtle
} // namespace base
#endif // BASE_ATOMICOPS_INTERNALS_ARM_GENERIC_H_
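
These internals are consumed through base/atomicops.h; as a sketch, the canonical CAS retry loop for a lock-free increment built on them looks like this (the counter is illustrative):

#include "base/atomicops.h"  // public wrapper over these internals

static Atomic32 g_counter = 0;

// Retry until our CAS wins: NoBarrier_CompareAndSwap returns the value it
// observed, which equals 'old' exactly when the swap succeeded.
static void IncrementCounter() {
  Atomic32 old;
  do {
    old = base::subtle::NoBarrier_Load(&g_counter);
  } while (base::subtle::NoBarrier_CompareAndSwap(&g_counter, old, old + 1) != old);
}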

View File

@ -1,330 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2011, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
//
// Author: Sasha Levitskiy
// based on atomicops-internals by Sanjay Ghemawat
//
// This file is an internal atomic implementation, use base/atomicops.h instead.
//
// This code implements ARM atomics for architectures V6 and newer.
#ifndef BASE_ATOMICOPS_INTERNALS_ARM_V6PLUS_H_
#define BASE_ATOMICOPS_INTERNALS_ARM_V6PLUS_H_
#include <stdio.h>
#include <stdlib.h>
#include "base/basictypes.h" // For COMPILE_ASSERT
// The LDREXD and STREXD instructions are available in all ARM v7 variants
// and above. In v6, only some variants support them. For simplicity, we only
// use exclusive 64-bit load/store in V7 or above.
#if defined(ARMV7)
# define BASE_ATOMICOPS_HAS_LDREXD_AND_STREXD
#endif
typedef int32_t Atomic32;
namespace base {
namespace subtle {
typedef int64_t Atomic64;
// 32-bit low-level ops
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 oldval, res;
do {
__asm__ __volatile__(
"ldrex %1, [%3]\n"
"mov %0, #0\n"
"teq %1, %4\n"
// The following IT (if-then) instruction is needed for the subsequent
// conditional instruction STREXEQ when compiling in THUMB mode.
// In ARM mode, the compiler/assembler will not generate any code for it.
"it eq\n"
"strexeq %0, %5, [%3]\n"
: "=&r" (res), "=&r" (oldval), "+Qo" (*ptr)
: "r" (ptr), "Ir" (old_value), "r" (new_value)
: "cc");
} while (res);
return oldval;
}
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
Atomic32 tmp, old;
__asm__ __volatile__(
"1:\n"
"ldrex %1, [%2]\n"
"strex %0, %3, [%2]\n"
"teq %0, #0\n"
"bne 1b"
: "=&r" (tmp), "=&r" (old)
: "r" (ptr), "r" (new_value)
: "cc", "memory");
return old;
}
inline void MemoryBarrier() {
#if !defined(ARMV7)
uint32_t dest = 0;
__asm__ __volatile__("mcr p15,0,%0,c7,c10,5" :"=&r"(dest) : : "memory");
#else
__asm__ __volatile__("dmb" : : : "memory");
#endif
}
inline Atomic32 Acquire_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
Atomic32 old_value = NoBarrier_AtomicExchange(ptr, new_value);
MemoryBarrier();
return old_value;
}
inline Atomic32 Release_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
MemoryBarrier();
return NoBarrier_AtomicExchange(ptr, new_value);
}
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 value = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
MemoryBarrier();
return value;
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
MemoryBarrier();
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
}
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
MemoryBarrier();
}
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
MemoryBarrier();
*ptr = value;
}
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
return *ptr;
}
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
Atomic32 value = *ptr;
MemoryBarrier();
return value;
}
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
MemoryBarrier();
return *ptr;
}
// 64-bit versions are only available if LDREXD and STREXD instructions
// are available.
#ifdef BASE_ATOMICOPS_HAS_LDREXD_AND_STREXD
#define BASE_HAS_ATOMIC64 1
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 oldval, res;
do {
__asm__ __volatile__(
"ldrexd %1, [%3]\n"
"mov %0, #0\n"
"teq %Q1, %Q4\n"
// The following IT (if-then) instructions are needed for the subsequent
// conditional instructions when compiling in THUMB mode.
// In ARM mode, the compiler/assembler will not generate any code for them.
"it eq\n"
"teqeq %R1, %R4\n"
"it eq\n"
"strexdeq %0, %5, [%3]\n"
: "=&r" (res), "=&r" (oldval), "+Q" (*ptr)
: "r" (ptr), "Ir" (old_value), "r" (new_value)
: "cc");
} while (res);
return oldval;
}
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
int store_failed;
Atomic64 old;
__asm__ __volatile__(
"1:\n"
"ldrexd %1, [%2]\n"
"strexd %0, %3, [%2]\n"
"teq %0, #0\n"
"bne 1b"
: "=&r" (store_failed), "=&r" (old)
: "r" (ptr), "r" (new_value)
: "cc", "memory");
return old;
}
inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
Atomic64 old_value = NoBarrier_AtomicExchange(ptr, new_value);
MemoryBarrier();
return old_value;
}
inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
MemoryBarrier();
return NoBarrier_AtomicExchange(ptr, new_value);
}
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
int store_failed;
Atomic64 dummy;
__asm__ __volatile__(
"1:\n"
// Dummy load to lock cache line.
"ldrexd %1, [%3]\n"
"strexd %0, %2, [%3]\n"
"teq %0, #0\n"
"bne 1b"
: "=&r" (store_failed), "=&r"(dummy)
: "r"(value), "r" (ptr)
: "cc", "memory");
}
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
Atomic64 res;
__asm__ __volatile__(
"ldrexd %0, [%1]\n"
"clrex\n"
: "=r" (res)
: "r"(ptr), "Q"(*ptr));
return res;
}
#else // BASE_ATOMICOPS_HAS_LDREXD_AND_STREXD
inline void NotImplementedFatalError(const char *function_name) {
fprintf(stderr, "64-bit %s() not implemented on this platform\n",
function_name);
abort();
}
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
NotImplementedFatalError("NoBarrier_CompareAndSwap");
return 0;
}
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
NotImplementedFatalError("NoBarrier_AtomicExchange");
return 0;
}
inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
NotImplementedFatalError("Acquire_AtomicExchange");
return 0;
}
inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
NotImplementedFatalError("Release_AtomicExchange");
return 0;
}
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
NotImplementedFatalError("NoBarrier_Store");
}
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
NotImplementedFatalError("NoBarrier_Load");
return 0;
}
#endif // BASE_ATOMICOPS_HAS_LDREXD_AND_STREXD
inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
NoBarrier_Store(ptr, value);
MemoryBarrier();
}
inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
MemoryBarrier();
NoBarrier_Store(ptr, value);
}
inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
Atomic64 value = NoBarrier_Load(ptr);
MemoryBarrier();
return value;
}
inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
MemoryBarrier();
return NoBarrier_Load(ptr);
}
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 value = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
MemoryBarrier();
return value;
}
inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
MemoryBarrier();
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
}   // namespace base::subtle
}   // namespace base
#endif // BASE_ATOMICOPS_INTERNALS_ARM_V6PLUS_H_
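The LDREX/STREX retry loops above implement a compare-and-swap that always returns the value actually observed at the address. The same contract restated with C++11 atomics, purely for orientation (the original predates <atomic>):
#include <atomic>
#include <cstdint>

using Atomic32 = int32_t;

// Sketch: install new_value iff *ptr == old_value; always return the value
// that was observed, exactly as the LDREX/STREX loop above does.
inline Atomic32 NoBarrierCas(std::atomic<Atomic32>* ptr,
                             Atomic32 old_value, Atomic32 new_value) {
  Atomic32 expected = old_value;
  ptr->compare_exchange_strong(expected, new_value,
                               std::memory_order_relaxed,
                               std::memory_order_relaxed);
  return expected;  // on failure this was refreshed with the observed value
}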


@ -1,203 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2014, Linaro
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
//
// Author: Riku Voipio, riku.voipio@linaro.org
//
// atomic primitives implemented with gcc atomic intrinsics:
// http://gcc.gnu.org/onlinedocs/gcc/_005f_005fatomic-Builtins.html
//
#ifndef BASE_ATOMICOPS_INTERNALS_GCC_GENERIC_H_
#define BASE_ATOMICOPS_INTERNALS_GCC_GENERIC_H_
#include <stdio.h>
#include <stdlib.h>
#include "base/basictypes.h"
typedef int32_t Atomic32;
namespace base {
namespace subtle {
typedef int64_t Atomic64;
inline void MemoryBarrier() {
__sync_synchronize();
}
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 prev_value = old_value;
__atomic_compare_exchange_n(ptr, &prev_value, new_value,
0, __ATOMIC_RELAXED, __ATOMIC_RELAXED);
return prev_value;
}
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
return __atomic_exchange_n(const_cast<Atomic32*>(ptr), new_value, __ATOMIC_RELAXED);
}
inline Atomic32 Acquire_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
return __atomic_exchange_n(const_cast<Atomic32*>(ptr), new_value, __ATOMIC_ACQUIRE);
}
inline Atomic32 Release_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
return __atomic_exchange_n(const_cast<Atomic32*>(ptr), new_value, __ATOMIC_RELEASE);
}
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 prev_value = old_value;
__atomic_compare_exchange_n(ptr, &prev_value, new_value,
0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
return prev_value;
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 prev_value = old_value;
__atomic_compare_exchange_n(ptr, &prev_value, new_value,
0, __ATOMIC_RELEASE, __ATOMIC_RELAXED);
return prev_value;
}
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
}
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
MemoryBarrier();
}
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
MemoryBarrier();
*ptr = value;
}
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
return *ptr;
}
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
Atomic32 value = *ptr;
MemoryBarrier();
return value;
}
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
MemoryBarrier();
return *ptr;
}
// 64-bit versions
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 prev_value = old_value;
__atomic_compare_exchange_n(ptr, &prev_value, new_value,
0, __ATOMIC_RELAXED, __ATOMIC_RELAXED);
return prev_value;
}
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
return __atomic_exchange_n(const_cast<Atomic64*>(ptr), new_value, __ATOMIC_RELAXED);
}
inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
return __atomic_exchange_n(const_cast<Atomic64*>(ptr), new_value, __ATOMIC_ACQUIRE);
}
inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
return __atomic_exchange_n(const_cast<Atomic64*>(ptr), new_value, __ATOMIC_RELEASE);
}
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 prev_value = old_value;
__atomic_compare_exchange_n(ptr, &prev_value, new_value,
0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
return prev_value;
}
inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 prev_value = old_value;
__atomic_compare_exchange_n(ptr, &prev_value, new_value,
0, __ATOMIC_RELEASE, __ATOMIC_RELAXED);
return prev_value;
}
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value;
}
inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value;
MemoryBarrier();
}
inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
MemoryBarrier();
*ptr = value;
}
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
return *ptr;
}
inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
Atomic64 value = *ptr;
MemoryBarrier();
return value;
}
inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
MemoryBarrier();
return *ptr;
}
} // namespace base::subtle
} // namespace base
#endif // BASE_ATOMICOPS_INTERNALS_GCC_GENERIC_H_
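The wrappers above rely on a detail of __atomic_compare_exchange_n: on failure it writes the value it observed back through the "expected" pointer, which is why each wrapper can return prev_value unconditionally. A small standalone demonstration (illustrative only):
#include <cstdint>
#include <cstdio>

int main() {
  int32_t x = 5;
  int32_t expected = 7;  // deliberately wrong
  bool ok = __atomic_compare_exchange_n(&x, &expected, 9,
                                        /*weak=*/0,
                                        __ATOMIC_RELAXED, __ATOMIC_RELAXED);
  // ok == false; expected now holds the observed value 5; x is unchanged.
  std::printf("ok=%d expected=%d x=%d\n", ok, expected, x);
  return 0;
}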


@ -1,437 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2008, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ---
*/
// Implementation of atomic operations for ppc-linux. This file should not
// be included directly. Clients should instead include
// "base/atomicops.h".
#ifndef BASE_ATOMICOPS_INTERNALS_LINUXPPC_H_
#define BASE_ATOMICOPS_INTERNALS_LINUXPPC_H_
typedef int32_t Atomic32;
#ifdef __PPC64__
#define BASE_HAS_ATOMIC64 1
#endif
namespace base {
namespace subtle {
static inline void _sync(void) {
__asm__ __volatile__("sync": : : "memory");
}
static inline void _lwsync(void) {
// gcc defines __NO_LWSYNC__ when appropriate; see
// http://gcc.gnu.org/ml/gcc-patches/2006-11/msg01238.html
#ifdef __NO_LWSYNC__
__asm__ __volatile__("msync": : : "memory");
#else
__asm__ __volatile__("lwsync": : : "memory");
#endif
}
static inline void _isync(void) {
__asm__ __volatile__("isync": : : "memory");
}
static inline Atomic32 OSAtomicAdd32(Atomic32 amount, Atomic32 *value) {
Atomic32 t;
__asm__ __volatile__(
"1: lwarx %0,0,%3\n\
add %0,%2,%0\n\
stwcx. %0,0,%3 \n\
bne- 1b"
: "=&r" (t), "+m" (*value)
: "r" (amount), "r" (value)
: "cc");
return t;
}
static inline Atomic32 OSAtomicAdd32Barrier(Atomic32 amount, Atomic32 *value) {
Atomic32 t;
_lwsync();
t = OSAtomicAdd32(amount, value);
// This is based on the code snippet in the architecture manual (Vol
// 2, Appendix B). It's a little tricky: correctness depends on the
// fact that the code right before this (in OSAtomicAdd32) has a
// conditional branch with a data dependency on the update.
// Otherwise, we'd have to use sync.
_isync();
return t;
}
static inline bool OSAtomicCompareAndSwap32(Atomic32 old_value,
Atomic32 new_value,
Atomic32 *value) {
Atomic32 prev;
__asm__ __volatile__(
"1: lwarx %0,0,%2\n\
cmpw 0,%0,%3\n\
bne- 2f\n\
stwcx. %4,0,%2\n\
bne- 1b\n\
2:"
: "=&r" (prev), "+m" (*value)
: "r" (value), "r" (old_value), "r" (new_value)
: "cc");
return prev == old_value;
}
static inline Atomic32 OSAtomicCompareAndSwap32Acquire(Atomic32 old_value,
Atomic32 new_value,
Atomic32 *value) {
Atomic32 t;
t = OSAtomicCompareAndSwap32(old_value, new_value, value);
// This is based on the code snippet in the architecture manual (Vol
// 2, Appendix B). It's a little tricky: correctness depends on the
// fact that the code right before this (in
// OSAtomicCompareAndSwap32) has a conditional branch with a data
// dependency on the update. Otherwise, we'd have to use sync.
_isync();
return t;
}
static inline Atomic32 OSAtomicCompareAndSwap32Release(Atomic32 old_value,
Atomic32 new_value,
Atomic32 *value) {
_lwsync();
return OSAtomicCompareAndSwap32(old_value, new_value, value);
}
typedef int64_t Atomic64;
inline void MemoryBarrier() {
// This can't be _lwsync(); we need to order the immediately
// preceding stores against any load that may follow, but lwsync
// doesn't guarantee that.
_sync();
}
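The comment above is the store-to-load ordering problem in a nutshell; the store-buffering pattern below shows why a full sync is required. Sketched with C++11 atomics (an anachronism relative to this file, used only for illustration):
#include <atomic>

std::atomic<int> X{0}, Y{0};

// Store buffering: without a full (store->load) fence, both threads may
// return 0. lwsync-strength ordering does not forbid that outcome; a
// sync-strength fence (seq_cst below) does.
int ThreadA() {
  X.store(1, std::memory_order_relaxed);
  std::atomic_thread_fence(std::memory_order_seq_cst);
  return Y.load(std::memory_order_relaxed);
}

int ThreadB() {
  Y.store(1, std::memory_order_relaxed);
  std::atomic_thread_fence(std::memory_order_seq_cst);
  return X.load(std::memory_order_relaxed);
}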
// 32-bit Versions.
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32 *ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 prev_value;
do {
if (OSAtomicCompareAndSwap32(old_value, new_value,
const_cast<Atomic32*>(ptr))) {
return old_value;
}
prev_value = *ptr;
} while (prev_value == old_value);
return prev_value;
}
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32 *ptr,
Atomic32 new_value) {
Atomic32 old_value;
do {
old_value = *ptr;
} while (!OSAtomicCompareAndSwap32(old_value, new_value,
const_cast<Atomic32*>(ptr)));
return old_value;
}
inline Atomic32 Acquire_AtomicExchange(volatile Atomic32 *ptr,
Atomic32 new_value) {
Atomic32 old_value;
do {
old_value = *ptr;
} while (!OSAtomicCompareAndSwap32Acquire(old_value, new_value,
const_cast<Atomic32*>(ptr)));
return old_value;
}
inline Atomic32 Release_AtomicExchange(volatile Atomic32 *ptr,
Atomic32 new_value) {
Atomic32 old_value;
do {
old_value = *ptr;
} while (!OSAtomicCompareAndSwap32Release(old_value, new_value,
const_cast<Atomic32*>(ptr)));
return old_value;
}
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32 *ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 prev_value;
do {
if (OSAtomicCompareAndSwap32Acquire(old_value, new_value,
const_cast<Atomic32*>(ptr))) {
return old_value;
}
prev_value = *ptr;
} while (prev_value == old_value);
return prev_value;
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32 *ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 prev_value;
do {
if (OSAtomicCompareAndSwap32Release(old_value, new_value,
const_cast<Atomic32*>(ptr))) {
return old_value;
}
prev_value = *ptr;
} while (prev_value == old_value);
return prev_value;
}
#ifdef __PPC64__
// 64-bit Versions.
static inline Atomic64 OSAtomicAdd64(Atomic64 amount, Atomic64 *value) {
Atomic64 t;
__asm__ __volatile__(
"1: ldarx %0,0,%3\n\
add %0,%2,%0\n\
stdcx. %0,0,%3 \n\
bne- 1b"
: "=&r" (t), "+m" (*value)
: "r" (amount), "r" (value)
: "cc");
return t;
}
static inline Atomic64 OSAtomicAdd64Barrier(Atomic64 amount, Atomic64 *value) {
Atomic64 t;
_lwsync();
t = OSAtomicAdd64(amount, value);
// This is based on the code snippet in the architecture manual (Vol
// 2, Appendix B). It's a little tricky: correctness depends on the
// fact that the code right before this (in OSAtomicAdd64) has a
// conditional branch with a data dependency on the update.
// Otherwise, we'd have to use sync.
_isync();
return t;
}
static inline bool OSAtomicCompareAndSwap64(Atomic64 old_value,
Atomic64 new_value,
Atomic64 *value) {
Atomic64 prev;
__asm__ __volatile__(
"1: ldarx %0,0,%2\n\
cmpd 0,%0,%3\n\
bne- 2f\n\
stdcx. %4,0,%2\n\
bne- 1b\n\
2:"
: "=&r" (prev), "+m" (*value)
: "r" (value), "r" (old_value), "r" (new_value)
: "cc");
return prev == old_value;
}
static inline Atomic64 OSAtomicCompareAndSwap64Acquire(Atomic64 old_value,
Atomic64 new_value,
Atomic64 *value) {
Atomic64 t;
t = OSAtomicCompareAndSwap64(old_value, new_value, value);
// This is based on the code snippet in the architecture manual (Vol
// 2, Appendix B). It's a little tricky: correctness depends on the
// fact that the code right before this (in
// OSAtomicCompareAndSwap64) has a conditional branch with a data
// dependency on the update. Otherwise, we'd have to use sync.
_isync();
return t;
}
static inline Atomic64 OSAtomicCompareAndSwap64Release(Atomic64 old_value,
Atomic64 new_value,
Atomic64 *value) {
_lwsync();
return OSAtomicCompareAndSwap64(old_value, new_value, value);
}
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64 *ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 prev_value;
do {
if (OSAtomicCompareAndSwap64(old_value, new_value,
const_cast<Atomic64*>(ptr))) {
return old_value;
}
prev_value = *ptr;
} while (prev_value == old_value);
return prev_value;
}
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64 *ptr,
Atomic64 new_value) {
Atomic64 old_value;
do {
old_value = *ptr;
} while (!OSAtomicCompareAndSwap64(old_value, new_value,
const_cast<Atomic64*>(ptr)));
return old_value;
}
inline Atomic64 Acquire_AtomicExchange(volatile Atomic64 *ptr,
Atomic64 new_value) {
Atomic64 old_value;
do {
old_value = *ptr;
} while (!OSAtomicCompareAndSwap64Acquire(old_value, new_value,
const_cast<Atomic64*>(ptr)));
return old_value;
}
inline Atomic64 Release_AtomicExchange(volatile Atomic64 *ptr,
Atomic64 new_value) {
Atomic64 old_value;
do {
old_value = *ptr;
} while (!OSAtomicCompareAndSwap64Release(old_value, new_value,
const_cast<Atomic64*>(ptr)));
return old_value;
}
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64 *ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 prev_value;
do {
if (OSAtomicCompareAndSwap64Acquire(old_value, new_value,
const_cast<Atomic64*>(ptr))) {
return old_value;
}
prev_value = *ptr;
} while (prev_value == old_value);
return prev_value;
}
inline Atomic64 Release_CompareAndSwap(volatile Atomic64 *ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 prev_value;
do {
if (OSAtomicCompareAndSwap64Release(old_value, new_value,
const_cast<Atomic64*>(ptr))) {
return old_value;
}
prev_value = *ptr;
} while (prev_value == old_value);
return prev_value;
}
#endif
inline void NoBarrier_Store(volatile Atomic32 *ptr, Atomic32 value) {
*ptr = value;
}
inline void Acquire_Store(volatile Atomic32 *ptr, Atomic32 value) {
*ptr = value;
// This can't be _lwsync(); we need to order the immediately
// preceding stores against any load that may follow, but lwsync
// doesn't guarantee that.
_sync();
}
inline void Release_Store(volatile Atomic32 *ptr, Atomic32 value) {
_lwsync();
*ptr = value;
}
inline Atomic32 NoBarrier_Load(volatile const Atomic32 *ptr) {
return *ptr;
}
inline Atomic32 Acquire_Load(volatile const Atomic32 *ptr) {
Atomic32 value = *ptr;
_lwsync();
return value;
}
inline Atomic32 Release_Load(volatile const Atomic32 *ptr) {
// This can't be _lwsync(); we need to order the immediately
// preceding stores against any load that may follow, but lwsync
// doesn't guarantee that.
_sync();
return *ptr;
}
#ifdef __PPC64__
// 64-bit Versions.
inline void NoBarrier_Store(volatile Atomic64 *ptr, Atomic64 value) {
*ptr = value;
}
inline void Acquire_Store(volatile Atomic64 *ptr, Atomic64 value) {
*ptr = value;
// This can't be _lwsync(); we need to order the immediately
// preceding stores against any load that may follow, but lwsync
// doesn't guarantee that.
_sync();
}
inline void Release_Store(volatile Atomic64 *ptr, Atomic64 value) {
_lwsync();
*ptr = value;
}
inline Atomic64 NoBarrier_Load(volatile const Atomic64 *ptr) {
return *ptr;
}
inline Atomic64 Acquire_Load(volatile const Atomic64 *ptr) {
Atomic64 value = *ptr;
_lwsync();
return value;
}
inline Atomic64 Release_Load(volatile const Atomic64 *ptr) {
// This can't be _lwsync(); we need to order the immediately
// preceding stores against any load that may follow, but lwsync
// doesn't guarantee that.
_sync();
return *ptr;
}
#endif
} // namespace base::subtle
} // namespace base
#endif // BASE_ATOMICOPS_INTERNALS_LINUXPPC_H_
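Several functions above (and in the Mac OS X file that follows) wrap a boolean compare-and-swap back into the previous-value contract. The recurring loop condenses to the following portable sketch (illustration, not tree code):
#include <atomic>
#include <cstdint>

using Atomic32 = int32_t;

// Hypothetical boolean CAS in the style of OSAtomicCompareAndSwap32:
// returns true iff *p was oldv and was replaced by newv.
inline bool BoolCas(std::atomic<Atomic32>* p, Atomic32 oldv, Atomic32 newv) {
  return p->compare_exchange_strong(oldv, newv, std::memory_order_relaxed);
}

// Rebuild the previous-value contract: retry while the re-read value still
// equals old_value, since then the CAS failure may have been transient.
inline Atomic32 PrevValueCas(std::atomic<Atomic32>* ptr,
                             Atomic32 old_value, Atomic32 new_value) {
  Atomic32 prev_value;
  do {
    if (BoolCas(ptr, old_value, new_value))
      return old_value;
    prev_value = ptr->load(std::memory_order_relaxed);
  } while (prev_value == old_value);
  return prev_value;
}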


@ -1,370 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2006, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// Implementation of atomic operations for Mac OS X. This file should not
// be included directly. Clients should instead include
// "base/atomicops.h".
#ifndef BASE_ATOMICOPS_INTERNALS_MACOSX_H_
#define BASE_ATOMICOPS_INTERNALS_MACOSX_H_
typedef int32_t Atomic32;
// MacOS uses long for intptr_t, so AtomicWord and Atomic32 are always
// different types on the Mac, even when they are the same size. Similarly,
// on __ppc64__, AtomicWord and Atomic64 are always different. Thus, we need
// explicit casting.
#ifdef __LP64__
#define AtomicWordCastType base::subtle::Atomic64
#else
#define AtomicWordCastType Atomic32
#endif
#if defined(__LP64__) || defined(__i386__)
#define BASE_HAS_ATOMIC64 1 // Use only in tests and base/atomic*
#endif
#include <libkern/OSAtomic.h>
namespace base {
namespace subtle {
#if !defined(__LP64__) && defined(__ppc__)
// The Mac 64-bit OSAtomic implementations are not available for 32-bit PowerPC,
// while the underlying assembly instructions are available on only some
// implementations of PowerPC.
// The following inline functions will fail with the error message at compile
// time ONLY IF they are called. So it is safe to use this header if user
// code only calls AtomicWord and Atomic32 operations.
//
// NOTE(vchen): Implementation notes to implement the atomic ops below may
// be found in "PowerPC Virtual Environment Architecture, Book II,
// Version 2.02", January 28, 2005, Appendix B, page 46. Unfortunately,
// extra care must be taken to ensure data are properly 8-byte aligned, and
// that data are returned correctly according to Mac OS X ABI specs.
inline int64_t OSAtomicCompareAndSwap64(
int64_t oldValue, int64_t newValue, int64_t *theValue) {
__asm__ __volatile__(
"_OSAtomicCompareAndSwap64_not_supported_for_32_bit_ppc\n\t");
return 0;
}
inline int64_t OSAtomicAdd64(int64_t theAmount, int64_t *theValue) {
__asm__ __volatile__(
"_OSAtomicAdd64_not_supported_for_32_bit_ppc\n\t");
return 0;
}
inline int64_t OSAtomicCompareAndSwap64Barrier(
int64_t oldValue, int64_t newValue, int64_t *theValue) {
int64_t prev = OSAtomicCompareAndSwap64(oldValue, newValue, theValue);
OSMemoryBarrier();
return prev;
}
inline int64_t OSAtomicAdd64Barrier(
int64_t theAmount, int64_t *theValue) {
int64_t new_val = OSAtomicAdd64(theAmount, theValue);
OSMemoryBarrier();
return new_val;
}
#endif
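The stubs above compile cleanly and abort the build only if something actually references them, because the bogus instruction mnemonic is emitted only when a function is instantiated and assembled. A minimal sketch of the same trick via an intentionally undefined symbol (an approximation: it fails at link time rather than assembly time):
#include <cstdint>

// Never defined anywhere: any call site drags in an unresolved symbol whose
// name spells out the diagnostic. Merely including the header is harmless.
extern "C" int64_t Atomic64_ops_not_supported_on_this_target();

inline int64_t NotSupportedLoad64(const volatile int64_t* /*ptr*/) {
  return Atomic64_ops_not_supported_on_this_target();
}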
typedef int64_t Atomic64;
inline void MemoryBarrier() {
OSMemoryBarrier();
}
// 32-bit Versions.
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32 *ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 prev_value;
do {
if (OSAtomicCompareAndSwap32(old_value, new_value,
const_cast<Atomic32*>(ptr))) {
return old_value;
}
prev_value = *ptr;
} while (prev_value == old_value);
return prev_value;
}
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32 *ptr,
Atomic32 new_value) {
Atomic32 old_value;
do {
old_value = *ptr;
} while (!OSAtomicCompareAndSwap32(old_value, new_value,
const_cast<Atomic32*>(ptr)));
return old_value;
}
inline Atomic32 Acquire_AtomicExchange(volatile Atomic32 *ptr,
Atomic32 new_value) {
Atomic32 old_value;
do {
old_value = *ptr;
} while (!OSAtomicCompareAndSwap32Barrier(old_value, new_value,
const_cast<Atomic32*>(ptr)));
return old_value;
}
inline Atomic32 Release_AtomicExchange(volatile Atomic32 *ptr,
Atomic32 new_value) {
return Acquire_AtomicExchange(ptr, new_value);
}
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32 *ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 prev_value;
do {
if (OSAtomicCompareAndSwap32Barrier(old_value, new_value,
const_cast<Atomic32*>(ptr))) {
return old_value;
}
prev_value = *ptr;
} while (prev_value == old_value);
return prev_value;
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32 *ptr,
Atomic32 old_value,
Atomic32 new_value) {
return Acquire_CompareAndSwap(ptr, old_value, new_value);
}
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
}
inline void Acquire_Store(volatile Atomic32 *ptr, Atomic32 value) {
*ptr = value;
MemoryBarrier();
}
inline void Release_Store(volatile Atomic32 *ptr, Atomic32 value) {
MemoryBarrier();
*ptr = value;
}
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
return *ptr;
}
inline Atomic32 Acquire_Load(volatile const Atomic32 *ptr) {
Atomic32 value = *ptr;
MemoryBarrier();
return value;
}
inline Atomic32 Release_Load(volatile const Atomic32 *ptr) {
MemoryBarrier();
return *ptr;
}
// 64-bit version
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64 *ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 prev_value;
do {
if (OSAtomicCompareAndSwap64(old_value, new_value,
const_cast<Atomic64*>(ptr))) {
return old_value;
}
prev_value = *ptr;
} while (prev_value == old_value);
return prev_value;
}
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64 *ptr,
Atomic64 new_value) {
Atomic64 old_value;
do {
old_value = *ptr;
} while (!OSAtomicCompareAndSwap64(old_value, new_value,
const_cast<Atomic64*>(ptr)));
return old_value;
}
inline Atomic64 Acquire_AtomicExchange(volatile Atomic64 *ptr,
Atomic64 new_value) {
Atomic64 old_value;
do {
old_value = *ptr;
} while (!OSAtomicCompareAndSwap64Barrier(old_value, new_value,
const_cast<Atomic64*>(ptr)));
return old_value;
}
inline Atomic64 Release_AtomicExchange(volatile Atomic64 *ptr,
Atomic64 new_value) {
return Acquire_AtomicExchange(ptr, new_value);
}
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64 *ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 prev_value;
do {
if (OSAtomicCompareAndSwap64Barrier(old_value, new_value,
const_cast<Atomic64*>(ptr))) {
return old_value;
}
prev_value = *ptr;
} while (prev_value == old_value);
return prev_value;
}
inline Atomic64 Release_CompareAndSwap(volatile Atomic64 *ptr,
Atomic64 old_value,
Atomic64 new_value) {
// The lib kern interface does not distinguish between
// Acquire and Release memory barriers; they are equivalent.
return Acquire_CompareAndSwap(ptr, old_value, new_value);
}
#ifdef __LP64__
// 64-bit implementation on 64-bit platform
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value;
}
inline void Acquire_Store(volatile Atomic64 *ptr, Atomic64 value) {
*ptr = value;
MemoryBarrier();
}
inline void Release_Store(volatile Atomic64 *ptr, Atomic64 value) {
MemoryBarrier();
*ptr = value;
}
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
return *ptr;
}
inline Atomic64 Acquire_Load(volatile const Atomic64 *ptr) {
Atomic64 value = *ptr;
MemoryBarrier();
return value;
}
inline Atomic64 Release_Load(volatile const Atomic64 *ptr) {
MemoryBarrier();
return *ptr;
}
#else
// 64-bit implementation on 32-bit platform
#if defined(__ppc__)
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
__asm__ __volatile__(
"_NoBarrier_Store_not_supported_for_32_bit_ppc\n\t");
}
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
__asm__ __volatile__(
"_NoBarrier_Load_not_supported_for_32_bit_ppc\n\t");
return 0;
}
#elif defined(__i386__)
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
__asm__ __volatile__("movq %1, %%mm0\n\t" // Use mmx reg for 64-bit atomic
"movq %%mm0, %0\n\t" // moves (ptr could be read-only)
"emms\n\t" // Reset FP registers
: "=m" (*ptr)
: "m" (value)
: // mark the FP stack and mmx registers as clobbered
"st", "st(1)", "st(2)", "st(3)", "st(4)",
"st(5)", "st(6)", "st(7)", "mm0", "mm1",
"mm2", "mm3", "mm4", "mm5", "mm6", "mm7");
}
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
Atomic64 value;
__asm__ __volatile__("movq %1, %%mm0\n\t" // Use mmx reg for 64-bit atomic
"movq %%mm0, %0\n\t" // moves (ptr could be read-only)
"emms\n\t" // Reset FP registers
: "=m" (value)
: "m" (*ptr)
: // mark the FP stack and mmx registers as clobbered
"st", "st(1)", "st(2)", "st(3)", "st(4)",
"st(5)", "st(6)", "st(7)", "mm0", "mm1",
"mm2", "mm3", "mm4", "mm5", "mm6", "mm7");
return value;
}
#endif
inline void Acquire_Store(volatile Atomic64 *ptr, Atomic64 value) {
NoBarrier_Store(ptr, value);
MemoryBarrier();
}
inline void Release_Store(volatile Atomic64 *ptr, Atomic64 value) {
MemoryBarrier();
NoBarrier_Store(ptr, value);
}
inline Atomic64 Acquire_Load(volatile const Atomic64 *ptr) {
Atomic64 value = NoBarrier_Load(ptr);
MemoryBarrier();
return value;
}
inline Atomic64 Release_Load(volatile const Atomic64 *ptr) {
MemoryBarrier();
return NoBarrier_Load(ptr);
}
#endif // __LP64__
} // namespace base::subtle
} // namespace base
#endif // BASE_ATOMICOPS_INTERNALS_MACOSX_H_
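The MMX movq path above works because aligned 8-byte loads and stores are atomic on the relevant IA-32 parts, and the MMX register file is the only way (short of cmpxchg8b) to move 8 bytes in one instruction on plain i386. For comparison only, the modern equivalent lets the compiler pick the move (an assumption: std::atomic, which this file predates):
#include <atomic>
#include <cstdint>

std::atomic<int64_t> g_val{0};

inline int64_t RelaxedLoad64() {
  return g_val.load(std::memory_order_relaxed);   // single 64-bit move
}
inline void RelaxedStore64(int64_t v) {
  g_val.store(v, std::memory_order_relaxed);      // single 64-bit move
}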


@ -1,323 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2013, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// Author: Jovan Zelincevic <jovan.zelincevic@imgtec.com>
// based on atomicops-internals by Sanjay Ghemawat
// This file is an internal atomic implementation, use base/atomicops.h instead.
//
// This code implements MIPS atomics.
#ifndef BASE_ATOMICOPS_INTERNALS_MIPS_H_
#define BASE_ATOMICOPS_INTERNALS_MIPS_H_
#if (_MIPS_ISA == _MIPS_ISA_MIPS64)
#define BASE_HAS_ATOMIC64 1
#endif
typedef int32_t Atomic32;
namespace base {
namespace subtle {
// Atomically execute:
// result = *ptr;
// if (*ptr == old_value)
// *ptr = new_value;
// return result;
//
// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
// Always return the old value of "*ptr"
//
// This routine implies no memory barriers.
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value)
{
Atomic32 prev, tmp;
__asm__ volatile(
".set push \n"
".set noreorder \n"
"1: \n"
"ll %0, %5 \n" // prev = *ptr
"bne %0, %3, 2f \n" // if (prev != old_value) goto 2
" move %2, %4 \n" // tmp = new_value
"sc %2, %1 \n" // *ptr = tmp (with atomic check)
"beqz %2, 1b \n" // start again on atomic error
" nop \n" // delay slot nop
"2: \n"
".set pop \n"
: "=&r" (prev), "=m" (*ptr),
"=&r" (tmp)
: "Ir" (old_value), "r" (new_value),
"m" (*ptr)
: "memory"
);
return prev;
}
// Atomically store new_value into *ptr, returning the previous value held in
// *ptr. This routine implies no memory barriers.
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value)
{
Atomic32 temp, old;
__asm__ volatile(
".set push \n"
".set noreorder \n"
"1: \n"
"ll %1, %2 \n" // old = *ptr
"move %0, %3 \n" // temp = new_value
"sc %0, %2 \n" // *ptr = temp (with atomic check)
"beqz %0, 1b \n" // start again on atomic error
" nop \n" // delay slot nop
".set pop \n"
: "=&r" (temp), "=&r" (old),
"=m" (*ptr)
: "r" (new_value), "m" (*ptr)
: "memory"
);
return old;
}
inline void MemoryBarrier()
{
__asm__ volatile("sync" : : : "memory");
}
// "Acquire" operations
// ensure that no later memory access can be reordered ahead of the operation.
// "Release" operations ensure that no previous memory access can be reordered
// after the operation. "Barrier" operations have both "Acquire" and "Release"
// semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory
// access.
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value)
{
Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
MemoryBarrier();
return res;
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value)
{
MemoryBarrier();
Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
return res;
}
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value)
{
*ptr = value;
}
inline Atomic32 Acquire_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value)
{
Atomic32 old_value = NoBarrier_AtomicExchange(ptr, new_value);
MemoryBarrier();
return old_value;
}
inline Atomic32 Release_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value)
{
MemoryBarrier();
return NoBarrier_AtomicExchange(ptr, new_value);
}
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value)
{
*ptr = value;
MemoryBarrier();
}
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value)
{
MemoryBarrier();
*ptr = value;
}
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr)
{
return *ptr;
}
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr)
{
Atomic32 value = *ptr;
MemoryBarrier();
return value;
}
inline Atomic32 Release_Load(volatile const Atomic32* ptr)
{
MemoryBarrier();
return *ptr;
}
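As the comment block above says, acquire and release flavours are manufactured here by bracketing a relaxed primitive with MemoryBarrier(). Schematically, in C++11 terms (a sketch; the seq_cst fences stand in for the MIPS sync instruction):
#include <atomic>
#include <cstdint>

using Atomic32 = int32_t;

// Acquire: do the operation, then fence (nothing later moves before it).
inline Atomic32 AcquireExchange(std::atomic<Atomic32>* p, Atomic32 v) {
  Atomic32 old = p->exchange(v, std::memory_order_relaxed);
  std::atomic_thread_fence(std::memory_order_seq_cst);  // MemoryBarrier()
  return old;
}

// Release: fence first (nothing earlier moves after it), then operate.
inline Atomic32 ReleaseExchange(std::atomic<Atomic32>* p, Atomic32 v) {
  std::atomic_thread_fence(std::memory_order_seq_cst);  // MemoryBarrier()
  return p->exchange(v, std::memory_order_relaxed);
}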
#if (_MIPS_ISA == _MIPS_ISA_MIPS64) || (_MIPS_SIM == _MIPS_SIM_ABI64)
typedef int64_t Atomic64;
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value)
{
Atomic64 prev, tmp;
__asm__ volatile(
".set push \n"
".set noreorder \n"
"1: \n"
"lld %0, %5 \n" // prev = *ptr
"bne %0, %3, 2f \n" // if (prev != old_value) goto 2
" move %2, %4 \n" // tmp = new_value
"scd %2, %1 \n" // *ptr = tmp (with atomic check)
"beqz %2, 1b \n" // start again on atomic error
" nop \n" // delay slot nop
"2: \n"
".set pop \n"
: "=&r" (prev), "=m" (*ptr),
"=&r" (tmp)
: "Ir" (old_value), "r" (new_value),
"m" (*ptr)
: "memory"
);
return prev;
}
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value)
{
Atomic64 temp, old;
__asm__ volatile(
".set push \n"
".set noreorder \n"
"1: \n"
"lld %1, %2 \n" // old = *ptr
"move %0, %3 \n" // temp = new_value
"scd %0, %2 \n" // *ptr = temp (with atomic check)
"beqz %0, 1b \n" // start again on atomic error
" nop \n" // delay slot nop
".set pop \n"
: "=&r" (temp), "=&r" (old),
"=m" (*ptr)
: "r" (new_value), "m" (*ptr)
: "memory"
);
return old;
}
inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value)
{
Atomic64 old_value = NoBarrier_AtomicExchange(ptr, new_value);
MemoryBarrier();
return old_value;
}
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value)
{
Atomic64 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
MemoryBarrier();
return res;
}
inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value)
{
MemoryBarrier();
Atomic64 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
return res;
}
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value)
{
*ptr = value;
}
inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value)
{
MemoryBarrier();
return NoBarrier_AtomicExchange(ptr, new_value);
}
inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value)
{
*ptr = value;
MemoryBarrier();
}
inline void Release_Store(volatile Atomic64* ptr, Atomic64 value)
{
MemoryBarrier();
*ptr = value;
}
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr)
{
return *ptr;
}
inline Atomic64 Acquire_Load(volatile const Atomic64* ptr)
{
Atomic64 value = *ptr;
MemoryBarrier();
return value;
}
inline Atomic64 Release_Load(volatile const Atomic64* ptr)
{
MemoryBarrier();
return *ptr;
}
#endif
} // namespace base::subtle
} // namespace base
#endif // BASE_ATOMICOPS_INTERNALS_MIPS_H_


@ -1,457 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2006, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ---
* Author: Sanjay Ghemawat
*/
// Implementation of atomic operations using Windows API
// functions. This file should not be included directly. Clients
// should instead include "base/atomicops.h".
#ifndef BASE_ATOMICOPS_INTERNALS_WINDOWS_H_
#define BASE_ATOMICOPS_INTERNALS_WINDOWS_H_
#include <stdio.h>
#include <stdlib.h>
#include "base/basictypes.h" // For COMPILE_ASSERT
typedef int32 Atomic32;
#if defined(_WIN64)
#define BASE_HAS_ATOMIC64 1 // Use only in tests and base/atomic*
#endif
namespace base {
namespace subtle {
typedef int64 Atomic64;
// 32-bit low-level operations on any platform
extern "C" {
// We use windows intrinsics when we can (they seem to be supported
// well on MSVC 8.0 and above). Unfortunately, in some
// environments, <windows.h> and <intrin.h> have conflicting
// declarations of some other intrinsics, breaking compilation:
// http://connect.microsoft.com/VisualStudio/feedback/details/262047
// Therefore, we simply declare the relevant intrinsics ourself.
// MinGW has a bug in the header files where it doesn't indicate the
// first argument is volatile -- they're not up to date. See
// http://readlist.com/lists/lists.sourceforge.net/mingw-users/0/3861.html
// We have to const_cast away the volatile to avoid compiler warnings.
// TODO(csilvers): remove this once MinGW has updated MinGW/include/winbase.h
#if defined(__MINGW32__)
inline LONG FastInterlockedCompareExchange(volatile LONG* ptr,
LONG newval, LONG oldval) {
return ::InterlockedCompareExchange(const_cast<LONG*>(ptr), newval, oldval);
}
inline LONG FastInterlockedExchange(volatile LONG* ptr, LONG newval) {
return ::InterlockedExchange(const_cast<LONG*>(ptr), newval);
}
inline LONG FastInterlockedExchangeAdd(volatile LONG* ptr, LONG increment) {
return ::InterlockedExchangeAdd(const_cast<LONG*>(ptr), increment);
}
#elif _MSC_VER >= 1400 // intrinsics didn't work so well before MSVC 8.0
// Unfortunately, in some environments, <windows.h> and <intrin.h>
// have conflicting declarations of some intrinsics, breaking
// compilation. So we declare the intrinsics we need ourselves. See
// http://connect.microsoft.com/VisualStudio/feedback/details/262047
LONG _InterlockedCompareExchange(volatile LONG* ptr, LONG newval, LONG oldval);
#pragma intrinsic(_InterlockedCompareExchange)
inline LONG FastInterlockedCompareExchange(volatile LONG* ptr,
LONG newval, LONG oldval) {
return _InterlockedCompareExchange(ptr, newval, oldval);
}
LONG _InterlockedExchange(volatile LONG* ptr, LONG newval);
#pragma intrinsic(_InterlockedExchange)
inline LONG FastInterlockedExchange(volatile LONG* ptr, LONG newval) {
return _InterlockedExchange(ptr, newval);
}
LONG _InterlockedExchangeAdd(volatile LONG* ptr, LONG increment);
#pragma intrinsic(_InterlockedExchangeAdd)
inline LONG FastInterlockedExchangeAdd(volatile LONG* ptr, LONG increment) {
return _InterlockedExchangeAdd(ptr, increment);
}
#else
inline LONG FastInterlockedCompareExchange(volatile LONG* ptr,
LONG newval, LONG oldval) {
return ::InterlockedCompareExchange(ptr, newval, oldval);
}
inline LONG FastInterlockedExchange(volatile LONG* ptr, LONG newval) {
return ::InterlockedExchange(ptr, newval);
}
inline LONG FastInterlockedExchangeAdd(volatile LONG* ptr, LONG increment) {
return ::InterlockedExchangeAdd(ptr, increment);
}
#endif // ifdef __MINGW32__
} // extern "C"
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
LONG result = FastInterlockedCompareExchange(
reinterpret_cast<volatile LONG*>(ptr),
static_cast<LONG>(new_value),
static_cast<LONG>(old_value));
return static_cast<Atomic32>(result);
}
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
LONG result = FastInterlockedExchange(
reinterpret_cast<volatile LONG*>(ptr),
static_cast<LONG>(new_value));
return static_cast<Atomic32>(result);
}
inline Atomic32 Acquire_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
// FastInterlockedExchange has both acquire and release memory barriers.
return NoBarrier_AtomicExchange(ptr, new_value);
}
inline Atomic32 Release_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
// FastInterlockedExchange has both acquire and release memory barriers.
return NoBarrier_AtomicExchange(ptr, new_value);
}
} // namespace base::subtle
} // namespace base
// In msvc8/vs2005, winnt.h already contains a definition for
// MemoryBarrier in the global namespace. Add it there for earlier
// versions and forward to it from within the namespace.
#if !(defined(_MSC_VER) && _MSC_VER >= 1400)
inline void MemoryBarrier() {
Atomic32 value = 0;
base::subtle::NoBarrier_AtomicExchange(&value, 0);
// actually acts as a barrier in this implementation
}
#endif
namespace base {
namespace subtle {
inline void MemoryBarrier() {
::MemoryBarrier();
}
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
}
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
Acquire_AtomicExchange(ptr, value);
}
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value; // works w/o barrier for current Intel chips as of June 2005
// See comments in Atomic64 version of Release_Store() below.
}
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
return *ptr;
}
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
Atomic32 value = *ptr;
return value;
}
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
MemoryBarrier();
return *ptr;
}
// 64-bit operations
#if defined(_WIN64) || defined(__MINGW64__)
// 64-bit low-level operations on 64-bit platform.
COMPILE_ASSERT(sizeof(Atomic64) == sizeof(PVOID), atomic_word_is_atomic);
// These are the intrinsics needed for 64-bit operations. Similar to the
// 32-bit case above.
extern "C" {
#if defined(__MINGW64__)
inline PVOID FastInterlockedCompareExchangePointer(volatile PVOID* ptr,
PVOID newval, PVOID oldval) {
return ::InterlockedCompareExchangePointer(const_cast<PVOID*>(ptr),
newval, oldval);
}
inline PVOID FastInterlockedExchangePointer(volatile PVOID* ptr, PVOID newval) {
return ::InterlockedExchangePointer(const_cast<PVOID*>(ptr), newval);
}
inline LONGLONG FastInterlockedExchangeAdd64(volatile LONGLONG* ptr,
LONGLONG increment) {
return ::InterlockedExchangeAdd64(const_cast<LONGLONG*>(ptr), increment);
}
#elif _MSC_VER >= 1400 // intrinsics didn't work so well before MSVC 8.0
// Like above, we need to declare the intrinsics ourselves.
PVOID _InterlockedCompareExchangePointer(volatile PVOID* ptr,
PVOID newval, PVOID oldval);
#pragma intrinsic(_InterlockedCompareExchangePointer)
inline PVOID FastInterlockedCompareExchangePointer(volatile PVOID* ptr,
PVOID newval, PVOID oldval) {
return _InterlockedCompareExchangePointer(const_cast<PVOID*>(ptr),
newval, oldval);
}
PVOID _InterlockedExchangePointer(volatile PVOID* ptr, PVOID newval);
#pragma intrinsic(_InterlockedExchangePointer)
inline PVOID FastInterlockedExchangePointer(volatile PVOID* ptr, PVOID newval) {
return _InterlockedExchangePointer(const_cast<PVOID*>(ptr), newval);
}
LONGLONG _InterlockedExchangeAdd64(volatile LONGLONG* ptr, LONGLONG increment);
#pragma intrinsic(_InterlockedExchangeAdd64)
inline LONGLONG FastInterlockedExchangeAdd64(volatile LONGLONG* ptr,
LONGLONG increment) {
return _InterlockedExchangeAdd64(const_cast<LONGLONG*>(ptr), increment);
}
#else
inline PVOID FastInterlockedCompareExchangePointer(volatile PVOID* ptr,
PVOID newval, PVOID oldval) {
return ::InterlockedCompareExchangePointer(ptr, newval, oldval);
}
inline PVOID FastInterlockedExchangePointer(volatile PVOID* ptr, PVOID newval) {
return ::InterlockedExchangePointer(ptr, newval);
}
inline LONGLONG FastInterlockedExchangeAdd64(volatile LONGLONG* ptr,
LONGLONG increment) {
return ::InterlockedExchangeAdd64(ptr, increment);
}
#endif // ifdef __MINGW64__
} // extern "C"
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
PVOID result = FastInterlockedCompareExchangePointer(
reinterpret_cast<volatile PVOID*>(ptr),
reinterpret_cast<PVOID>(new_value), reinterpret_cast<PVOID>(old_value));
return reinterpret_cast<Atomic64>(result);
}
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
PVOID result = FastInterlockedExchangePointer(
reinterpret_cast<volatile PVOID*>(ptr),
reinterpret_cast<PVOID>(new_value));
return reinterpret_cast<Atomic64>(result);
}
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value;
}
inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
NoBarrier_AtomicExchange(ptr, value);
// acts as a barrier in this implementation
}
inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value; // works w/o barrier for current Intel chips as of June 2005
// When new chips come out, check:
// IA-32 Intel Architecture Software Developer's Manual, Volume 3:
// System Programming Guide, Chapter 7: Multiple-processor management,
// Section 7.2, Memory Ordering.
// Last seen at:
// http://developer.intel.com/design/pentium4/manuals/index_new.htm
}
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
return *ptr;
}
inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
Atomic64 value = *ptr;
return value;
}
inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
MemoryBarrier();
return *ptr;
}
#else // defined(_WIN64) || defined(__MINGW64__)
// 64-bit low-level operations on 32-bit platform
// TODO(vchen): The GNU assembly below must be converted to MSVC inline
// assembly. Then the file should be renamed to ...-x86-msvc.h, probably.
inline void NotImplementedFatalError(const char *function_name) {
fprintf(stderr, "64-bit %s() not implemented on this platform\n",
function_name);
abort();
}
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
#if 0 // Not implemented
Atomic64 prev;
__asm__ __volatile__("movl (%3), %%ebx\n\t" // Move 64-bit new_value into
"movl 4(%3), %%ecx\n\t" // ecx:ebx
"lock; cmpxchg8b %1\n\t" // If edx:eax (old_value) same
: "=A" (prev) // as contents of ptr:
: "m" (*ptr), // ecx:ebx => ptr
"0" (old_value), // else:
"r" (&new_value) // old *ptr => edx:eax
: "memory", "%ebx", "%ecx");
return prev;
#else
NotImplementedFatalError("NoBarrier_CompareAndSwap");
return 0;
#endif
}
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
#if 0 // Not implemented
__asm__ __volatile__(
"movl (%2), %%ebx\n\t" // Move 64-bit new_value into
"movl 4(%2), %%ecx\n\t" // ecx:ebx
"0:\n\t"
"movl %1, %%eax\n\t" // Read contents of ptr into
"movl 4%1, %%edx\n\t" // edx:eax
"lock; cmpxchg8b %1\n\t" // Attempt cmpxchg; if *ptr
"jnz 0b\n\t" // is no longer edx:eax, loop
: "=A" (new_value)
: "m" (*ptr),
"r" (&new_value)
: "memory", "%ebx", "%ecx");
return new_value; // Now it's the previous value.
#else
NotImplementedFatalError("NoBarrier_AtomicExchange");
return 0;
#endif
}
inline void NoBarrier_Store(volatile Atomic64* ptrValue, Atomic64 value)
{
__asm {
movq mm0, value; // Use mmx reg for 64-bit atomic moves
mov eax, ptrValue;
movq [eax], mm0;
emms; // Empty mmx state to enable FP registers
}
}
inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
NoBarrier_AtomicExchange(ptr, value);
// acts as a barrier in this implementation
}
inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
NoBarrier_Store(ptr, value);
}
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptrValue)
{
Atomic64 value;
__asm {
mov eax, ptrValue;
movq mm0, [eax]; // Use mmx reg for 64-bit atomic moves
movq value, mm0;
emms; // Empty mmx state to enable FP registers
}
return value;
}
inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
Atomic64 value = NoBarrier_Load(ptr);
return value;
}
inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
MemoryBarrier();
return NoBarrier_Load(ptr);
}
#endif // defined(_WIN64) || defined(__MINGW64__)
inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
// FastInterlockedExchange has both acquire and release memory barriers.
return NoBarrier_AtomicExchange(ptr, new_value);
}
inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
// FastInterlockedExchange has both acquire and release memory barriers.
return NoBarrier_AtomicExchange(ptr, new_value);
}
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
} // namespace base::subtle
} // namespace base
#endif // BASE_ATOMICOPS_INTERNALS_WINDOWS_H_

View File

@ -1,112 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2007, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ---
* This module gets enough CPU information to optimize the
* atomicops module on x86.
*/
#include "base/atomicops.h"
#include "base/basictypes.h"
#include "base/googleinit.h"
#include "base/logging.h"
#include <string.h>
// This file only makes sense with atomicops-internals-x86.h -- it
// depends on structs that are defined in that file. If atomicops.h
// doesn't sub-include that file, then we aren't needed, and shouldn't
// try to do anything.
#ifdef BASE_ATOMICOPS_INTERNALS_X86_H_
// Inline cpuid instruction. In PIC compilations, %ebx contains the address
// of the global offset table. To avoid breaking such executables, this code
// must preserve that register's value across cpuid instructions.
#if defined(__i386__)
#define cpuid(a, b, c, d, inp) \
asm ("mov %%ebx, %%edi\n" \
"cpuid\n" \
"xchg %%edi, %%ebx\n" \
: "=a" (a), "=D" (b), "=c" (c), "=d" (d) : "a" (inp))
#elif defined (__x86_64__)
#define cpuid(a, b, c, d, inp) \
asm ("mov %%rbx, %%rdi\n" \
"cpuid\n" \
"xchg %%rdi, %%rbx\n" \
: "=a" (a), "=D" (b), "=c" (c), "=d" (d) : "a" (inp))
#endif
#if defined(cpuid) // initialize the struct only on x86
// Set the flags so that code will run correctly and conservatively
// until InitGoogle() is called.
struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures = {
false, // no SSE2
false // no cmpxchg16b
};
// Initialize the AtomicOps_Internalx86CPUFeatures struct.
static void AtomicOps_Internalx86CPUFeaturesInit() {
uint32 eax;
uint32 ebx;
uint32 ecx;
uint32 edx;
// Get vendor string (issue CPUID with eax = 0)
cpuid(eax, ebx, ecx, edx, 0);
char vendor[13];
memcpy(vendor, &ebx, 4);
memcpy(vendor + 4, &edx, 4);
memcpy(vendor + 8, &ecx, 4);
vendor[12] = 0;
// get feature flags in ecx/edx, and family/model in eax
cpuid(eax, ebx, ecx, edx, 1);
int family = (eax >> 8) & 0xf; // family and model fields
int model = (eax >> 4) & 0xf;
if (family == 0xf) { // use extended family and model fields
family += (eax >> 20) & 0xff;
model += ((eax >> 16) & 0xf) << 4;
}
// edx bit 26 is SSE2 which we use to tell us whether we can use mfence
AtomicOps_Internalx86CPUFeatures.has_sse2 = ((edx >> 26) & 1);
// ecx bit 13 indicates whether the cmpxchg16b instruction is supported
AtomicOps_Internalx86CPUFeatures.has_cmpxchg16b = ((ecx >> 13) & 1);
}
REGISTER_MODULE_INITIALIZER(atomicops_x86, {
AtomicOps_Internalx86CPUFeaturesInit();
});
#endif
#endif /* ifdef BASE_ATOMICOPS_INTERNALS_X86_H_ */

View File

@ -1,391 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2006, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ---
* Author: Sanjay Ghemawat
*/
// Implementation of atomic operations for x86. This file should not
// be included directly. Clients should instead include
// "base/atomicops.h".
#ifndef BASE_ATOMICOPS_INTERNALS_X86_H_
#define BASE_ATOMICOPS_INTERNALS_X86_H_
#include "base/basictypes.h"
typedef int32_t Atomic32;
#define BASE_HAS_ATOMIC64 1 // Use only in tests and base/atomic*
// NOTE(vchen): x86 does not need to define AtomicWordCastType, because it
// already matches Atomic32 or Atomic64, depending on the platform.
// This struct is not part of the public API of this module; clients may not
// use it.
// Features of this x86. Values may not be correct before main() is run,
// but are set conservatively.
struct AtomicOps_x86CPUFeatureStruct {
bool has_sse2; // Processor has SSE2.
bool has_cmpxchg16b; // Processor supports cmpxchg16b instruction.
};
ATTRIBUTE_VISIBILITY_HIDDEN
extern struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures;
#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
namespace base {
namespace subtle {
typedef int64_t Atomic64;
// 32-bit low-level operations on any platform.
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 prev;
__asm__ __volatile__("lock; cmpxchgl %1,%2"
: "=a" (prev)
: "q" (new_value), "m" (*ptr), "0" (old_value)
: "memory");
return prev;
}
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
__asm__ __volatile__("xchgl %1,%0" // The lock prefix is implicit for xchg.
: "=r" (new_value)
: "m" (*ptr), "0" (new_value)
: "memory");
return new_value; // Now it's the previous value.
}
inline Atomic32 Acquire_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
Atomic32 old_val = NoBarrier_AtomicExchange(ptr, new_value);
return old_val;
}
inline Atomic32 Release_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
// xchgl already has release memory barrier semantics.
return NoBarrier_AtomicExchange(ptr, new_value);
}
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
return x;
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
}
#if defined(__x86_64__)
// 64-bit implementations of memory barrier can be simpler, because
// "mfence" is guaranteed to exist.
inline void MemoryBarrier() {
__asm__ __volatile__("mfence" : : : "memory");
}
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
MemoryBarrier();
}
#else
inline void MemoryBarrier() {
if (AtomicOps_Internalx86CPUFeatures.has_sse2) {
__asm__ __volatile__("mfence" : : : "memory");
} else { // mfence is faster but not present on PIII
Atomic32 x = 0;
Acquire_AtomicExchange(&x, 0);
}
}
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
if (AtomicOps_Internalx86CPUFeatures.has_sse2) {
*ptr = value;
__asm__ __volatile__("mfence" : : : "memory");
} else {
Acquire_AtomicExchange(ptr, value);
}
}
#endif
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
ATOMICOPS_COMPILER_BARRIER();
*ptr = value; // An x86 store acts as a release barrier.
// See comments in Atomic64 version of Release_Store(), below.
}
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
return *ptr;
}
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
Atomic32 value = *ptr; // An x86 load acts as an acquire barrier.
// See comments in Atomic64 version of Release_Store(), below.
ATOMICOPS_COMPILER_BARRIER();
return value;
}
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
MemoryBarrier();
return *ptr;
}
#if defined(__x86_64__)
// 64-bit low-level operations on 64-bit platform.
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 prev;
__asm__ __volatile__("lock; cmpxchgq %1,%2"
: "=a" (prev)
: "q" (new_value), "m" (*ptr), "0" (old_value)
: "memory");
return prev;
}
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
__asm__ __volatile__("xchgq %1,%0" // The lock prefix is implicit for xchg.
: "=r" (new_value)
: "m" (*ptr), "0" (new_value)
: "memory");
return new_value; // Now it's the previous value.
}
inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
Atomic64 old_val = NoBarrier_AtomicExchange(ptr, new_value);
return old_val;
}
inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
// xchgq already has release memory barrier semantics.
return NoBarrier_AtomicExchange(ptr, new_value);
}
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value;
}
inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value;
MemoryBarrier();
}
inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
ATOMICOPS_COMPILER_BARRIER();
*ptr = value; // An x86 store acts as a release barrier
// for current AMD/Intel chips as of Jan 2008.
// See also Acquire_Load(), below.
// When new chips come out, check:
// IA-32 Intel Architecture Software Developer's Manual, Volume 3:
// System Programming Guide, Chapter 7: Multiple-processor management,
// Section 7.2, Memory Ordering.
// Last seen at:
// http://developer.intel.com/design/pentium4/manuals/index_new.htm
//
// x86 stores/loads fail to act as barriers for a few instructions (clflush
// maskmovdqu maskmovq movntdq movnti movntpd movntps movntq) but these are
// not generated by the compiler, and are rare. Users of these instructions
// need to know about cache behaviour in any case since all of these involve
// either flushing cache lines or non-temporal cache hints.
}
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
return *ptr;
}
inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
Atomic64 value = *ptr; // An x86 load acts as an acquire barrier,
// for current AMD/Intel chips as of Jan 2008.
// See also Release_Store(), above.
ATOMICOPS_COMPILER_BARRIER();
return value;
}
inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
MemoryBarrier();
return *ptr;
}
#else // defined(__x86_64__)
// 64-bit low-level operations on 32-bit platform.
#if !((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1))
// For compilers older than gcc 4.1, we use inline asm.
//
// Potential pitfalls:
//
// 1. %ebx points to Global offset table (GOT) with -fPIC.
// We need to preserve this register.
// 2. When explicit registers are used in inline asm, the
// compiler may not be aware of it and might try to reuse
// the same register for another argument which has constraints
// that allow it ("r" for example).
inline Atomic64 __sync_val_compare_and_swap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 prev;
__asm__ __volatile__("push %%ebx\n\t"
"movl (%3), %%ebx\n\t" // Move 64-bit new_value into
"movl 4(%3), %%ecx\n\t" // ecx:ebx
"lock; cmpxchg8b (%1)\n\t"// If edx:eax (old_value) same
"pop %%ebx\n\t"
: "=A" (prev) // as contents of ptr:
: "D" (ptr), // ecx:ebx => ptr
"0" (old_value), // else:
"S" (&new_value) // old *ptr => edx:eax
: "memory", "%ecx");
return prev;
}
#endif // Compiler < gcc-4.1
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_val,
Atomic64 new_val) {
return __sync_val_compare_and_swap(ptr, old_val, new_val);
}
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_val) {
Atomic64 old_val;
do {
old_val = *ptr;
} while (__sync_val_compare_and_swap(ptr, old_val, new_val) != old_val);
return old_val;
}
inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_val) {
Atomic64 old_val = NoBarrier_AtomicExchange(ptr, new_val);
return old_val;
}
inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_val) {
return NoBarrier_AtomicExchange(ptr, new_val);
}
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
__asm__ __volatile__("movq %1, %%mm0\n\t" // Use mmx reg for 64-bit atomic
"movq %%mm0, %0\n\t" // moves (ptr could be read-only)
"emms\n\t" // Empty mmx state/Reset FP regs
: "=m" (*ptr)
: "m" (value)
: // mark the FP stack and mmx registers as clobbered
"st", "st(1)", "st(2)", "st(3)", "st(4)",
"st(5)", "st(6)", "st(7)", "mm0", "mm1",
"mm2", "mm3", "mm4", "mm5", "mm6", "mm7");
}
inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
NoBarrier_Store(ptr, value);
MemoryBarrier();
}
inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
ATOMICOPS_COMPILER_BARRIER();
NoBarrier_Store(ptr, value);
}
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
Atomic64 value;
__asm__ __volatile__("movq %1, %%mm0\n\t" // Use mmx reg for 64-bit atomic
"movq %%mm0, %0\n\t" // moves (ptr could be read-only)
"emms\n\t" // Empty mmx state/Reset FP regs
: "=m" (value)
: "m" (*ptr)
: // mark the FP stack and mmx registers as clobbered
"st", "st(1)", "st(2)", "st(3)", "st(4)",
"st(5)", "st(6)", "st(7)", "mm0", "mm1",
"mm2", "mm3", "mm4", "mm5", "mm6", "mm7");
return value;
}
inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
Atomic64 value = NoBarrier_Load(ptr);
ATOMICOPS_COMPILER_BARRIER();
return value;
}
inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
MemoryBarrier();
return NoBarrier_Load(ptr);
}
#endif // defined(__x86_64__)
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
return x;
}
inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
} // namespace base::subtle
} // namespace base
#undef ATOMICOPS_COMPILER_BARRIER
#endif // BASE_ATOMICOPS_INTERNALS_X86_H_

View File

@ -1,399 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2006, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ---
* Author: Sanjay Ghemawat
*/
// For atomic operations on statistics counters, see atomic_stats_counter.h.
// For atomic operations on sequence numbers, see atomic_sequence_num.h.
// For atomic operations on reference counts, see atomic_refcount.h.
// Some fast atomic operations -- typically with machine-dependent
// implementations. This file may need editing as Google code is
// ported to different architectures.
// The routines exported by this module are subtle. If you use them, even if
// you get the code right, it will depend on careful reasoning about atomicity
// and memory ordering; it will be less readable, and harder to maintain. If
// you plan to use these routines, you should have a good reason, such as solid
// evidence that performance would otherwise suffer, or there being no
// alternative. You should assume only properties explicitly guaranteed by the
// specifications in this file. You are almost certainly _not_ writing code
// just for the x86; if you assume x86 semantics, x86 hardware bugs and
// implementations on other architectures will cause your code to break. If you
// do not know what you are doing, avoid these routines, and use a Mutex.
//
// The following lower-level operations are typically useful only to people
// implementing higher-level synchronization operations like spinlocks,
// mutexes, and condition-variables. They combine CompareAndSwap(), a load, or
// a store with appropriate memory-ordering instructions. "Acquire" operations
// ensure that no later memory access can be reordered ahead of the operation.
// "Release" operations ensure that no previous memory access can be reordered
// after the operation. "Barrier" operations have both "Acquire" and "Release"
// semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory
// access.
//
// It is incorrect to make direct assignments to/from an atomic variable.
// You should use one of the Load or Store routines. The NoBarrier
// versions are provided when no barriers are needed:
// NoBarrier_Store()
// NoBarrier_Load()
// Although there is currently no compiler enforcement, you are encouraged
// to use these. Moreover, if you choose to use base::subtle::Atomic64 type,
// you MUST use one of the Load or Store routines to get correct behavior
// on 32-bit platforms.
//
// The intent is eventually to put all of these routines in namespace
// base::subtle
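//
// As an illustrative sketch only (not part of the interface proper), a
// minimal spinlock could be built on these primitives like so:
//   static AtomicWord lock_word = 0;
//   void Lock() {
//     // Acquire semantics: no later memory access may move above this.
//     while (base::subtle::Acquire_CompareAndSwap(&lock_word, 0, 1) != 0) {
//       // spin
//     }
//   }
//   void Unlock() {
//     // Release semantics: no earlier memory access may move below this.
//     base::subtle::Release_Store(&lock_word, 0);
//   }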
#ifndef THREAD_ATOMICOPS_H_
#define THREAD_ATOMICOPS_H_
#include "../config.h"
#ifdef HAVE_STDINT_H
#include <stdint.h>
#endif
// ------------------------------------------------------------------------
// Include the platform specific implementations of the types
// and operations listed below. Implementations are to provide Atomic32
// and Atomic64 operations. If there is a mismatch between intptr_t and
// the Atomic32 or Atomic64 types for a platform, the platform-specific header
// should define the macro, AtomicWordCastType in a clause similar to the
// following:
// #if ...pointers are 64 bits...
// # define AtomicWordCastType base::subtle::Atomic64
// #else
// # define AtomicWordCastType Atomic32
// #endif
// TODO(csilvers): figure out ARCH_PIII/ARCH_K8 (perhaps via ./configure?)
// ------------------------------------------------------------------------
#include "base/arm_instruction_set_select.h"
#define GCC_VERSION (__GNUC__ * 10000 \
+ __GNUC_MINOR__ * 100 \
+ __GNUC_PATCHLEVEL__)
#define CLANG_VERSION (__clang_major__ * 10000 \
+ __clang_minor__ * 100 \
+ __clang_patchlevel__)
#if defined(TCMALLOC_PREFER_GCC_ATOMICS) && defined(__GNUC__) && GCC_VERSION >= 40700
#include "base/atomicops-internals-gcc.h"
#elif defined(TCMALLOC_PREFER_GCC_ATOMICS) && defined(__clang__) && CLANG_VERSION >= 30400
#include "base/atomicops-internals-gcc.h"
#elif defined(__MACH__) && defined(__APPLE__)
#include "base/atomicops-internals-macosx.h"
#elif defined(__GNUC__) && defined(ARMV6)
#include "base/atomicops-internals-arm-v6plus.h"
#elif defined(ARMV3)
#include "base/atomicops-internals-arm-generic.h"
#elif defined(__GNUC__) && (defined(__i386) || defined(__x86_64__))
#include "base/atomicops-internals-x86.h"
#elif defined(_WIN32)
#include "base/atomicops-internals-windows.h"
#elif defined(__linux__) && defined(__PPC__)
#include "base/atomicops-internals-linuxppc.h"
#elif defined(__GNUC__) && defined(__mips__)
#include "base/atomicops-internals-mips.h"
#elif defined(__GNUC__) && GCC_VERSION >= 40700
#include "base/atomicops-internals-gcc.h"
#elif defined(__clang__) && CLANG_VERSION >= 30400
#include "base/atomicops-internals-gcc.h"
#else
#error You need to implement atomic operations for this architecture
#endif
// Signed type that can hold a pointer and supports the atomic ops below, as
// well as atomic loads and stores. Instances must be naturally-aligned.
typedef intptr_t AtomicWord;
#ifdef AtomicWordCastType
// ------------------------------------------------------------------------
// This section is needed only when explicit type casting is required to
// cast AtomicWord to one of the basic atomic types (Atomic64 or Atomic32).
// It also serves to document the AtomicWord interface.
// ------------------------------------------------------------------------
namespace base {
namespace subtle {
// Atomically execute:
// result = *ptr;
// if (*ptr == old_value)
// *ptr = new_value;
// return result;
//
// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
// Always return the old value of "*ptr"
//
// This routine implies no memory barriers.
inline AtomicWord NoBarrier_CompareAndSwap(volatile AtomicWord* ptr,
AtomicWord old_value,
AtomicWord new_value) {
return NoBarrier_CompareAndSwap(
reinterpret_cast<volatile AtomicWordCastType*>(ptr),
old_value, new_value);
}
// Atomically store new_value into *ptr, returning the previous value held in
// *ptr. This routine implies no memory barriers.
inline AtomicWord NoBarrier_AtomicExchange(volatile AtomicWord* ptr,
AtomicWord new_value) {
return NoBarrier_AtomicExchange(
reinterpret_cast<volatile AtomicWordCastType*>(ptr), new_value);
}
inline AtomicWord Acquire_AtomicExchange(volatile AtomicWord* ptr,
AtomicWord new_value) {
return Acquire_AtomicExchange(
reinterpret_cast<volatile AtomicWordCastType*>(ptr), new_value);
}
inline AtomicWord Release_AtomicExchange(volatile AtomicWord* ptr,
AtomicWord new_value) {
return Release_AtomicExchange(
reinterpret_cast<volatile AtomicWordCastType*>(ptr), new_value);
}
inline AtomicWord Acquire_CompareAndSwap(volatile AtomicWord* ptr,
AtomicWord old_value,
AtomicWord new_value) {
return base::subtle::Acquire_CompareAndSwap(
reinterpret_cast<volatile AtomicWordCastType*>(ptr),
old_value, new_value);
}
inline AtomicWord Release_CompareAndSwap(volatile AtomicWord* ptr,
AtomicWord old_value,
AtomicWord new_value) {
return base::subtle::Release_CompareAndSwap(
reinterpret_cast<volatile AtomicWordCastType*>(ptr),
old_value, new_value);
}
inline void NoBarrier_Store(volatile AtomicWord *ptr, AtomicWord value) {
NoBarrier_Store(
reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
}
inline void Acquire_Store(volatile AtomicWord* ptr, AtomicWord value) {
return base::subtle::Acquire_Store(
reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
}
inline void Release_Store(volatile AtomicWord* ptr, AtomicWord value) {
return base::subtle::Release_Store(
reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
}
inline AtomicWord NoBarrier_Load(volatile const AtomicWord *ptr) {
return NoBarrier_Load(
reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
}
inline AtomicWord Acquire_Load(volatile const AtomicWord* ptr) {
return base::subtle::Acquire_Load(
reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
}
inline AtomicWord Release_Load(volatile const AtomicWord* ptr) {
return base::subtle::Release_Load(
reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
}
} // namespace base::subtle
} // namespace base
#endif // AtomicWordCastType
// ------------------------------------------------------------------------
// Commented out type definitions and method declarations for documentation
// of the interface provided by this module.
// ------------------------------------------------------------------------
#if 0
// Signed 32-bit type that supports the atomic ops below, as well as atomic
// loads and stores. Instances must be naturally aligned. This type differs
// from AtomicWord in 64-bit binaries where AtomicWord is 64-bits.
typedef int32_t Atomic32;
// Corresponding operations on Atomic32
namespace base {
namespace subtle {
// Signed 64-bit type that supports the atomic ops below, as well as atomic
// loads and stores. Instances must be naturally aligned. This type differs
// from AtomicWord in 32-bit binaries where AtomicWord is 32-bits.
typedef int64_t Atomic64;
Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value);
Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value);
Atomic32 Acquire_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value);
Atomic32 Release_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value);
Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value);
Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value);
void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value);
void Acquire_Store(volatile Atomic32* ptr, Atomic32 value);
void Release_Store(volatile Atomic32* ptr, Atomic32 value);
Atomic32 NoBarrier_Load(volatile const Atomic32* ptr);
Atomic32 Acquire_Load(volatile const Atomic32* ptr);
Atomic32 Release_Load(volatile const Atomic32* ptr);
// Corresponding operations on Atomic64
Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value);
Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value);
Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value);
Atomic64 Release_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value);
Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value);
Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value);
void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value);
void Acquire_Store(volatile Atomic64* ptr, Atomic64 value);
void Release_Store(volatile Atomic64* ptr, Atomic64 value);
Atomic64 NoBarrier_Load(volatile const Atomic64* ptr);
Atomic64 Acquire_Load(volatile const Atomic64* ptr);
Atomic64 Release_Load(volatile const Atomic64* ptr);
} // namespace base::subtle
} // namespace base
void MemoryBarrier();
#endif // 0
// ------------------------------------------------------------------------
// The following are to be deprecated when all uses have been changed to
// use the base::subtle namespace.
// ------------------------------------------------------------------------
#ifdef AtomicWordCastType
// AtomicWord versions to be deprecated
inline AtomicWord Acquire_CompareAndSwap(volatile AtomicWord* ptr,
AtomicWord old_value,
AtomicWord new_value) {
return base::subtle::Acquire_CompareAndSwap(ptr, old_value, new_value);
}
inline AtomicWord Release_CompareAndSwap(volatile AtomicWord* ptr,
AtomicWord old_value,
AtomicWord new_value) {
return base::subtle::Release_CompareAndSwap(ptr, old_value, new_value);
}
inline void Acquire_Store(volatile AtomicWord* ptr, AtomicWord value) {
return base::subtle::Acquire_Store(ptr, value);
}
inline void Release_Store(volatile AtomicWord* ptr, AtomicWord value) {
return base::subtle::Release_Store(ptr, value);
}
inline AtomicWord Acquire_Load(volatile const AtomicWord* ptr) {
return base::subtle::Acquire_Load(ptr);
}
inline AtomicWord Release_Load(volatile const AtomicWord* ptr) {
return base::subtle::Release_Load(ptr);
}
#endif // AtomicWordCastType
// 32-bit Acquire/Release operations to be deprecated.
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
return base::subtle::Acquire_CompareAndSwap(ptr, old_value, new_value);
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
return base::subtle::Release_CompareAndSwap(ptr, old_value, new_value);
}
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
base::subtle::Acquire_Store(ptr, value);
}
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
return base::subtle::Release_Store(ptr, value);
}
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
return base::subtle::Acquire_Load(ptr);
}
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
return base::subtle::Release_Load(ptr);
}
#ifdef BASE_HAS_ATOMIC64
// 64-bit Acquire/Release operations to be deprecated.
inline base::subtle::Atomic64 Acquire_CompareAndSwap(
volatile base::subtle::Atomic64* ptr,
base::subtle::Atomic64 old_value, base::subtle::Atomic64 new_value) {
return base::subtle::Acquire_CompareAndSwap(ptr, old_value, new_value);
}
inline base::subtle::Atomic64 Release_CompareAndSwap(
volatile base::subtle::Atomic64* ptr,
base::subtle::Atomic64 old_value, base::subtle::Atomic64 new_value) {
return base::subtle::Release_CompareAndSwap(ptr, old_value, new_value);
}
inline void Acquire_Store(
volatile base::subtle::Atomic64* ptr, base::subtle::Atomic64 value) {
base::subtle::Acquire_Store(ptr, value);
}
inline void Release_Store(
volatile base::subtle::Atomic64* ptr, base::subtle::Atomic64 value) {
return base::subtle::Release_Store(ptr, value);
}
inline base::subtle::Atomic64 Acquire_Load(
volatile const base::subtle::Atomic64* ptr) {
return base::subtle::Acquire_Load(ptr);
}
inline base::subtle::Atomic64 Release_Load(
volatile const base::subtle::Atomic64* ptr) {
return base::subtle::Release_Load(ptr);
}
#endif // BASE_HAS_ATOMIC64
#endif // THREAD_ATOMICOPS_H_

View File

@ -1,408 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2005, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef _BASICTYPES_H_
#define _BASICTYPES_H_
#include "../config.h"
#include <string.h> // for memcpy()
#ifdef HAVE_INTTYPES_H
#include <inttypes.h> // gets us PRId64, etc
#endif
// To use this in an autoconf setting, make sure you run the following
// autoconf macros:
// AC_HEADER_STDC /* for stdint_h and inttypes_h */
// AC_CHECK_TYPES([__int64]) /* defined in some windows platforms */
#ifdef HAVE_INTTYPES_H
#include <inttypes.h> // uint16_t might be here; PRId64 too.
#endif
#ifdef HAVE_STDINT_H
#include <stdint.h> // to get uint16_t (ISO naming madness)
#endif
#include <sys/types.h> // our last best hope for uint16_t
// Standard typedefs
// All Google code is compiled with -funsigned-char to make "char"
// unsigned. Google code therefore doesn't need a "uchar" type.
// TODO(csilvers): how do we make sure unsigned-char works on non-gcc systems?
typedef signed char schar;
typedef int8_t int8;
typedef int16_t int16;
typedef int32_t int32;
typedef int64_t int64;
// NOTE: unsigned types are DANGEROUS in loops and other arithmetical
// places. Use the signed types unless your variable represents a bit
// pattern (eg a hash value) or you really need the extra bit. Do NOT
// use 'unsigned' to express "this value should always be positive";
// use assertions for this.
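// (Illustrative sketch of the hazard with an unsigned loop index:
//   for (uint32 i = n - 1; i >= 0; --i) { ... }
// never terminates, because "i >= 0" is always true for an unsigned type;
// decrementing past zero wraps around to 0xFFFFFFFF.)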
typedef uint8_t uint8;
typedef uint16_t uint16;
typedef uint32_t uint32;
typedef uint64_t uint64;
const uint16 kuint16max = ( (uint16) 0xFFFF);
const uint32 kuint32max = ( (uint32) 0xFFFFFFFF);
const uint64 kuint64max = ( (((uint64) kuint32max) << 32) | kuint32max );
const int8 kint8max = ( ( int8) 0x7F);
const int16 kint16max = ( ( int16) 0x7FFF);
const int32 kint32max = ( ( int32) 0x7FFFFFFF);
const int64 kint64max = ( ((( int64) kint32max) << 32) | kuint32max );
const int8 kint8min = ( ( int8) 0x80);
const int16 kint16min = ( ( int16) 0x8000);
const int32 kint32min = ( ( int32) 0x80000000);
const int64 kint64min = ( (((uint64) kint32min) << 32) | 0 );
// Define the "portable" printf and scanf macros, if they're not
// already there (via the inttypes.h we #included above, hopefully).
// Mostly it's old systems that don't support inttypes.h, so we assume
// they're 32 bit.
#ifndef PRIx64
#define PRIx64 "llx"
#endif
#ifndef SCNx64
#define SCNx64 "llx"
#endif
#ifndef PRId64
#define PRId64 "lld"
#endif
#ifndef SCNd64
#define SCNd64 "lld"
#endif
#ifndef PRIu64
#define PRIu64 "llu"
#endif
#ifndef PRIxPTR
#define PRIxPTR "lx"
#endif
// Also allow for printing of a pthread_t.
#define GPRIuPTHREAD "lu"
#define GPRIxPTHREAD "lx"
#if defined(__CYGWIN__) || defined(__CYGWIN32__) || defined(__APPLE__) || defined(__FreeBSD__)
#define PRINTABLE_PTHREAD(pthreadt) reinterpret_cast<uintptr_t>(pthreadt)
#else
#define PRINTABLE_PTHREAD(pthreadt) pthreadt
#endif
// A macro to disallow the evil copy constructor and operator= functions
// This should be used in the private: declarations for a class
#define DISALLOW_EVIL_CONSTRUCTORS(TypeName) \
TypeName(const TypeName&); \
void operator=(const TypeName&)
// An alternate name that leaves out the moral judgment... :-)
#define DISALLOW_COPY_AND_ASSIGN(TypeName) DISALLOW_EVIL_CONSTRUCTORS(TypeName)
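// Typical use (the class name Foo is hypothetical, for illustration only):
//   class Foo {
//    public:
//     Foo();
//    private:
//     DISALLOW_COPY_AND_ASSIGN(Foo);
//   };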
// The COMPILE_ASSERT macro can be used to verify that a compile time
// expression is true. For example, you could use it to verify the
// size of a static array:
//
// COMPILE_ASSERT(sizeof(num_content_type_names) == sizeof(int),
// content_type_names_incorrect_size);
//
// or to make sure a struct is smaller than a certain size:
//
// COMPILE_ASSERT(sizeof(foo) < 128, foo_too_large);
//
// The second argument to the macro is the name of the variable. If
// the expression is false, most compilers will issue a warning/error
// containing the name of the variable.
//
// Implementation details of COMPILE_ASSERT:
//
// - COMPILE_ASSERT works by defining an array type that has -1
// elements (and thus is invalid) when the expression is false.
//
// - The simpler definition
//
// #define COMPILE_ASSERT(expr, msg) typedef char msg[(expr) ? 1 : -1]
//
// does not work, as gcc supports variable-length arrays whose sizes
// are determined at run-time (this is gcc's extension and not part
// of the C++ standard). As a result, gcc fails to reject the
// following code with the simple definition:
//
// int foo;
// COMPILE_ASSERT(foo, msg); // not supposed to compile as foo is
// // not a compile-time constant.
//
// - By using the type CompileAssert<(bool(expr))>, we ensure that
// expr is a compile-time constant. (Template arguments must be
// determined at compile-time.)
//
// - The outer parentheses in CompileAssert<(bool(expr))> are necessary
// to work around a bug in gcc 3.4.4 and 4.0.1. If we had written
//
// CompileAssert<bool(expr)>
//
// instead, these compilers will refuse to compile
//
// COMPILE_ASSERT(5 > 0, some_message);
//
// (They seem to think the ">" in "5 > 0" marks the end of the
// template argument list.)
//
// - The array size is (bool(expr) ? 1 : -1), instead of simply
//
// ((expr) ? 1 : -1).
//
// This is to avoid running into a bug in MS VC 7.1, which
// causes ((0.0) ? 1 : -1) to incorrectly evaluate to 1.
template <bool>
struct CompileAssert {
};
#ifdef HAVE___ATTRIBUTE__
# define ATTRIBUTE_UNUSED __attribute__((unused))
#else
# define ATTRIBUTE_UNUSED
#endif
#if defined(HAVE___ATTRIBUTE__) && defined(HAVE_TLS)
#define ATTR_INITIAL_EXEC __attribute__ ((tls_model ("initial-exec")))
#else
#define ATTR_INITIAL_EXEC
#endif
#define COMPILE_ASSERT(expr, msg) \
typedef CompileAssert<(bool(expr))> msg[bool(expr) ? 1 : -1] ATTRIBUTE_UNUSED
#define arraysize(a) (sizeof(a) / sizeof(*(a)))
#define OFFSETOF_MEMBER(strct, field) \
(reinterpret_cast<char*>(&reinterpret_cast<strct*>(16)->field) - \
reinterpret_cast<char*>(16))
// bit_cast<Dest,Source> implements the equivalent of
// "*reinterpret_cast<Dest*>(&source)".
//
// The reinterpret_cast method would produce undefined behavior
// according to ISO C++ specification section 3.10, paragraph 15.
// bit_cast<> calls memcpy() which is blessed by the standard,
// especially by the example in section 3.9.
//
// Fortunately memcpy() is very fast. In optimized mode, with a
// constant size, gcc 2.95.3, gcc 4.0.1, and msvc 7.1 produce inline
// code with the minimal amount of data movement. On a 32-bit system,
// memcpy(d,s,4) compiles to one load and one store, and memcpy(d,s,8)
// compiles to two loads and two stores.
template <class Dest, class Source>
inline Dest bit_cast(const Source& source) {
COMPILE_ASSERT(sizeof(Dest) == sizeof(Source), bitcasting_unequal_sizes);
Dest dest;
memcpy(&dest, &source, sizeof(dest));
return dest;
}
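// For example (illustrative only), inspecting the bit pattern of a double
// is well-defined via bit_cast where a reinterpret_cast would not be:
//   double d = 1.0;
//   uint64 bits = bit_cast<uint64>(d);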
// bit_store<Dest,Source> implements the equivalent of
// "dest = *reinterpret_cast<Dest*>(&source)".
//
// This prevents undefined behavior when the dest pointer is unaligned.
template <class Dest, class Source>
inline void bit_store(Dest *dest, const Source *source) {
COMPILE_ASSERT(sizeof(Dest) == sizeof(Source), bitcasting_unequal_sizes);
memcpy(dest, source, sizeof(Dest));
}
#ifdef HAVE___ATTRIBUTE__
# define ATTRIBUTE_WEAK __attribute__((weak))
# define ATTRIBUTE_NOINLINE __attribute__((noinline))
#else
# define ATTRIBUTE_WEAK
# define ATTRIBUTE_NOINLINE
#endif
#if defined(HAVE___ATTRIBUTE__) && defined(__ELF__)
# define ATTRIBUTE_VISIBILITY_HIDDEN __attribute__((visibility("hidden")))
#else
# define ATTRIBUTE_VISIBILITY_HIDDEN
#endif
// Section attributes are supported for both ELF and Mach-O, but in
// very different ways. Here's the API we provide:
// 1) ATTRIBUTE_SECTION: put this with the declaration of all functions
// you want to be in the same linker section
// 2) DEFINE_ATTRIBUTE_SECTION_VARS: must be called once per unique
// name. You want to make sure this is executed before any
// DECLARE_ATTRIBUTE_SECTION_VARS; the easiest way is to put them
// in the same .cc file. Put this call at the global level.
// 3) INIT_ATTRIBUTE_SECTION_VARS: you can scatter calls to this in
// multiple places to help ensure execution before any
// DECLARE_ATTRIBUTE_SECTION_VARS. You must have at least one
// DEFINE, but you can have many INITs. Put each in its own scope.
// 4) DECLARE_ATTRIBUTE_SECTION_VARS: must be called before using
// ATTRIBUTE_SECTION_START or ATTRIBUTE_SECTION_STOP on a name.
// Put this call at the global level.
// 5) ATTRIBUTE_SECTION_START/ATTRIBUTE_SECTION_STOP: call this to say
// where in memory a given section is. All functions declared with
// ATTRIBUTE_SECTION are guaranteed to be between START and STOP.
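// An illustrative sketch of the intended usage (the section name
// "my_section" is hypothetical):
//   void Traced() ATTRIBUTE_SECTION(my_section);
//   DEFINE_ATTRIBUTE_SECTION_VARS(my_section);   // once, at global level
//   DECLARE_ATTRIBUTE_SECTION_VARS(my_section);  // before START/STOP below
//   bool IsTracedPC(void* pc) {
//     return pc >= ATTRIBUTE_SECTION_START(my_section) &&
//            pc < ATTRIBUTE_SECTION_STOP(my_section);
//   }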
#if defined(HAVE___ATTRIBUTE__) && defined(__ELF__)
# define ATTRIBUTE_SECTION(name) __attribute__ ((section (#name)))
// Weak section declaration to be used as a global declaration
// for ATTRIBUTE_SECTION_START|STOP(name) to compile and link
// even without functions with ATTRIBUTE_SECTION(name).
# define DECLARE_ATTRIBUTE_SECTION_VARS(name) \
extern char __start_##name[] ATTRIBUTE_WEAK; \
extern char __stop_##name[] ATTRIBUTE_WEAK
# define INIT_ATTRIBUTE_SECTION_VARS(name) // no-op for ELF
# define DEFINE_ATTRIBUTE_SECTION_VARS(name) // no-op for ELF
// Return void* pointers to start/end of a section of code with functions
// having ATTRIBUTE_SECTION(name), or 0 if no such function exists.
// One must DECLARE_ATTRIBUTE_SECTION(name) for this to compile and link.
# define ATTRIBUTE_SECTION_START(name) (reinterpret_cast<void*>(__start_##name))
# define ATTRIBUTE_SECTION_STOP(name) (reinterpret_cast<void*>(__stop_##name))
# define HAVE_ATTRIBUTE_SECTION_START 1
#elif defined(HAVE___ATTRIBUTE__) && defined(__MACH__)
# define ATTRIBUTE_SECTION(name) __attribute__ ((section ("__TEXT, " #name)))
#include <mach-o/getsect.h>
#include <mach-o/dyld.h>
class AssignAttributeStartEnd {
public:
AssignAttributeStartEnd(const char* name, char** pstart, char** pend) {
// Find out which dynamic library (if any) defines the named section.
if (_dyld_present()) {
for (int i = _dyld_image_count() - 1; i >= 0; --i) {
const mach_header* hdr = _dyld_get_image_header(i);
#ifdef MH_MAGIC_64
if (hdr->magic == MH_MAGIC_64) {
uint64_t len;
*pstart = getsectdatafromheader_64((mach_header_64*)hdr,
"__TEXT", name, &len);
if (*pstart) { // NULL if not defined in this dynamic library
*pstart += _dyld_get_image_vmaddr_slide(i); // correct for reloc
*pend = *pstart + len;
return;
}
}
#endif
if (hdr->magic == MH_MAGIC) {
uint32_t len;
*pstart = getsectdatafromheader(hdr, "__TEXT", name, &len);
if (*pstart) { // NULL if not defined in this dynamic library
*pstart += _dyld_get_image_vmaddr_slide(i); // correct for reloc
*pend = *pstart + len;
return;
}
}
}
}
// If we get here, not defined in a dll at all. See if defined statically.
unsigned long len; // don't ask me why this type isn't uint32_t too...
*pstart = getsectdata("__TEXT", name, &len);
*pend = *pstart + len;
}
};
#define DECLARE_ATTRIBUTE_SECTION_VARS(name) \
extern char* __start_##name; \
extern char* __stop_##name
#define INIT_ATTRIBUTE_SECTION_VARS(name) \
DECLARE_ATTRIBUTE_SECTION_VARS(name); \
static const AssignAttributeStartEnd __assign_##name( \
#name, &__start_##name, &__stop_##name)
#define DEFINE_ATTRIBUTE_SECTION_VARS(name) \
char* __start_##name, *__stop_##name; \
INIT_ATTRIBUTE_SECTION_VARS(name)
# define ATTRIBUTE_SECTION_START(name) (reinterpret_cast<void*>(__start_##name))
# define ATTRIBUTE_SECTION_STOP(name) (reinterpret_cast<void*>(__stop_##name))
# define HAVE_ATTRIBUTE_SECTION_START 1
#else // not HAVE___ATTRIBUTE__ && __ELF__, nor HAVE___ATTRIBUTE__ && __MACH__
# define ATTRIBUTE_SECTION(name)
# define DECLARE_ATTRIBUTE_SECTION_VARS(name)
# define INIT_ATTRIBUTE_SECTION_VARS(name)
# define DEFINE_ATTRIBUTE_SECTION_VARS(name)
# define ATTRIBUTE_SECTION_START(name) (reinterpret_cast<void*>(0))
# define ATTRIBUTE_SECTION_STOP(name) (reinterpret_cast<void*>(0))
#endif // HAVE___ATTRIBUTE__ and __ELF__ or __MACH__
#if defined(HAVE___ATTRIBUTE__)
# if (defined(__i386__) || defined(__x86_64__))
# define CACHELINE_ALIGNED __attribute__((aligned(64)))
# elif (defined(__PPC__) || defined(__PPC64__))
# define CACHELINE_ALIGNED __attribute__((aligned(16)))
# elif (defined(__arm__))
# define CACHELINE_ALIGNED __attribute__((aligned(64)))
// some ARMs have shorter cache lines (ARM1176JZF-S is 32 bytes for example) but obviously 64-byte aligned implies 32-byte aligned
# elif (defined(__mips__))
# define CACHELINE_ALIGNED __attribute__((aligned(128)))
# elif (defined(__aarch64__))
# define CACHELINE_ALIGNED __attribute__((aligned(64)))
// implementation specific, Cortex-A53 and 57 should have 64 bytes
# elif (defined(__s390__))
# define CACHELINE_ALIGNED __attribute__((aligned(256)))
# else
# error Could not determine cache line length - unknown architecture
# endif
#else
# define CACHELINE_ALIGNED
#endif // defined(HAVE___ATTRIBUTE__) && (__i386__ || __x86_64__)
// Structure for discovering alignment
union MemoryAligner {
void* p;
double d;
size_t s;
} CACHELINE_ALIGNED;
// The following enum should be used only as a constructor argument to indicate
// that the variable has static storage class, and that the constructor should
// do nothing to its state. It indicates to the reader that it is legal to
// declare a static instance of the class, provided the constructor is given
// the base::LINKER_INITIALIZED argument. Normally, it is unsafe to declare a
// static variable that has a constructor or a destructor because invocation
// order is undefined. However, IF the type can be initialized by filling with
// zeroes (which the loader does for static variables), AND the destructor also
// does nothing to the storage, then a constructor declared as
// explicit MyClass(base::LinkerInitialized x) {}
// and invoked as
// static MyClass my_variable_name(base::LINKER_INITIALIZED);
namespace base {
enum LinkerInitialized { LINKER_INITIALIZED };
}
#endif // _BASICTYPES_H_

View File

@ -1,166 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2005, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
// This file is a compatibility layer that defines Google's version of
// command line flags that are used for configuration.
//
// We put flags into their own namespace. It is purposefully
// named in an opaque way that people should have trouble typing
// directly. The idea is that DEFINE puts the flag in the weird
// namespace, and DECLARE imports the flag from there into the
// current namespace. The net result is to force people to use
// DECLARE to get access to a flag, rather than saying
// extern bool FLAGS_logtostderr;
// or some such instead. We want this so we can put extra
// functionality (like sanity-checking) in DECLARE if we want,
// and make sure it is picked up everywhere.
//
// We also put the type of the variable in the namespace, so that
// people can't DECLARE_int32 something that they DEFINE_bool'd
// elsewhere.
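//
// For example (the flag name is hypothetical, for illustration only):
//   // In the file that owns the flag:
//   DEFINE_bool(use_fast_path, true, "Whether to take the fast path");
//   // In any other file that reads it:
//   DECLARE_bool(use_fast_path);
//   ... if (FLAGS_use_fast_path) { ... }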
#ifndef BASE_COMMANDLINEFLAGS_H_
#define BASE_COMMANDLINEFLAGS_H_
#include "../config.h"
#include <string>
#include <string.h> // for memchr
#include <stdlib.h> // for getenv
#include "base/basictypes.h"
#define DECLARE_VARIABLE(type, name) \
namespace FLAG__namespace_do_not_use_directly_use_DECLARE_##type##_instead { \
extern PERFTOOLS_DLL_DECL type FLAGS_##name; \
} \
using FLAG__namespace_do_not_use_directly_use_DECLARE_##type##_instead::FLAGS_##name
#define DEFINE_VARIABLE(type, name, value, meaning) \
namespace FLAG__namespace_do_not_use_directly_use_DECLARE_##type##_instead { \
PERFTOOLS_DLL_DECL type FLAGS_##name(value); \
char FLAGS_no##name; \
} \
using FLAG__namespace_do_not_use_directly_use_DECLARE_##type##_instead::FLAGS_##name
// bool specialization
#define DECLARE_bool(name) \
DECLARE_VARIABLE(bool, name)
#define DEFINE_bool(name, value, meaning) \
DEFINE_VARIABLE(bool, name, value, meaning)
// int32 specialization
#define DECLARE_int32(name) \
DECLARE_VARIABLE(int32, name)
#define DEFINE_int32(name, value, meaning) \
DEFINE_VARIABLE(int32, name, value, meaning)
// int64 specialization
#define DECLARE_int64(name) \
DECLARE_VARIABLE(int64, name)
#define DEFINE_int64(name, value, meaning) \
DEFINE_VARIABLE(int64, name, value, meaning)
#define DECLARE_uint64(name) \
DECLARE_VARIABLE(uint64, name)
#define DEFINE_uint64(name, value, meaning) \
DEFINE_VARIABLE(uint64, name, value, meaning)
// double specialization
#define DECLARE_double(name) \
DECLARE_VARIABLE(double, name)
#define DEFINE_double(name, value, meaning) \
DEFINE_VARIABLE(double, name, value, meaning)
// Special case for string, because we have to specify the namespace
// std::string, which doesn't play nicely with our FLAG__namespace hackery.
#define DECLARE_string(name) \
namespace FLAG__namespace_do_not_use_directly_use_DECLARE_string_instead { \
extern std::string FLAGS_##name; \
} \
using FLAG__namespace_do_not_use_directly_use_DECLARE_string_instead::FLAGS_##name
#define DEFINE_string(name, value, meaning) \
namespace FLAG__namespace_do_not_use_directly_use_DECLARE_string_instead { \
std::string FLAGS_##name(value); \
char FLAGS_no##name; \
} \
using FLAG__namespace_do_not_use_directly_use_DECLARE_string_instead::FLAGS_##name
// implemented in sysinfo.cc
namespace tcmalloc {
namespace commandlineflags {
inline bool StringToBool(const char *value, bool def) {
if (!value) {
return def;
}
return memchr("tTyY1\0", value[0], 6) != NULL;
}
inline int StringToInt(const char *value, int def) {
if (!value) {
return def;
}
return strtol(value, NULL, 10);
}
inline long long StringToLongLong(const char *value, long long def) {
if (!value) {
return def;
}
return strtoll(value, NULL, 10);
}
inline double StringToDouble(const char *value, double def) {
if (!value) {
return def;
}
return strtod(value, NULL);
}
}
}
// These macros (could be functions, but I don't want to bother with a .cc
// file), make it easier to initialize flags from the environment.
#define EnvToString(envname, dflt) \
(!getenv(envname) ? (dflt) : getenv(envname))
#define EnvToBool(envname, dflt) \
tcmalloc::commandlineflags::StringToBool(getenv(envname), dflt)
#define EnvToInt(envname, dflt) \
tcmalloc::commandlineflags::StringToInt(getenv(envname), dflt)
#define EnvToInt64(envname, dflt) \
tcmalloc::commandlineflags::StringToLongLong(getenv(envname), dflt)
#define EnvToDouble(envname, dflt) \
tcmalloc::commandlineflags::StringToDouble(getenv(envname), dflt)
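// For example (hypothetical environment variable, for illustration only):
//   FLAGS_use_fast_path = EnvToBool("TCMALLOC_USE_FAST_PATH", true);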
#endif // BASE_COMMANDLINEFLAGS_H_

View File

@ -1,179 +0,0 @@
/* Copyright (c) 2008-2009, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ---
* Author: Kostya Serebryany
*/
#ifdef __cplusplus
# error "This file should be built as pure C to avoid name mangling"
#endif
#include "config.h"
#include <stdlib.h>
#include <string.h>
#include "base/dynamic_annotations.h"
#include "getenv_safe.h" // for TCMallocGetenvSafe
#ifdef __GNUC__
/* valgrind.h uses gcc extensions so it won't build with other compilers */
# ifdef HAVE_VALGRIND_H /* prefer the user's copy if they have it */
# include <valgrind.h>
# else /* otherwise just use the copy that we have */
# include "third_party/valgrind.h"
# endif
#endif
/* Compiler-based ThreadSanitizer defines
DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL = 1
and provides its own definitions of the functions. */
#ifndef DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL
# define DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL 0
#endif
/* Each function is empty and called (via a macro) only in debug mode.
The arguments are captured by dynamic tools at runtime. */
#if DYNAMIC_ANNOTATIONS_ENABLED == 1 \
&& DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL == 0
void AnnotateRWLockCreate(const char *file, int line,
const volatile void *lock){}
void AnnotateRWLockDestroy(const char *file, int line,
const volatile void *lock){}
void AnnotateRWLockAcquired(const char *file, int line,
const volatile void *lock, long is_w){}
void AnnotateRWLockReleased(const char *file, int line,
const volatile void *lock, long is_w){}
void AnnotateBarrierInit(const char *file, int line,
const volatile void *barrier, long count,
long reinitialization_allowed) {}
void AnnotateBarrierWaitBefore(const char *file, int line,
const volatile void *barrier) {}
void AnnotateBarrierWaitAfter(const char *file, int line,
const volatile void *barrier) {}
void AnnotateBarrierDestroy(const char *file, int line,
const volatile void *barrier) {}
void AnnotateCondVarWait(const char *file, int line,
const volatile void *cv,
const volatile void *lock){}
void AnnotateCondVarSignal(const char *file, int line,
const volatile void *cv){}
void AnnotateCondVarSignalAll(const char *file, int line,
const volatile void *cv){}
void AnnotatePublishMemoryRange(const char *file, int line,
const volatile void *address,
long size){}
void AnnotateUnpublishMemoryRange(const char *file, int line,
const volatile void *address,
long size){}
void AnnotatePCQCreate(const char *file, int line,
const volatile void *pcq){}
void AnnotatePCQDestroy(const char *file, int line,
const volatile void *pcq){}
void AnnotatePCQPut(const char *file, int line,
const volatile void *pcq){}
void AnnotatePCQGet(const char *file, int line,
const volatile void *pcq){}
void AnnotateNewMemory(const char *file, int line,
const volatile void *mem,
long size){}
void AnnotateExpectRace(const char *file, int line,
const volatile void *mem,
const char *description){}
void AnnotateBenignRace(const char *file, int line,
const volatile void *mem,
const char *description){}
void AnnotateBenignRaceSized(const char *file, int line,
const volatile void *mem,
long size,
const char *description) {}
void AnnotateMutexIsUsedAsCondVar(const char *file, int line,
const volatile void *mu){}
void AnnotateTraceMemory(const char *file, int line,
const volatile void *arg){}
void AnnotateThreadName(const char *file, int line,
const char *name){}
void AnnotateIgnoreReadsBegin(const char *file, int line){}
void AnnotateIgnoreReadsEnd(const char *file, int line){}
void AnnotateIgnoreWritesBegin(const char *file, int line){}
void AnnotateIgnoreWritesEnd(const char *file, int line){}
void AnnotateEnableRaceDetection(const char *file, int line, int enable){}
void AnnotateNoOp(const char *file, int line,
const volatile void *arg){}
void AnnotateFlushState(const char *file, int line){}
#endif /* DYNAMIC_ANNOTATIONS_ENABLED == 1
&& DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL == 0 */
#if DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL == 0
static int GetRunningOnValgrind(void) {
#ifdef RUNNING_ON_VALGRIND
if (RUNNING_ON_VALGRIND) return 1;
#endif
const char *running_on_valgrind_str = TCMallocGetenvSafe("RUNNING_ON_VALGRIND");
if (running_on_valgrind_str) {
return strcmp(running_on_valgrind_str, "0") != 0;
}
return 0;
}
/* See the comments in dynamic_annotations.h */
int RunningOnValgrind(void) {
static volatile int running_on_valgrind = -1;
int local_running_on_valgrind = running_on_valgrind;
/* C doesn't have thread-safe initialization of statics, and we
don't want to depend on pthread_once here, so hack it. */
ANNOTATE_BENIGN_RACE(&running_on_valgrind, "safe hack");
if (local_running_on_valgrind == -1)
running_on_valgrind = local_running_on_valgrind = GetRunningOnValgrind();
return local_running_on_valgrind;
}
#endif /* DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL == 0 */
/* See the comments in dynamic_annotations.h */
double ValgrindSlowdown(void) {
/* Same initialization hack as in RunningOnValgrind(). */
static volatile double slowdown = 0.0;
double local_slowdown = slowdown;
ANNOTATE_BENIGN_RACE(&slowdown, "safe hack");
if (RunningOnValgrind() == 0) {
return 1.0;
}
if (local_slowdown == 0.0) {
char *env = getenv("VALGRIND_SLOWDOWN");
slowdown = local_slowdown = env ? atof(env) : 50.0;
}
return local_slowdown;
}
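In practice, the two functions above exist so that tests and watchdogs can stretch their deadlines when the process runs under Valgrind. A minimal C++ sketch of such a caller (the 5-second base deadline and the helper name are illustrative assumptions, not part of this file):

#include <unistd.h>  // for sleep()

extern "C" {
int RunningOnValgrind(void);
double ValgrindSlowdown(void);
}

// Hypothetical helper: stretch a wall-clock deadline by the Valgrind
// slowdown factor. ValgrindSlowdown() returns 1.0 outside Valgrind,
// so this is a no-op in normal runs.
static unsigned ScaledDeadlineSeconds(unsigned base_seconds) {
  return static_cast<unsigned>(base_seconds * ValgrindSlowdown());
}

int main() {
  if (RunningOnValgrind()) {
    // A caller might also skip timing-sensitive assertions entirely.
  }
  sleep(ScaledDeadlineSeconds(5));  // ~250 s under the default 50x slowdown
  return 0;
}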

View File

@ -1,627 +0,0 @@
/* Copyright (c) 2008, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ---
* Author: Kostya Serebryany
*/
/* This file defines dynamic annotations for use with dynamic analysis
tool such as valgrind, PIN, etc.
Dynamic annotation is a source code annotation that affects
the generated code (that is, the annotation is not a comment).
Each such annotation is attached to a particular
instruction and/or to a particular object (address) in the program.
The annotations that should be used by users are macros in all upper-case
(e.g., ANNOTATE_NEW_MEMORY).
Actual implementation of these macros may differ depending on the
dynamic analysis tool being used.
See http://code.google.com/p/data-race-test/ for more information.
This file supports the following dynamic analysis tools:
- None (DYNAMIC_ANNOTATIONS_ENABLED is not defined or zero).
Macros are defined empty.
- ThreadSanitizer, Helgrind, DRD (DYNAMIC_ANNOTATIONS_ENABLED is 1).
Macros are defined as calls to non-inlinable empty functions
that are intercepted by Valgrind. */
#ifndef BASE_DYNAMIC_ANNOTATIONS_H_
#define BASE_DYNAMIC_ANNOTATIONS_H_
#ifndef DYNAMIC_ANNOTATIONS_ENABLED
# define DYNAMIC_ANNOTATIONS_ENABLED 0
#endif
#if DYNAMIC_ANNOTATIONS_ENABLED != 0
/* -------------------------------------------------------------
Annotations useful when implementing condition variables such as CondVar,
using conditional critical sections (Await/LockWhen) and when constructing
user-defined synchronization mechanisms.
The annotations ANNOTATE_HAPPENS_BEFORE() and ANNOTATE_HAPPENS_AFTER() can
be used to define happens-before arcs in user-defined synchronization
mechanisms: the race detector will infer an arc from the former to the
latter when they share the same argument pointer.
Example 1 (reference counting):
void Unref() {
ANNOTATE_HAPPENS_BEFORE(&refcount_);
if (AtomicDecrementByOne(&refcount_) == 0) {
ANNOTATE_HAPPENS_AFTER(&refcount_);
delete this;
}
}
Example 2 (message queue):
void MyQueue::Put(Type *e) {
MutexLock lock(&mu_);
ANNOTATE_HAPPENS_BEFORE(e);
PutElementIntoMyQueue(e);
}
Type *MyQueue::Get() {
MutexLock lock(&mu_);
Type *e = GetElementFromMyQueue();
ANNOTATE_HAPPENS_AFTER(e);
return e;
}
Note: when possible, please use the existing reference counting and message
queue implementations instead of inventing new ones. */
/* Report that wait on the condition variable at address "cv" has succeeded
and the lock at address "lock" is held. */
#define ANNOTATE_CONDVAR_LOCK_WAIT(cv, lock) \
AnnotateCondVarWait(__FILE__, __LINE__, cv, lock)
/* Report that wait on the condition variable at "cv" has succeeded. Variant
w/o lock. */
#define ANNOTATE_CONDVAR_WAIT(cv) \
AnnotateCondVarWait(__FILE__, __LINE__, cv, NULL)
/* Report that we are about to signal on the condition variable at address
"cv". */
#define ANNOTATE_CONDVAR_SIGNAL(cv) \
AnnotateCondVarSignal(__FILE__, __LINE__, cv)
/* Report that we are about to signal_all on the condition variable at "cv". */
#define ANNOTATE_CONDVAR_SIGNAL_ALL(cv) \
AnnotateCondVarSignalAll(__FILE__, __LINE__, cv)
/* Annotations for user-defined synchronization mechanisms. */
#define ANNOTATE_HAPPENS_BEFORE(obj) ANNOTATE_CONDVAR_SIGNAL(obj)
#define ANNOTATE_HAPPENS_AFTER(obj) ANNOTATE_CONDVAR_WAIT(obj)
/* Report that the bytes in the range [pointer, pointer+size) are about
to be published safely. The race checker will create a happens-before
arc from the call ANNOTATE_PUBLISH_MEMORY_RANGE(pointer, size) to
subsequent accesses to this memory.
Note: this annotation may not work properly if the race detector uses
sampling, i.e. does not observe all memory accesses.
*/
#define ANNOTATE_PUBLISH_MEMORY_RANGE(pointer, size) \
AnnotatePublishMemoryRange(__FILE__, __LINE__, pointer, size)
/* DEPRECATED. Don't use it. */
#define ANNOTATE_UNPUBLISH_MEMORY_RANGE(pointer, size) \
AnnotateUnpublishMemoryRange(__FILE__, __LINE__, pointer, size)
/* DEPRECATED. Don't use it. */
#define ANNOTATE_SWAP_MEMORY_RANGE(pointer, size) \
do { \
ANNOTATE_UNPUBLISH_MEMORY_RANGE(pointer, size); \
ANNOTATE_PUBLISH_MEMORY_RANGE(pointer, size); \
} while (0)
/* Instruct the tool to create a happens-before arc between mu->Unlock() and
mu->Lock(). This annotation may slow down the race detector and hide real
races. Normally it is used only when it would be difficult to annotate each
of the mutex's critical sections individually using the annotations above.
This annotation makes sense only for hybrid race detectors. For pure
happens-before detectors this is a no-op. For more details see
http://code.google.com/p/data-race-test/wiki/PureHappensBeforeVsHybrid . */
#define ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX(mu) \
AnnotateMutexIsUsedAsCondVar(__FILE__, __LINE__, mu)
/* Deprecated. Use ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX. */
#define ANNOTATE_MUTEX_IS_USED_AS_CONDVAR(mu) \
AnnotateMutexIsUsedAsCondVar(__FILE__, __LINE__, mu)
/* -------------------------------------------------------------
Annotations useful when defining memory allocators, or when memory that
was protected in one way starts to be protected in another. */
/* Report that new memory at "address" of size "size" has been allocated.
This might be used when the memory has been retrieved from a free list and
is about to be reused, or when the locking discipline for a variable
changes. */
#define ANNOTATE_NEW_MEMORY(address, size) \
AnnotateNewMemory(__FILE__, __LINE__, address, size)
/* -------------------------------------------------------------
Annotations useful when defining FIFO queues that transfer data between
threads. */
/* Report that the producer-consumer queue (such as ProducerConsumerQueue) at
address "pcq" has been created. The ANNOTATE_PCQ_* annotations
should be used only for FIFO queues. For non-FIFO queues use
ANNOTATE_HAPPENS_BEFORE (for put) and ANNOTATE_HAPPENS_AFTER (for get). */
#define ANNOTATE_PCQ_CREATE(pcq) \
AnnotatePCQCreate(__FILE__, __LINE__, pcq)
/* Report that the queue at address "pcq" is about to be destroyed. */
#define ANNOTATE_PCQ_DESTROY(pcq) \
AnnotatePCQDestroy(__FILE__, __LINE__, pcq)
/* Report that we are about to put an element into a FIFO queue at address
"pcq". */
#define ANNOTATE_PCQ_PUT(pcq) \
AnnotatePCQPut(__FILE__, __LINE__, pcq)
/* Report that we've just got an element from a FIFO queue at address "pcq". */
#define ANNOTATE_PCQ_GET(pcq) \
AnnotatePCQGet(__FILE__, __LINE__, pcq)
/* -------------------------------------------------------------
Annotations that suppress errors. It is usually better to express the
program's synchronization using the other annotations, but these can
be used when all else fails. */
/* Report that we may have a benign race at "pointer", with size
"sizeof(*(pointer))". "pointer" must be a non-void* pointer. Insert at the
point where "pointer" has been allocated, preferably close to the point
where the race happens. See also ANNOTATE_BENIGN_RACE_STATIC. */
#define ANNOTATE_BENIGN_RACE(pointer, description) \
AnnotateBenignRaceSized(__FILE__, __LINE__, pointer, \
sizeof(*(pointer)), description)
/* Same as ANNOTATE_BENIGN_RACE(address, description), but applies to
the memory range [address, address+size). */
#define ANNOTATE_BENIGN_RACE_SIZED(address, size, description) \
AnnotateBenignRaceSized(__FILE__, __LINE__, address, size, description)
/* Request the analysis tool to ignore all reads in the current thread
until ANNOTATE_IGNORE_READS_END is called.
Useful to ignore intentional racey reads, while still checking
other reads and all writes.
See also ANNOTATE_UNPROTECTED_READ. */
#define ANNOTATE_IGNORE_READS_BEGIN() \
AnnotateIgnoreReadsBegin(__FILE__, __LINE__)
/* Stop ignoring reads. */
#define ANNOTATE_IGNORE_READS_END() \
AnnotateIgnoreReadsEnd(__FILE__, __LINE__)
/* Similar to ANNOTATE_IGNORE_READS_BEGIN, but ignore writes. */
#define ANNOTATE_IGNORE_WRITES_BEGIN() \
AnnotateIgnoreWritesBegin(__FILE__, __LINE__)
/* Stop ignoring writes. */
#define ANNOTATE_IGNORE_WRITES_END() \
AnnotateIgnoreWritesEnd(__FILE__, __LINE__)
/* Start ignoring all memory accesses (reads and writes). */
#define ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() \
do {\
ANNOTATE_IGNORE_READS_BEGIN();\
ANNOTATE_IGNORE_WRITES_BEGIN();\
}while(0)\
/* Stop ignoring all memory accesses. */
#define ANNOTATE_IGNORE_READS_AND_WRITES_END() \
do {\
ANNOTATE_IGNORE_WRITES_END();\
ANNOTATE_IGNORE_READS_END();\
}while(0)\
/* Enable (enable!=0) or disable (enable==0) race detection for all threads.
This annotation could be useful if you want to skip expensive race analysis
during some period of program execution, e.g. during initialization. */
#define ANNOTATE_ENABLE_RACE_DETECTION(enable) \
AnnotateEnableRaceDetection(__FILE__, __LINE__, enable)
/* -------------------------------------------------------------
Annotations useful for debugging. */
/* Request to trace every access to "address". */
#define ANNOTATE_TRACE_MEMORY(address) \
AnnotateTraceMemory(__FILE__, __LINE__, address)
/* Report the current thread name to a race detector. */
#define ANNOTATE_THREAD_NAME(name) \
AnnotateThreadName(__FILE__, __LINE__, name)
/* -------------------------------------------------------------
Annotations useful when implementing locks. They are not
normally needed by modules that merely use locks.
The "lock" argument is a pointer to the lock object. */
/* Report that a lock has been created at address "lock". */
#define ANNOTATE_RWLOCK_CREATE(lock) \
AnnotateRWLockCreate(__FILE__, __LINE__, lock)
/* Report that the lock at address "lock" is about to be destroyed. */
#define ANNOTATE_RWLOCK_DESTROY(lock) \
AnnotateRWLockDestroy(__FILE__, __LINE__, lock)
/* Report that the lock at address "lock" has been acquired.
is_w=1 for writer lock, is_w=0 for reader lock. */
#define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) \
AnnotateRWLockAcquired(__FILE__, __LINE__, lock, is_w)
/* Report that the lock at address "lock" is about to be released. */
#define ANNOTATE_RWLOCK_RELEASED(lock, is_w) \
AnnotateRWLockReleased(__FILE__, __LINE__, lock, is_w)
/* -------------------------------------------------------------
Annotations useful when implementing barriers. They are not
normally needed by modules that merely use barriers.
The "barrier" argument is a pointer to the barrier object. */
/* Report that the "barrier" has been initialized with initial "count".
If 'reinitialization_allowed' is true, initialization is allowed to happen
multiple times w/o calling barrier_destroy() */
#define ANNOTATE_BARRIER_INIT(barrier, count, reinitialization_allowed) \
AnnotateBarrierInit(__FILE__, __LINE__, barrier, count, \
reinitialization_allowed)
/* Report that we are about to enter barrier_wait("barrier"). */
#define ANNOTATE_BARRIER_WAIT_BEFORE(barrier) \
AnnotateBarrierWaitBefore(__FILE__, __LINE__, barrier)
/* Report that we just exited barrier_wait("barrier"). */
#define ANNOTATE_BARRIER_WAIT_AFTER(barrier) \
AnnotateBarrierWaitAfter(__FILE__, __LINE__, barrier)
/* Report that the "barrier" has been destroyed. */
#define ANNOTATE_BARRIER_DESTROY(barrier) \
AnnotateBarrierDestroy(__FILE__, __LINE__, barrier)
/* -------------------------------------------------------------
Annotations useful for testing race detectors. */
/* Report that we expect a race on the variable at "address".
Use only in unit tests for a race detector. */
#define ANNOTATE_EXPECT_RACE(address, description) \
AnnotateExpectRace(__FILE__, __LINE__, address, description)
/* A no-op. Insert where you like to test the interceptors. */
#define ANNOTATE_NO_OP(arg) \
AnnotateNoOp(__FILE__, __LINE__, arg)
/* Force the race detector to flush its state. The actual effect depends on
* the implementation of the detector. */
#define ANNOTATE_FLUSH_STATE() \
AnnotateFlushState(__FILE__, __LINE__)
#else /* DYNAMIC_ANNOTATIONS_ENABLED == 0 */
#define ANNOTATE_RWLOCK_CREATE(lock) /* empty */
#define ANNOTATE_RWLOCK_DESTROY(lock) /* empty */
#define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) /* empty */
#define ANNOTATE_RWLOCK_RELEASED(lock, is_w) /* empty */
#define ANNOTATE_BARRIER_INIT(barrier, count, reinitialization_allowed) /* */
#define ANNOTATE_BARRIER_WAIT_BEFORE(barrier) /* empty */
#define ANNOTATE_BARRIER_WAIT_AFTER(barrier) /* empty */
#define ANNOTATE_BARRIER_DESTROY(barrier) /* empty */
#define ANNOTATE_CONDVAR_LOCK_WAIT(cv, lock) /* empty */
#define ANNOTATE_CONDVAR_WAIT(cv) /* empty */
#define ANNOTATE_CONDVAR_SIGNAL(cv) /* empty */
#define ANNOTATE_CONDVAR_SIGNAL_ALL(cv) /* empty */
#define ANNOTATE_HAPPENS_BEFORE(obj) /* empty */
#define ANNOTATE_HAPPENS_AFTER(obj) /* empty */
#define ANNOTATE_PUBLISH_MEMORY_RANGE(address, size) /* empty */
#define ANNOTATE_UNPUBLISH_MEMORY_RANGE(address, size) /* empty */
#define ANNOTATE_SWAP_MEMORY_RANGE(address, size) /* empty */
#define ANNOTATE_PCQ_CREATE(pcq) /* empty */
#define ANNOTATE_PCQ_DESTROY(pcq) /* empty */
#define ANNOTATE_PCQ_PUT(pcq) /* empty */
#define ANNOTATE_PCQ_GET(pcq) /* empty */
#define ANNOTATE_NEW_MEMORY(address, size) /* empty */
#define ANNOTATE_EXPECT_RACE(address, description) /* empty */
#define ANNOTATE_BENIGN_RACE(address, description) /* empty */
#define ANNOTATE_BENIGN_RACE_SIZED(address, size, description) /* empty */
#define ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX(mu) /* empty */
#define ANNOTATE_MUTEX_IS_USED_AS_CONDVAR(mu) /* empty */
#define ANNOTATE_TRACE_MEMORY(arg) /* empty */
#define ANNOTATE_THREAD_NAME(name) /* empty */
#define ANNOTATE_IGNORE_READS_BEGIN() /* empty */
#define ANNOTATE_IGNORE_READS_END() /* empty */
#define ANNOTATE_IGNORE_WRITES_BEGIN() /* empty */
#define ANNOTATE_IGNORE_WRITES_END() /* empty */
#define ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() /* empty */
#define ANNOTATE_IGNORE_READS_AND_WRITES_END() /* empty */
#define ANNOTATE_ENABLE_RACE_DETECTION(enable) /* empty */
#define ANNOTATE_NO_OP(arg) /* empty */
#define ANNOTATE_FLUSH_STATE() /* empty */
#endif /* DYNAMIC_ANNOTATIONS_ENABLED */
/* Macro definitions for GCC attributes that allow static thread safety
analysis to recognize and use some of the dynamic annotations as
escape hatches.
TODO(lcwu): remove the check for __SUPPORT_DYN_ANNOTATION__ once the
default crosstool/GCC supports these GCC attributes. */
#define ANNOTALYSIS_STATIC_INLINE
#define ANNOTALYSIS_SEMICOLON_OR_EMPTY_BODY ;
#define ANNOTALYSIS_IGNORE_READS_BEGIN
#define ANNOTALYSIS_IGNORE_READS_END
#define ANNOTALYSIS_IGNORE_WRITES_BEGIN
#define ANNOTALYSIS_IGNORE_WRITES_END
#define ANNOTALYSIS_UNPROTECTED_READ
#if defined(__GNUC__) && (!defined(SWIG)) && (!defined(__clang__)) && \
defined(__SUPPORT_TS_ANNOTATION__) && defined(__SUPPORT_DYN_ANNOTATION__)
#if DYNAMIC_ANNOTATIONS_ENABLED == 0
#define ANNOTALYSIS_ONLY 1
#undef ANNOTALYSIS_STATIC_INLINE
#define ANNOTALYSIS_STATIC_INLINE static inline
#undef ANNOTALYSIS_SEMICOLON_OR_EMPTY_BODY
#define ANNOTALYSIS_SEMICOLON_OR_EMPTY_BODY { (void)file; (void)line; }
#endif
/* Only emit attributes when annotalysis is enabled. */
#if defined(__SUPPORT_TS_ANNOTATION__) && defined(__SUPPORT_DYN_ANNOTATION__)
#undef ANNOTALYSIS_IGNORE_READS_BEGIN
#define ANNOTALYSIS_IGNORE_READS_BEGIN __attribute__ ((ignore_reads_begin))
#undef ANNOTALYSIS_IGNORE_READS_END
#define ANNOTALYSIS_IGNORE_READS_END __attribute__ ((ignore_reads_end))
#undef ANNOTALYSIS_IGNORE_WRITES_BEGIN
#define ANNOTALYSIS_IGNORE_WRITES_BEGIN __attribute__ ((ignore_writes_begin))
#undef ANNOTALYSIS_IGNORE_WRITES_END
#define ANNOTALYSIS_IGNORE_WRITES_END __attribute__ ((ignore_writes_end))
#undef ANNOTALYSIS_UNPROTECTED_READ
#define ANNOTALYSIS_UNPROTECTED_READ __attribute__ ((unprotected_read))
#endif
#endif // defined(__GNUC__) && (!defined(SWIG)) && (!defined(__clang__))
/* Use the macros above rather than using these functions directly. */
#ifdef __cplusplus
extern "C" {
#endif
void AnnotateRWLockCreate(const char *file, int line,
const volatile void *lock);
void AnnotateRWLockDestroy(const char *file, int line,
const volatile void *lock);
void AnnotateRWLockAcquired(const char *file, int line,
const volatile void *lock, long is_w);
void AnnotateRWLockReleased(const char *file, int line,
const volatile void *lock, long is_w);
void AnnotateBarrierInit(const char *file, int line,
const volatile void *barrier, long count,
long reinitialization_allowed);
void AnnotateBarrierWaitBefore(const char *file, int line,
const volatile void *barrier);
void AnnotateBarrierWaitAfter(const char *file, int line,
const volatile void *barrier);
void AnnotateBarrierDestroy(const char *file, int line,
const volatile void *barrier);
void AnnotateCondVarWait(const char *file, int line,
const volatile void *cv,
const volatile void *lock);
void AnnotateCondVarSignal(const char *file, int line,
const volatile void *cv);
void AnnotateCondVarSignalAll(const char *file, int line,
const volatile void *cv);
void AnnotatePublishMemoryRange(const char *file, int line,
const volatile void *address,
long size);
void AnnotateUnpublishMemoryRange(const char *file, int line,
const volatile void *address,
long size);
void AnnotatePCQCreate(const char *file, int line,
const volatile void *pcq);
void AnnotatePCQDestroy(const char *file, int line,
const volatile void *pcq);
void AnnotatePCQPut(const char *file, int line,
const volatile void *pcq);
void AnnotatePCQGet(const char *file, int line,
const volatile void *pcq);
void AnnotateNewMemory(const char *file, int line,
const volatile void *address,
long size);
void AnnotateExpectRace(const char *file, int line,
const volatile void *address,
const char *description);
void AnnotateBenignRace(const char *file, int line,
const volatile void *address,
const char *description);
void AnnotateBenignRaceSized(const char *file, int line,
const volatile void *address,
long size,
const char *description);
void AnnotateMutexIsUsedAsCondVar(const char *file, int line,
const volatile void *mu);
void AnnotateTraceMemory(const char *file, int line,
const volatile void *arg);
void AnnotateThreadName(const char *file, int line,
const char *name);
ANNOTALYSIS_STATIC_INLINE
void AnnotateIgnoreReadsBegin(const char *file, int line)
ANNOTALYSIS_IGNORE_READS_BEGIN ANNOTALYSIS_SEMICOLON_OR_EMPTY_BODY
ANNOTALYSIS_STATIC_INLINE
void AnnotateIgnoreReadsEnd(const char *file, int line)
ANNOTALYSIS_IGNORE_READS_END ANNOTALYSIS_SEMICOLON_OR_EMPTY_BODY
ANNOTALYSIS_STATIC_INLINE
void AnnotateIgnoreWritesBegin(const char *file, int line)
ANNOTALYSIS_IGNORE_WRITES_BEGIN ANNOTALYSIS_SEMICOLON_OR_EMPTY_BODY
ANNOTALYSIS_STATIC_INLINE
void AnnotateIgnoreWritesEnd(const char *file, int line)
ANNOTALYSIS_IGNORE_WRITES_END ANNOTALYSIS_SEMICOLON_OR_EMPTY_BODY
void AnnotateEnableRaceDetection(const char *file, int line, int enable);
void AnnotateNoOp(const char *file, int line,
const volatile void *arg);
void AnnotateFlushState(const char *file, int line);
/* Return non-zero value if running under valgrind.
If "valgrind.h" is included into dynamic_annotations.c,
the regular valgrind mechanism will be used.
See http://valgrind.org/docs/manual/manual-core-adv.html about
RUNNING_ON_VALGRIND and other valgrind "client requests".
The file "valgrind.h" may be obtained by doing
svn co svn://svn.valgrind.org/valgrind/trunk/include
If for some reason you can't use "valgrind.h" or want to fake valgrind,
there are two ways to make this function return non-zero:
- Use environment variable: export RUNNING_ON_VALGRIND=1
- Make your tool intercept the function RunningOnValgrind() and
change its return value.
*/
int RunningOnValgrind(void);
/* ValgrindSlowdown returns:
* 1.0, if (RunningOnValgrind() == 0)
* 50.0, if (RunningOnValgrind() != 0 && getenv("VALGRIND_SLOWDOWN") == NULL)
* atof(getenv("VALGRIND_SLOWDOWN")) otherwise
This function can be used to scale timeout values:
EXAMPLE:
for (;;) {
DoExpensiveBackgroundTask();
SleepForSeconds(5 * ValgrindSlowdown());
}
*/
double ValgrindSlowdown(void);
#ifdef __cplusplus
}
#endif
#if DYNAMIC_ANNOTATIONS_ENABLED != 0 && defined(__cplusplus)
/* ANNOTATE_UNPROTECTED_READ is the preferred way to annotate racey reads.
Instead of doing
ANNOTATE_IGNORE_READS_BEGIN();
... = x;
ANNOTATE_IGNORE_READS_END();
one can use
... = ANNOTATE_UNPROTECTED_READ(x); */
template <class T>
inline T ANNOTATE_UNPROTECTED_READ(const volatile T &x)
ANNOTALYSIS_UNPROTECTED_READ {
ANNOTATE_IGNORE_READS_BEGIN();
T res = x;
ANNOTATE_IGNORE_READS_END();
return res;
}
/* Apply ANNOTATE_BENIGN_RACE_SIZED to a static variable. */
#define ANNOTATE_BENIGN_RACE_STATIC(static_var, description) \
namespace { \
class static_var ## _annotator { \
public: \
static_var ## _annotator() { \
ANNOTATE_BENIGN_RACE_SIZED(&static_var, \
sizeof(static_var), \
# static_var ": " description); \
} \
}; \
static static_var ## _annotator the ## static_var ## _annotator;\
}
#else /* DYNAMIC_ANNOTATIONS_ENABLED == 0 */
#define ANNOTATE_UNPROTECTED_READ(x) (x)
#define ANNOTATE_BENIGN_RACE_STATIC(static_var, description) /* empty */
#endif /* DYNAMIC_ANNOTATIONS_ENABLED */
/* Annotalysis, a GCC based static analyzer, is able to understand and use
some of the dynamic annotations defined in this file. However, dynamic
annotations are usually disabled in the opt mode (to avoid additional
runtime overheads) while Annotalysis only works in the opt mode.
In order for Annotalysis to use these dynamic annotations when they
are disabled, we re-define these annotations here. Note that unlike the
original macro definitions above, these macros are expanded to calls to
static inline functions so that the compiler will be able to remove the
calls after the analysis. */
#ifdef ANNOTALYSIS_ONLY
#undef ANNOTALYSIS_ONLY
/* Undefine and re-define the macros that the static analyzer understands. */
#undef ANNOTATE_IGNORE_READS_BEGIN
#define ANNOTATE_IGNORE_READS_BEGIN() \
AnnotateIgnoreReadsBegin(__FILE__, __LINE__)
#undef ANNOTATE_IGNORE_READS_END
#define ANNOTATE_IGNORE_READS_END() \
AnnotateIgnoreReadsEnd(__FILE__, __LINE__)
#undef ANNOTATE_IGNORE_WRITES_BEGIN
#define ANNOTATE_IGNORE_WRITES_BEGIN() \
AnnotateIgnoreWritesBegin(__FILE__, __LINE__)
#undef ANNOTATE_IGNORE_WRITES_END
#define ANNOTATE_IGNORE_WRITES_END() \
AnnotateIgnoreWritesEnd(__FILE__, __LINE__)
#undef ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN
#define ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() \
do { \
ANNOTATE_IGNORE_READS_BEGIN(); \
ANNOTATE_IGNORE_WRITES_BEGIN(); \
}while(0) \
#undef ANNOTATE_IGNORE_READS_AND_WRITES_END
#define ANNOTATE_IGNORE_READS_AND_WRITES_END() \
do { \
ANNOTATE_IGNORE_WRITES_END(); \
ANNOTATE_IGNORE_READS_END(); \
}while(0) \
#if defined(__cplusplus)
#undef ANNOTATE_UNPROTECTED_READ
template <class T>
inline T ANNOTATE_UNPROTECTED_READ(const volatile T &x)
ANNOTALYSIS_UNPROTECTED_READ {
ANNOTATE_IGNORE_READS_BEGIN();
T res = x;
ANNOTATE_IGNORE_READS_END();
return res;
}
#endif /* __cplusplus */
#endif /* ANNOTALYSIS_ONLY */
/* Undefine the macros intended only in this file. */
#undef ANNOTALYSIS_STATIC_INLINE
#undef ANNOTALYSIS_SEMICOLON_OR_EMPTY_BODY
#endif /* BASE_DYNAMIC_ANNOTATIONS_H_ */
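To make the suppression macros above concrete, here is a hedged sketch of the common "intentionally racy progress flag" pattern; the flag, the writer, and the poll function are invented for illustration, and with DYNAMIC_ANNOTATIONS_ENABLED == 0 every annotation compiles away to a plain access:

#include "base/dynamic_annotations.h"

// Illustrative only: one thread sets the flag, another polls it without
// a lock; the annotations tell the race detector this is deliberate.
static int g_done = 0;
ANNOTATE_BENIGN_RACE_STATIC(g_done, "intentionally racy progress flag")

void FinishWork() {
  g_done = 1;  // racy write, suppressed by the static annotation above
}

bool WorkIsDone() {
  // Reads the flag while telling the detector to ignore this access.
  return ANNOTATE_UNPROTECTED_READ(g_done) != 0;
}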

View File

@ -1,443 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2008, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
// Author: Paul Pluzhnikov
//
// Allow dynamic symbol lookup in an in-memory Elf image.
//
#include "base/elf_mem_image.h"
#ifdef HAVE_ELF_MEM_IMAGE // defined in elf_mem_image.h
#include <stddef.h> // for size_t, ptrdiff_t
#include "base/logging.h"
// From binutils/include/elf/common.h (this doesn't appear to be documented
// anywhere else).
//
// /* This flag appears in a Versym structure. It means that the symbol
// is hidden, and is only visible with an explicit version number.
// This is a GNU extension. */
// #define VERSYM_HIDDEN 0x8000
//
// /* This is the mask for the rest of the Versym information. */
// #define VERSYM_VERSION 0x7fff
#define VERSYM_VERSION 0x7fff
#if __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-const-variable"
#endif
namespace base {
namespace {
template <int N> class ElfClass {
public:
static const int kElfClass = -1;
static int ElfBind(const ElfW(Sym) *) {
CHECK(false); // << "Unexpected word size";
return 0;
}
static int ElfType(const ElfW(Sym) *) {
CHECK(false); // << "Unexpected word size";
return 0;
}
};
template <> class ElfClass<32> {
public:
static const int kElfClass = ELFCLASS32;
static int ElfBind(const ElfW(Sym) *symbol) {
return ELF32_ST_BIND(symbol->st_info);
}
static int ElfType(const ElfW(Sym) *symbol) {
return ELF32_ST_TYPE(symbol->st_info);
}
};
template <> class ElfClass<64> {
public:
static const int kElfClass = ELFCLASS64;
static int ElfBind(const ElfW(Sym) *symbol) {
return ELF64_ST_BIND(symbol->st_info);
}
static int ElfType(const ElfW(Sym) *symbol) {
return ELF64_ST_TYPE(symbol->st_info);
}
};
typedef ElfClass<__WORDSIZE> CurrentElfClass;
// Extract an element from one of the ELF tables, cast it to desired type.
// This is just simple arithmetic and a glorified cast.
// Callers are responsible for bounds checking.
template <class T>
const T* GetTableElement(const ElfW(Ehdr) *ehdr,
ElfW(Off) table_offset,
ElfW(Word) element_size,
size_t index) {
return reinterpret_cast<const T*>(reinterpret_cast<const char *>(ehdr)
+ table_offset
+ index * element_size);
}
} // namespace
const void *const ElfMemImage::kInvalidBase =
reinterpret_cast<const void *>(~0L);
ElfMemImage::ElfMemImage(const void *base) {
CHECK(base != kInvalidBase);
Init(base);
}
int ElfMemImage::GetNumSymbols() const {
if (!hash_) {
return 0;
}
// See http://www.caldera.com/developers/gabi/latest/ch5.dynamic.html#hash
return hash_[1];
}
const ElfW(Sym) *ElfMemImage::GetDynsym(int index) const {
CHECK_LT(index, GetNumSymbols());
return dynsym_ + index;
}
const ElfW(Versym) *ElfMemImage::GetVersym(int index) const {
CHECK_LT(index, GetNumSymbols());
return versym_ + index;
}
const ElfW(Phdr) *ElfMemImage::GetPhdr(int index) const {
CHECK_LT(index, ehdr_->e_phnum);
return GetTableElement<ElfW(Phdr)>(ehdr_,
ehdr_->e_phoff,
ehdr_->e_phentsize,
index);
}
const char *ElfMemImage::GetDynstr(ElfW(Word) offset) const {
CHECK_LT(offset, strsize_);
return dynstr_ + offset;
}
const void *ElfMemImage::GetSymAddr(const ElfW(Sym) *sym) const {
if (sym->st_shndx == SHN_UNDEF || sym->st_shndx >= SHN_LORESERVE) {
// Symbol corresponds to a "special" (e.g. SHN_ABS) section.
return reinterpret_cast<const void *>(sym->st_value);
}
CHECK_LT(link_base_, sym->st_value);
return GetTableElement<char>(ehdr_, 0, 1, sym->st_value) - link_base_;
}
const ElfW(Verdef) *ElfMemImage::GetVerdef(int index) const {
CHECK_LE(index, verdefnum_);
const ElfW(Verdef) *version_definition = verdef_;
while (version_definition->vd_ndx < index && version_definition->vd_next) {
const char *const version_definition_as_char =
reinterpret_cast<const char *>(version_definition);
version_definition =
reinterpret_cast<const ElfW(Verdef) *>(version_definition_as_char +
version_definition->vd_next);
}
return version_definition->vd_ndx == index ? version_definition : NULL;
}
const ElfW(Verdaux) *ElfMemImage::GetVerdefAux(
const ElfW(Verdef) *verdef) const {
return reinterpret_cast<const ElfW(Verdaux) *>(verdef+1);
}
const char *ElfMemImage::GetVerstr(ElfW(Word) offset) const {
CHECK_LT(offset, strsize_);
return dynstr_ + offset;
}
void ElfMemImage::Init(const void *base) {
ehdr_ = NULL;
dynsym_ = NULL;
dynstr_ = NULL;
versym_ = NULL;
verdef_ = NULL;
hash_ = NULL;
strsize_ = 0;
verdefnum_ = 0;
link_base_ = ~0L; // Sentinel: PT_LOAD .p_vaddr can't possibly be this.
if (!base) {
return;
}
const intptr_t base_as_uintptr_t = reinterpret_cast<uintptr_t>(base);
// Fake VDSO has low bit set.
const bool fake_vdso = ((base_as_uintptr_t & 1) != 0);
base = reinterpret_cast<const void *>(base_as_uintptr_t & ~1);
const char *const base_as_char = reinterpret_cast<const char *>(base);
if (base_as_char[EI_MAG0] != ELFMAG0 || base_as_char[EI_MAG1] != ELFMAG1 ||
base_as_char[EI_MAG2] != ELFMAG2 || base_as_char[EI_MAG3] != ELFMAG3) {
RAW_DCHECK(false, "no ELF magic"); // at %p", base);
return;
}
int elf_class = base_as_char[EI_CLASS];
if (elf_class != CurrentElfClass::kElfClass) {
DCHECK_EQ(elf_class, CurrentElfClass::kElfClass);
return;
}
switch (base_as_char[EI_DATA]) {
case ELFDATA2LSB: {
if (__LITTLE_ENDIAN != __BYTE_ORDER) {
DCHECK_EQ(__LITTLE_ENDIAN, __BYTE_ORDER); // << ": wrong byte order";
return;
}
break;
}
case ELFDATA2MSB: {
if (__BIG_ENDIAN != __BYTE_ORDER) {
DCHECK_EQ(__BIG_ENDIAN, __BYTE_ORDER); // << ": wrong byte order";
return;
}
break;
}
default: {
RAW_DCHECK(false, "unexpected data encoding"); // << base_as_char[EI_DATA];
return;
}
}
ehdr_ = reinterpret_cast<const ElfW(Ehdr) *>(base);
const ElfW(Phdr) *dynamic_program_header = NULL;
for (int i = 0; i < ehdr_->e_phnum; ++i) {
const ElfW(Phdr) *const program_header = GetPhdr(i);
switch (program_header->p_type) {
case PT_LOAD:
if (link_base_ == ~0L) {
link_base_ = program_header->p_vaddr;
}
break;
case PT_DYNAMIC:
dynamic_program_header = program_header;
break;
}
}
if (link_base_ == ~0L || !dynamic_program_header) {
RAW_DCHECK(~0L != link_base_, "no PT_LOADs in VDSO");
RAW_DCHECK(dynamic_program_header, "no PT_DYNAMIC in VDSO");
// Mark this image as not present. Cannot recurse infinitely.
Init(0);
return;
}
ptrdiff_t relocation =
base_as_char - reinterpret_cast<const char *>(link_base_);
ElfW(Dyn) *dynamic_entry =
reinterpret_cast<ElfW(Dyn) *>(dynamic_program_header->p_vaddr +
relocation);
for (; dynamic_entry->d_tag != DT_NULL; ++dynamic_entry) {
ElfW(Xword) value = dynamic_entry->d_un.d_val;
if (fake_vdso) {
// A complication: in the real VDSO, dynamic entries are not relocated
// (it wasn't loaded by a dynamic loader). But when testing with a
// "fake" dlopen()ed vdso library, the loader relocates some (but
// not all!) of them before we get here.
if (dynamic_entry->d_tag == DT_VERDEF) {
// The only dynamic entry (of the ones we care about) libc-2.3.6
// loader doesn't relocate.
value += relocation;
}
} else {
// Real VDSO. Everything needs to be relocated.
value += relocation;
}
switch (dynamic_entry->d_tag) {
case DT_HASH:
hash_ = reinterpret_cast<ElfW(Word) *>(value);
break;
case DT_SYMTAB:
dynsym_ = reinterpret_cast<ElfW(Sym) *>(value);
break;
case DT_STRTAB:
dynstr_ = reinterpret_cast<const char *>(value);
break;
case DT_VERSYM:
versym_ = reinterpret_cast<ElfW(Versym) *>(value);
break;
case DT_VERDEF:
verdef_ = reinterpret_cast<ElfW(Verdef) *>(value);
break;
case DT_VERDEFNUM:
verdefnum_ = dynamic_entry->d_un.d_val;
break;
case DT_STRSZ:
strsize_ = dynamic_entry->d_un.d_val;
break;
default:
// Unrecognized entries explicitly ignored.
break;
}
}
if (!hash_ || !dynsym_ || !dynstr_ || !versym_ ||
!verdef_ || !verdefnum_ || !strsize_) {
RAW_DCHECK(hash_, "invalid VDSO (no DT_HASH)");
RAW_DCHECK(dynsym_, "invalid VDSO (no DT_SYMTAB)");
RAW_DCHECK(dynstr_, "invalid VDSO (no DT_STRTAB)");
RAW_DCHECK(versym_, "invalid VDSO (no DT_VERSYM)");
RAW_DCHECK(verdef_, "invalid VDSO (no DT_VERDEF)");
RAW_DCHECK(verdefnum_, "invalid VDSO (no DT_VERDEFNUM)");
RAW_DCHECK(strsize_, "invalid VDSO (no DT_STRSZ)");
// Mark this image as not present. Cannot recurse infinitely.
Init(0);
return;
}
}
bool ElfMemImage::LookupSymbol(const char *name,
const char *version,
int type,
SymbolInfo *info) const {
for (SymbolIterator it = begin(); it != end(); ++it) {
if (strcmp(it->name, name) == 0 && strcmp(it->version, version) == 0 &&
CurrentElfClass::ElfType(it->symbol) == type) {
if (info) {
*info = *it;
}
return true;
}
}
return false;
}
bool ElfMemImage::LookupSymbolByAddress(const void *address,
SymbolInfo *info_out) const {
for (SymbolIterator it = begin(); it != end(); ++it) {
const char *const symbol_start =
reinterpret_cast<const char *>(it->address);
const char *const symbol_end = symbol_start + it->symbol->st_size;
if (symbol_start <= address && address < symbol_end) {
if (info_out) {
// Client wants to know details for that symbol (the usual case).
if (CurrentElfClass::ElfBind(it->symbol) == STB_GLOBAL) {
// Strong symbol; just return it.
*info_out = *it;
return true;
} else {
// Weak or local. Record it, but keep looking for a strong one.
*info_out = *it;
}
} else {
// Client only cares if there is an overlapping symbol.
return true;
}
}
}
return false;
}
ElfMemImage::SymbolIterator::SymbolIterator(const void *const image, int index)
: index_(index), image_(image) {
}
const ElfMemImage::SymbolInfo *ElfMemImage::SymbolIterator::operator->() const {
return &info_;
}
const ElfMemImage::SymbolInfo& ElfMemImage::SymbolIterator::operator*() const {
return info_;
}
bool ElfMemImage::SymbolIterator::operator==(const SymbolIterator &rhs) const {
return this->image_ == rhs.image_ && this->index_ == rhs.index_;
}
bool ElfMemImage::SymbolIterator::operator!=(const SymbolIterator &rhs) const {
return !(*this == rhs);
}
ElfMemImage::SymbolIterator &ElfMemImage::SymbolIterator::operator++() {
this->Update(1);
return *this;
}
ElfMemImage::SymbolIterator ElfMemImage::begin() const {
SymbolIterator it(this, 0);
it.Update(0);
return it;
}
ElfMemImage::SymbolIterator ElfMemImage::end() const {
return SymbolIterator(this, GetNumSymbols());
}
void ElfMemImage::SymbolIterator::Update(int increment) {
const ElfMemImage *image = reinterpret_cast<const ElfMemImage *>(image_);
CHECK(image->IsPresent() || increment == 0);
if (!image->IsPresent()) {
return;
}
index_ += increment;
if (index_ >= image->GetNumSymbols()) {
index_ = image->GetNumSymbols();
return;
}
const ElfW(Sym) *symbol = image->GetDynsym(index_);
const ElfW(Versym) *version_symbol = image->GetVersym(index_);
CHECK(symbol && version_symbol);
const char *const symbol_name = image->GetDynstr(symbol->st_name);
const ElfW(Versym) version_index = version_symbol[0] & VERSYM_VERSION;
const ElfW(Verdef) *version_definition = NULL;
const char *version_name = "";
if (symbol->st_shndx == SHN_UNDEF) {
// Undefined symbols reference DT_VERNEED, not DT_VERDEF, and
// version_index could well be greater than verdefnum_, so calling
// GetVerdef(version_index) may trigger assertion.
} else {
version_definition = image->GetVerdef(version_index);
}
if (version_definition) {
// I am expecting 1 or 2 auxiliary entries: 1 for the version itself,
// optional 2nd if the version has a parent.
CHECK_LE(1, version_definition->vd_cnt);
CHECK_LE(version_definition->vd_cnt, 2);
const ElfW(Verdaux) *version_aux = image->GetVerdefAux(version_definition);
version_name = image->GetVerstr(version_aux->vda_name);
}
info_.name = symbol_name;
info_.version = version_name;
info_.address = image->GetSymAddr(symbol);
info_.symbol = symbol;
}
} // namespace base
#if __clang__
#pragma clang diagnostic pop
#endif
#endif // HAVE_ELF_MEM_IMAGE
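The class's primary consumer is vDSO symbol resolution. A sketch under stated assumptions: the base address comes from getauxval(AT_SYSINFO_EHDR) (glibc 2.16+), and the symbol name/version pair below follows the x86-64 Linux convention and varies by kernel and architecture:

#include <stdio.h>
#include <sys/auxv.h>  // for getauxval, AT_SYSINFO_EHDR (glibc 2.16+)
#include "base/elf_mem_image.h"

int main() {
  // The kernel publishes the vDSO base address in the auxiliary vector.
  const void *vdso_base =
      reinterpret_cast<const void *>(getauxval(AT_SYSINFO_EHDR));
  if (vdso_base == NULL) return 1;

  base::ElfMemImage image(vdso_base);
  base::ElfMemImage::SymbolInfo info;
  if (image.LookupSymbol("__vdso_clock_gettime", "LINUX_2.6",
                         STT_FUNC, &info)) {
    printf("%s@%s resolved to %p\n", info.name, info.version, info.address);
  }
  return 0;
}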

View File

@ -1,135 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2008, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
// Author: Paul Pluzhnikov
//
// Allow dynamic symbol lookup for in-memory Elf images.
#ifndef BASE_ELF_MEM_IMAGE_H_
#define BASE_ELF_MEM_IMAGE_H_
#include "../config.h"
#ifdef HAVE_FEATURES_H
#include <features.h> // for __GLIBC__
#endif
// Maybe one day we can rewrite this file not to require the elf
// symbol extensions in glibc, but for right now we need them.
#if defined(__ELF__) && defined(__GLIBC__) && !defined(__native_client__)
#define HAVE_ELF_MEM_IMAGE 1
#include <stdlib.h>
#include <link.h> // for ElfW
namespace base {
// An in-memory ELF image (may not exist on disk).
class ElfMemImage {
public:
// Sentinel: there could never be an elf image at this address.
static const void *const kInvalidBase;
// Information about a single vdso symbol.
// All pointers are into .dynsym, .dynstr, or .text of the VDSO.
// Do not free() them or modify through them.
struct SymbolInfo {
const char *name; // E.g. "__vdso_getcpu"
const char *version; // E.g. "LINUX_2.6", could be ""
// for unversioned symbol.
const void *address; // Relocated symbol address.
const ElfW(Sym) *symbol; // Symbol in the dynamic symbol table.
};
// Supports iteration over all dynamic symbols.
class SymbolIterator {
public:
friend class ElfMemImage;
const SymbolInfo *operator->() const;
const SymbolInfo &operator*() const;
SymbolIterator& operator++();
bool operator!=(const SymbolIterator &rhs) const;
bool operator==(const SymbolIterator &rhs) const;
private:
SymbolIterator(const void *const image, int index);
void Update(int incr);
SymbolInfo info_;
int index_;
const void *const image_;
};
explicit ElfMemImage(const void *base);
void Init(const void *base);
bool IsPresent() const { return ehdr_ != NULL; }
const ElfW(Phdr)* GetPhdr(int index) const;
const ElfW(Sym)* GetDynsym(int index) const;
const ElfW(Versym)* GetVersym(int index) const;
const ElfW(Verdef)* GetVerdef(int index) const;
const ElfW(Verdaux)* GetVerdefAux(const ElfW(Verdef) *verdef) const;
const char* GetDynstr(ElfW(Word) offset) const;
const void* GetSymAddr(const ElfW(Sym) *sym) const;
const char* GetVerstr(ElfW(Word) offset) const;
int GetNumSymbols() const;
SymbolIterator begin() const;
SymbolIterator end() const;
// Look up versioned dynamic symbol in the image.
// Returns false if image is not present, or doesn't contain given
// symbol/version/type combination.
// If info_out != NULL, additional details are filled in.
bool LookupSymbol(const char *name, const char *version,
int symbol_type, SymbolInfo *info_out) const;
// Find info about symbol (if any) which overlaps given address.
// Returns true if symbol was found; false if image isn't present
// or doesn't have a symbol overlapping given address.
// If info_out != NULL, additional details are filled in.
bool LookupSymbolByAddress(const void *address, SymbolInfo *info_out) const;
private:
const ElfW(Ehdr) *ehdr_;
const ElfW(Sym) *dynsym_;
const ElfW(Versym) *versym_;
const ElfW(Verdef) *verdef_;
const ElfW(Word) *hash_;
const char *dynstr_;
size_t strsize_;
size_t verdefnum_;
ElfW(Addr) link_base_; // Link-time base (p_vaddr of first PT_LOAD).
};
} // namespace base
#endif // __ELF__ and __GLIBC__ and !__native_client__
#endif // BASE_ELF_MEM_IMAGE_H_
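SymbolIterator also supports plain enumeration; a short sketch, assuming an image constructed as in the previous example:

// List every dynamic symbol in the image. If the image is not present,
// begin() == end() and the loop body never runs.
for (base::ElfMemImage::SymbolIterator it = image.begin();
     it != image.end(); ++it) {
  printf("%s%s%s\n", it->name, it->version[0] ? "@" : "", it->version);
}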

View File

@ -1,401 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2005-2008, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ---
* Author: Markus Gutschke, Carl Crous
*/
#ifndef _ELFCORE_H
#define _ELFCORE_H
#ifdef __cplusplus
extern "C" {
#endif
/* We currently only support x86-32, x86-64, ARM, MIPS, PPC on Linux.
* Porting to other related platforms should not be difficult.
*/
#if (defined(__i386__) || defined(__x86_64__) || defined(__arm__) || \
defined(__mips__) || defined(__PPC__)) && defined(__linux)
#include <stdarg.h>
#include <stdint.h>
#include <sys/types.h>
#include "../config.h"
/* Define the DUMPER symbol to make sure that there is exactly one
* core dumper built into the library.
*/
#define DUMPER "ELF"
/* By the time that we get a chance to read CPU registers in the
* calling thread, they are already in a not particularly useful
* state. Besides, there will be multiple frames on the stack that are
* just making the core file confusing. To fix this problem, we take a
* snapshot of the frame pointer, stack pointer, and instruction
* pointer at an earlier time, and then insert these values into the
* core file.
*/
#if defined(__i386__) || defined(__x86_64__)
typedef struct i386_regs { /* Normal (non-FPU) CPU registers */
#ifdef __x86_64__
#define BP rbp
#define SP rsp
#define IP rip
uint64_t r15,r14,r13,r12,rbp,rbx,r11,r10;
uint64_t r9,r8,rax,rcx,rdx,rsi,rdi,orig_rax;
uint64_t rip,cs,eflags;
uint64_t rsp,ss;
uint64_t fs_base, gs_base;
uint64_t ds,es,fs,gs;
#else
#define BP ebp
#define SP esp
#define IP eip
uint32_t ebx, ecx, edx, esi, edi, ebp, eax;
uint16_t ds, __ds, es, __es;
uint16_t fs, __fs, gs, __gs;
uint32_t orig_eax, eip;
uint16_t cs, __cs;
uint32_t eflags, esp;
uint16_t ss, __ss;
#endif
} i386_regs;
#elif defined(__arm__)
typedef struct arm_regs { /* General purpose registers */
#define BP uregs[11] /* Frame pointer */
#define SP uregs[13] /* Stack pointer */
#define IP uregs[15] /* Program counter */
#define LR uregs[14] /* Link register */
long uregs[18];
} arm_regs;
#elif defined(__mips__)
typedef struct mips_regs {
unsigned long pad[6]; /* Unused padding to match kernel structures */
unsigned long uregs[32]; /* General purpose registers. */
unsigned long hi; /* Used for multiplication and division. */
unsigned long lo;
unsigned long cp0_epc; /* Program counter. */
unsigned long cp0_badvaddr;
unsigned long cp0_status;
unsigned long cp0_cause;
unsigned long unused;
} mips_regs;
#elif defined (__PPC__)
typedef struct ppc_regs {
#define SP uregs[1] /* Stack pointer */
#define IP rip /* Program counter */
#define LR lr /* Link register */
unsigned long uregs[32]; /* General Purpose Registers - r0-r31. */
double fpr[32]; /* Floating-Point Registers - f0-f31. */
unsigned long rip; /* Program counter. */
unsigned long msr;
unsigned long ccr;
unsigned long lr;
unsigned long ctr;
unsigned long xeq;
unsigned long mq;
} ppc_regs;
#endif
#if defined(__i386__) && defined(__GNUC__)
/* On x86 we provide an optimized version of the FRAME() macro, if the
* compiler supports a GCC-style asm() directive. This results in somewhat
* more accurate values for CPU registers.
*/
typedef struct Frame {
struct i386_regs uregs;
int errno_;
pid_t tid;
} Frame;
#define FRAME(f) Frame f; \
do { \
f.errno_ = errno; \
f.tid = sys_gettid(); \
__asm__ volatile ( \
"push %%ebp\n" \
"push %%ebx\n" \
"mov %%ebx,0(%%eax)\n" \
"mov %%ecx,4(%%eax)\n" \
"mov %%edx,8(%%eax)\n" \
"mov %%esi,12(%%eax)\n" \
"mov %%edi,16(%%eax)\n" \
"mov %%ebp,20(%%eax)\n" \
"mov %%eax,24(%%eax)\n" \
"mov %%ds,%%ebx\n" \
"mov %%ebx,28(%%eax)\n" \
"mov %%es,%%ebx\n" \
"mov %%ebx,32(%%eax)\n" \
"mov %%fs,%%ebx\n" \
"mov %%ebx,36(%%eax)\n" \
"mov %%gs,%%ebx\n" \
"mov %%ebx, 40(%%eax)\n" \
"call 0f\n" \
"0:pop %%ebx\n" \
"add $1f-0b,%%ebx\n" \
"mov %%ebx,48(%%eax)\n" \
"mov %%cs,%%ebx\n" \
"mov %%ebx,52(%%eax)\n" \
"pushf\n" \
"pop %%ebx\n" \
"mov %%ebx,56(%%eax)\n" \
"mov %%esp,%%ebx\n" \
"add $8,%%ebx\n" \
"mov %%ebx,60(%%eax)\n" \
"mov %%ss,%%ebx\n" \
"mov %%ebx,64(%%eax)\n" \
"pop %%ebx\n" \
"pop %%ebp\n" \
"1:" \
: : "a" (&f) : "memory"); \
} while (0)
#define SET_FRAME(f,r) \
do { \
errno = (f).errno_; \
(r) = (f).uregs; \
} while (0)
#elif defined(__x86_64__) && defined(__GNUC__)
/* The FRAME and SET_FRAME macros for x86_64. */
typedef struct Frame {
struct i386_regs uregs;
int errno_;
pid_t tid;
} Frame;
#define FRAME(f) Frame f; \
do { \
f.errno_ = errno; \
f.tid = sys_gettid(); \
__asm__ volatile ( \
"push %%rbp\n" \
"push %%rbx\n" \
"mov %%r15,0(%%rax)\n" \
"mov %%r14,8(%%rax)\n" \
"mov %%r13,16(%%rax)\n" \
"mov %%r12,24(%%rax)\n" \
"mov %%rbp,32(%%rax)\n" \
"mov %%rbx,40(%%rax)\n" \
"mov %%r11,48(%%rax)\n" \
"mov %%r10,56(%%rax)\n" \
"mov %%r9,64(%%rax)\n" \
"mov %%r8,72(%%rax)\n" \
"mov %%rax,80(%%rax)\n" \
"mov %%rcx,88(%%rax)\n" \
"mov %%rdx,96(%%rax)\n" \
"mov %%rsi,104(%%rax)\n" \
"mov %%rdi,112(%%rax)\n" \
"mov %%ds,%%rbx\n" \
"mov %%rbx,184(%%rax)\n" \
"mov %%es,%%rbx\n" \
"mov %%rbx,192(%%rax)\n" \
"mov %%fs,%%rbx\n" \
"mov %%rbx,200(%%rax)\n" \
"mov %%gs,%%rbx\n" \
"mov %%rbx,208(%%rax)\n" \
"call 0f\n" \
"0:pop %%rbx\n" \
"add $1f-0b,%%rbx\n" \
"mov %%rbx,128(%%rax)\n" \
"mov %%cs,%%rbx\n" \
"mov %%rbx,136(%%rax)\n" \
"pushf\n" \
"pop %%rbx\n" \
"mov %%rbx,144(%%rax)\n" \
"mov %%rsp,%%rbx\n" \
"add $16,%%ebx\n" \
"mov %%rbx,152(%%rax)\n" \
"mov %%ss,%%rbx\n" \
"mov %%rbx,160(%%rax)\n" \
"pop %%rbx\n" \
"pop %%rbp\n" \
"1:" \
: : "a" (&f) : "memory"); \
} while (0)
#define SET_FRAME(f,r) \
do { \
errno = (f).errno_; \
(f).uregs.fs_base = (r).fs_base; \
(f).uregs.gs_base = (r).gs_base; \
(r) = (f).uregs; \
} while (0)
#elif defined(__arm__) && defined(__GNUC__)
/* ARM calling conventions are a little more tricky. A little assembly
* helps in obtaining an accurate snapshot of all registers.
*/
typedef struct Frame {
struct arm_regs arm;
int errno_;
pid_t tid;
} Frame;
#define FRAME(f) Frame f; \
do { \
long cpsr; \
f.errno_ = errno; \
f.tid = sys_gettid(); \
__asm__ volatile( \
"stmia %0, {r0-r15}\n" /* All integer regs */\
: : "r"(&f.arm) : "memory"); \
f.arm.uregs[16] = 0; \
__asm__ volatile( \
"mrs %0, cpsr\n" /* Condition code reg */\
: "=r"(cpsr)); \
f.arm.uregs[17] = cpsr; \
} while (0)
#define SET_FRAME(f,r) \
do { \
/* Don't override the FPU status register. */\
/* Use the value obtained from ptrace(). This*/\
/* works, because our code does not perform */\
/* any FPU operations, itself. */\
long fps = (f).arm.uregs[16]; \
errno = (f).errno_; \
(r) = (f).arm; \
(r).uregs[16] = fps; \
} while (0)
#elif defined(__mips__) && defined(__GNUC__)
typedef struct Frame {
struct mips_regs mips_regs;
int errno_;
pid_t tid;
} Frame;
#define MIPSREG(n) ({ register unsigned long r __asm__("$"#n); r; })
#define FRAME(f) Frame f = { 0 }; \
do { \
unsigned long hi, lo; \
register unsigned long pc __asm__("$31"); \
f.mips_regs.uregs[ 0] = MIPSREG( 0); \
f.mips_regs.uregs[ 1] = MIPSREG( 1); \
f.mips_regs.uregs[ 2] = MIPSREG( 2); \
f.mips_regs.uregs[ 3] = MIPSREG( 3); \
f.mips_regs.uregs[ 4] = MIPSREG( 4); \
f.mips_regs.uregs[ 5] = MIPSREG( 5); \
f.mips_regs.uregs[ 6] = MIPSREG( 6); \
f.mips_regs.uregs[ 7] = MIPSREG( 7); \
f.mips_regs.uregs[ 8] = MIPSREG( 8); \
f.mips_regs.uregs[ 9] = MIPSREG( 9); \
f.mips_regs.uregs[10] = MIPSREG(10); \
f.mips_regs.uregs[11] = MIPSREG(11); \
f.mips_regs.uregs[12] = MIPSREG(12); \
f.mips_regs.uregs[13] = MIPSREG(13); \
f.mips_regs.uregs[14] = MIPSREG(14); \
f.mips_regs.uregs[15] = MIPSREG(15); \
f.mips_regs.uregs[16] = MIPSREG(16); \
f.mips_regs.uregs[17] = MIPSREG(17); \
f.mips_regs.uregs[18] = MIPSREG(18); \
f.mips_regs.uregs[19] = MIPSREG(19); \
f.mips_regs.uregs[20] = MIPSREG(20); \
f.mips_regs.uregs[21] = MIPSREG(21); \
f.mips_regs.uregs[22] = MIPSREG(22); \
f.mips_regs.uregs[23] = MIPSREG(23); \
f.mips_regs.uregs[24] = MIPSREG(24); \
f.mips_regs.uregs[25] = MIPSREG(25); \
f.mips_regs.uregs[26] = MIPSREG(26); \
f.mips_regs.uregs[27] = MIPSREG(27); \
f.mips_regs.uregs[28] = MIPSREG(28); \
f.mips_regs.uregs[29] = MIPSREG(29); \
f.mips_regs.uregs[30] = MIPSREG(30); \
f.mips_regs.uregs[31] = MIPSREG(31); \
__asm__ volatile ("mfhi %0" : "=r"(hi)); \
__asm__ volatile ("mflo %0" : "=r"(lo)); \
__asm__ volatile ("jal 1f; 1:nop" : "=r"(pc)); \
f.mips_regs.hi = hi; \
f.mips_regs.lo = lo; \
f.mips_regs.cp0_epc = pc; \
f.errno_ = errno; \
f.tid = sys_gettid(); \
} while (0)
#define SET_FRAME(f,r) \
do { \
errno = (f).errno_; \
memcpy((r).uregs, (f).mips_regs.uregs, \
32*sizeof(unsigned long)); \
(r).hi = (f).mips_regs.hi; \
(r).lo = (f).mips_regs.lo; \
(r).cp0_epc = (f).mips_regs.cp0_epc; \
} while (0)
#else
/* If we do not have a hand-optimized assembly version of the FRAME()
* macro, we cannot reliably unroll the stack. So, we show a few additional
* stack frames for the coredumper.
*/
typedef struct Frame {
pid_t tid;
} Frame;
#define FRAME(f) Frame f; do { f.tid = sys_gettid(); } while (0)
#define SET_FRAME(f,r) do { } while (0)
#endif
/* Internal function for generating a core file. This API can change without
* notice and is only supposed to be used internally by the core dumper.
*
* This function works for both single- and multi-threaded core
* dumps. If called as
*
* FRAME(frame);
* InternalGetCoreDump(&frame, 0, NULL, ap);
*
* it creates a core file that only contains information about the
* calling thread.
*
* Optionally, the caller can provide information about other threads
* by passing their process ids in "thread_pids". The process id of
* the caller should not be included in this array. All of the threads
* must have been attached to with ptrace(), prior to calling this
* function. They will be detached when "InternalGetCoreDump()" returns.
*
* This function either returns a file handle that can be read for obtaining
* a core dump, or "-1" in case of an error. In the latter case, "errno"
* will be set appropriately.
*
* While "InternalGetCoreDump()" is not technically async signal safe, you
* might be tempted to invoke it from a signal handler. The code goes to
* great lengths to make a best effort that this will actually work. But in
* any case, you must make sure that you preserve the value of "errno"
* yourself. It is guaranteed to be clobbered otherwise.
*
* Also, "InternalGetCoreDump" is not strictly speaking re-entrant. Again,
* it makes a best effort to behave reasonably when called in a multi-
* threaded environment, but it is ultimately the caller's responsibility
* to provide locking.
*/
int InternalGetCoreDump(void *frame, int num_threads, pid_t *thread_pids,
va_list ap
/* const struct CoreDumpParameters *params,
const char *file_name,
const char *PATH
*/);
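/* A hedged usage sketch (illustrative only, not part of the original
 * header): a variadic wrapper supplies the va_list that
 * InternalGetCoreDump() expects; "DumpSelf" and its unused first argument
 * are hypothetical names introduced here for illustration.
 *
 *   #include <stdarg.h>
 *   static int DumpSelf(int unused, ...) {
 *     va_list ap;
 *     int fd;
 *     va_start(ap, unused);
 *     FRAME(frame);                           // snapshot registers, errno, tid
 *     fd = InternalGetCoreDump(&frame, 0, NULL, ap);  // calling thread only
 *     va_end(ap);
 *     return fd;                              // readable fd, or -1 with errno set
 *   }
 */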
#endif
#ifdef __cplusplus
}
#endif
#endif /* _ELFCORE_H */

View File

@ -1,74 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2005, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
// Author: Jacob Hoffman-Andrews
#ifndef _GOOGLEINIT_H
#define _GOOGLEINIT_H
#include "base/logging.h"
class GoogleInitializer {
public:
typedef void (*VoidFunction)(void);
GoogleInitializer(const char* name, VoidFunction ctor, VoidFunction dtor)
: name_(name), destructor_(dtor) {
RAW_VLOG(10, "<GoogleModuleObject> constructing: %s\n", name_);
if (ctor)
ctor();
}
~GoogleInitializer() {
RAW_VLOG(10, "<GoogleModuleObject> destroying: %s\n", name_);
if (destructor_)
destructor_();
}
private:
const char* const name_;
const VoidFunction destructor_;
};
#define REGISTER_MODULE_INITIALIZER(name, body) \
namespace { \
static void google_init_module_##name () { body; } \
GoogleInitializer google_initializer_module_##name(#name, \
google_init_module_##name, NULL); \
}
#define REGISTER_MODULE_DESTRUCTOR(name, body) \
namespace { \
static void google_destruct_module_##name () { body; } \
GoogleInitializer google_destructor_module_##name(#name, \
NULL, google_destruct_module_##name); \
}
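// A hedged usage sketch of the macros above (the module name "demo" and the
// log messages are illustrative):
//   REGISTER_MODULE_INITIALIZER(demo, { RAW_VLOG(10, "demo up\n"); });
//   REGISTER_MODULE_DESTRUCTOR(demo, { RAW_VLOG(10, "demo down\n"); });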
#endif /* _GOOGLEINIT_H */

File diff suppressed because it is too large

View File

@ -1,707 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2005-2007, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ---
* Author: Markus Gutschke
*/
#include "base/linuxthreads.h"
#ifdef THREADS
#ifdef __cplusplus
extern "C" {
#endif
#include <sched.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <fcntl.h>
#include <sys/socket.h>
#include <sys/wait.h>
#include <sys/prctl.h>
#include <semaphore.h>
#include "base/linux_syscall_support.h"
#include "base/thread_lister.h"
#ifndef CLONE_UNTRACED
#define CLONE_UNTRACED 0x00800000
#endif
/* Synchronous signals that should not be blocked while in the lister thread.
*/
static const int sync_signals[] = { SIGABRT, SIGILL, SIGFPE, SIGSEGV, SIGBUS,
SIGXCPU, SIGXFSZ };
/* itoa() is not a standard function, and we cannot safely call printf()
* after suspending threads. So, we just implement our own copy. A
* recursive approach is the easiest here.
*/
static char *local_itoa(char *buf, int i) {
if (i < 0) {
*buf++ = '-';
return local_itoa(buf, -i);
} else {
if (i >= 10)
buf = local_itoa(buf, i/10);
*buf++ = (i%10) + '0';
*buf = '\000';
return buf;
}
}
/* Wrapper around clone() that runs "fn" on the same stack as the
* caller! Unlike fork(), the cloned thread shares the same address space.
* The caller must be careful to use only minimal amounts of stack until
* the cloned thread has returned.
* There is a good chance that the cloned thread and the caller will share
* the same copy of errno!
*/
#ifdef __GNUC__
#if __GNUC__ == 3 && __GNUC_MINOR__ >= 1 || __GNUC__ > 3
/* Try to force this function into a separate stack frame, and make sure
* that arguments are passed on the stack.
*/
static int local_clone (int (*fn)(void *), void *arg, ...)
__attribute__ ((noinline));
#endif
#endif
/* To avoid the gap crossing page boundaries, increase by the large page
 * size that PowerPC systems mostly use. */
#ifdef __PPC64__
#define CLONE_STACK_SIZE 65536
#else
#define CLONE_STACK_SIZE 4096
#endif
static int local_clone (int (*fn)(void *), void *arg, ...) {
/* Leave CLONE_STACK_SIZE bytes of gap between the caller's stack and the
 * new clone. This should be more than sufficient for the caller to call
 * waitpid() until the cloned thread terminates.
*
* It is important that we set the CLONE_UNTRACED flag, because newer
* versions of "gdb" otherwise attempt to attach to our thread, and will
* attempt to reap its status codes. This subsequently results in the
* caller hanging indefinitely in waitpid(), waiting for a change in
* status that will never happen. By setting the CLONE_UNTRACED flag, we
* prevent "gdb" from stealing events, but we still expect the thread
* lister to fail, because it cannot PTRACE_ATTACH to the process that
* is being debugged. This is OK and the error code will be reported
* correctly.
*/
return sys_clone(fn, (char *)&arg - CLONE_STACK_SIZE,
CLONE_VM|CLONE_FS|CLONE_FILES|CLONE_UNTRACED, arg, 0, 0, 0);
}
/* Local substitute for the atoi() function, which is not necessarily safe
* to call once threads are suspended (depending on whether libc looks up
* locale information, when executing atoi()).
*/
static int local_atoi(const char *s) {
int n = 0;
int neg = *s == '-';
if (neg)
s++;
while (*s >= '0' && *s <= '9')
n = 10*n + (*s++ - '0');
return neg ? -n : n;
}
/* Re-runs fn until it doesn't cause EINTR
*/
#define NO_INTR(fn) do {} while ((fn) < 0 && errno == EINTR)
/* Wrap a class around system calls, in order to give us access to
* a private copy of errno. This only works in C++, but it has the
* advantage of not needing nested functions, which are a non-standard
* language extension.
*/
#ifdef __cplusplus
namespace {
class SysCalls {
public:
#define SYS_CPLUSPLUS
#define SYS_ERRNO my_errno
#define SYS_INLINE inline
#define SYS_PREFIX -1
#undef SYS_LINUX_SYSCALL_SUPPORT_H
#include "linux_syscall_support.h"
SysCalls() : my_errno(0) { }
int my_errno;
};
}
#define ERRNO sys.my_errno
#else
#define ERRNO my_errno
#endif
/* Wrapper for open() which is guaranteed to never return EINTR.
*/
static int c_open(const char *fname, int flags, int mode) {
ssize_t rc;
NO_INTR(rc = sys_open(fname, flags, mode));
return rc;
}
/* abort() is not safely reentrant, and changes its behavior each time
 * it is called. This means, if the main application ever called abort()
 * we cannot safely call it again. This would happen if we were called
 * from a SIGABRT signal handler in the main application. So, we document
 * that raising SIGABRT from the thread lister makes it not signal safe
 * (and vice versa).
* Also, since we share address space with the main application, we
* cannot call abort() from the callback and expect the main application
* to behave correctly afterwards. In fact, the only thing we can do, is
* to terminate the main application with extreme prejudice (aka
* PTRACE_KILL).
* We set up our own SIGABRT handler to do this.
* In order to find the main application from the signal handler, we
* need to store information about it in global variables. This is
* safe, because the main application should be suspended at this
* time. If the callback ever called TCMalloc_ResumeAllProcessThreads(), then
* we are running a higher risk, though. So, try to avoid calling
* abort() after calling TCMalloc_ResumeAllProcessThreads.
*/
static volatile int *sig_pids, sig_num_threads, sig_proc, sig_marker;
/* Signal handler to help us recover from dying while we are attached to
* other threads.
*/
static void SignalHandler(int signum, siginfo_t *si, void *data) {
if (sig_pids != NULL) {
if (signum == SIGABRT) {
while (sig_num_threads-- > 0) {
/* Not sure if sched_yield is really necessary here, but it does not */
/* hurt, and it might be necessary for the same reasons that we have */
/* to do so in sys_ptrace_detach(). */
sys_sched_yield();
sys_ptrace(PTRACE_KILL, sig_pids[sig_num_threads], 0, 0);
}
} else if (sig_num_threads > 0) {
TCMalloc_ResumeAllProcessThreads(sig_num_threads, (int *)sig_pids);
}
}
sig_pids = NULL;
if (sig_marker >= 0)
NO_INTR(sys_close(sig_marker));
sig_marker = -1;
if (sig_proc >= 0)
NO_INTR(sys_close(sig_proc));
sig_proc = -1;
sys__exit(signum == SIGABRT ? 1 : 2);
}
/* Try to dirty the stack, and hope that the compiler is not smart enough
* to optimize this function away. Or worse, the compiler could inline the
* function and permanently allocate the data on the stack.
*/
static void DirtyStack(size_t amount) {
char buf[amount];
memset(buf, 0, amount);
sys_read(-1, buf, amount);
}
/* Data structure for passing arguments to the lister thread.
*/
#define ALT_STACKSIZE (MINSIGSTKSZ + 4096)
struct ListerParams {
int result, err;
char *altstack_mem;
ListAllProcessThreadsCallBack callback;
void *parameter;
va_list ap;
sem_t *lock;
};
static void ListerThread(struct ListerParams *args) {
int found_parent = 0;
pid_t clone_pid = sys_gettid(), ppid = sys_getppid();
char proc_self_task[80], marker_name[48], *marker_path;
const char *proc_paths[3];
const char *const *proc_path = proc_paths;
int proc = -1, marker = -1, num_threads = 0;
int max_threads = 0, sig;
struct kernel_stat marker_sb, proc_sb;
stack_t altstack;
/* Wait for parent thread to set appropriate permissions
* to allow ptrace activity
*/
if (sem_wait(args->lock) < 0) {
goto failure;
}
/* Create "marker" that we can use to detect threads sharing the same
* address space and the same file handles. By setting the FD_CLOEXEC flag
* we minimize the risk of misidentifying child processes as threads;
* and since there is still a race condition, we will filter those out
* later, anyway.
*/
if ((marker = sys_socket(PF_LOCAL, SOCK_DGRAM, 0)) < 0 ||
sys_fcntl(marker, F_SETFD, FD_CLOEXEC) < 0) {
failure:
args->result = -1;
args->err = errno;
if (marker >= 0)
NO_INTR(sys_close(marker));
sig_marker = marker = -1;
if (proc >= 0)
NO_INTR(sys_close(proc));
sig_proc = proc = -1;
sys__exit(1);
}
/* Compute search paths for finding thread directories in /proc */
local_itoa(strrchr(strcpy(proc_self_task, "/proc/"), '\000'), ppid);
strcpy(marker_name, proc_self_task);
marker_path = marker_name + strlen(marker_name);
strcat(proc_self_task, "/task/");
proc_paths[0] = proc_self_task; /* /proc/$$/task/ */
proc_paths[1] = "/proc/"; /* /proc/ */
proc_paths[2] = NULL;
/* Compute path for marker socket in /proc */
local_itoa(strcpy(marker_path, "/fd/") + 4, marker);
if (sys_stat(marker_name, &marker_sb) < 0) {
goto failure;
}
/* Catch signals on an alternate pre-allocated stack. This way, we can
* safely execute the signal handler even if we ran out of memory.
*/
memset(&altstack, 0, sizeof(altstack));
altstack.ss_sp = args->altstack_mem;
altstack.ss_flags = 0;
altstack.ss_size = ALT_STACKSIZE;
sys_sigaltstack(&altstack, (const stack_t *)NULL);
/* Some kernels forget to wake up traced processes, when the
* tracer dies. So, intercept synchronous signals and make sure
* that we wake up our tracees before dying. It is the caller's
* responsibility to ensure that asynchronous signals do not
* interfere with this function.
*/
sig_marker = marker;
sig_proc = -1;
for (sig = 0; sig < sizeof(sync_signals)/sizeof(*sync_signals); sig++) {
struct kernel_sigaction sa;
memset(&sa, 0, sizeof(sa));
sa.sa_sigaction_ = SignalHandler;
sys_sigfillset(&sa.sa_mask);
sa.sa_flags = SA_ONSTACK|SA_SIGINFO|SA_RESETHAND;
sys_sigaction(sync_signals[sig], &sa, (struct kernel_sigaction *)NULL);
}
/* Read process directories in /proc/... */
for (;;) {
/* Some kernels know about threads, and hide them in "/proc"
* (although they are still there, if you know the process
* id). Threads are moved into a separate "task" directory. We
* check there first, and then fall back on the older naming
* convention if necessary.
*/
if ((sig_proc = proc = c_open(*proc_path, O_RDONLY|O_DIRECTORY, 0)) < 0) {
if (*++proc_path != NULL)
continue;
goto failure;
}
if (sys_fstat(proc, &proc_sb) < 0)
goto failure;
/* Since we are suspending threads, we cannot call any libc
* functions that might acquire locks. Most notably, we cannot
* call malloc(). So, we have to allocate memory on the stack,
* instead. Since we do not know how much memory we need, we
* make a best guess. And if we guessed incorrectly we retry on
* a second iteration (by jumping to "detach_threads").
*
* Unless the number of threads is increasing very rapidly, we
 * should never need to do so, though, as our guesstimate is very
* conservative.
*/
if (max_threads < proc_sb.st_nlink + 100)
max_threads = proc_sb.st_nlink + 100;
/* scope */ {
pid_t pids[max_threads];
int added_entries = 0;
sig_num_threads = num_threads;
sig_pids = pids;
for (;;) {
struct KERNEL_DIRENT *entry;
char buf[4096];
ssize_t nbytes = GETDENTS(proc, (struct KERNEL_DIRENT *)buf,
sizeof(buf));
if (nbytes < 0)
goto failure;
else if (nbytes == 0) {
if (added_entries) {
/* Need to keep iterating over "/proc" in multiple
* passes until we no longer find any more threads. This
* algorithm eventually completes, when all threads have
* been suspended.
*/
added_entries = 0;
sys_lseek(proc, 0, SEEK_SET);
continue;
}
break;
}
for (entry = (struct KERNEL_DIRENT *)buf;
entry < (struct KERNEL_DIRENT *)&buf[nbytes];
entry = (struct KERNEL_DIRENT *)((char *)entry+entry->d_reclen)) {
if (entry->d_ino != 0) {
const char *ptr = entry->d_name;
pid_t pid;
/* Some kernels hide threads by preceding the pid with a '.' */
if (*ptr == '.')
ptr++;
/* If the directory is not numeric, it cannot be a
* process/thread
*/
if (*ptr < '0' || *ptr > '9')
continue;
pid = local_atoi(ptr);
/* Attach (and suspend) all threads */
if (pid && pid != clone_pid) {
struct kernel_stat tmp_sb;
char fname[entry->d_reclen + 48];
strcat(strcat(strcpy(fname, "/proc/"),
entry->d_name), marker_path);
/* Check if the marker is identical to the one we created */
if (sys_stat(fname, &tmp_sb) >= 0 &&
marker_sb.st_ino == tmp_sb.st_ino) {
long i, j;
/* Found one of our threads, make sure it is no duplicate */
for (i = 0; i < num_threads; i++) {
/* Linear search is slow, but should not matter much for
* the typically small number of threads.
*/
if (pids[i] == pid) {
/* Found a duplicate; most likely on second pass */
goto next_entry;
}
}
/* Check whether data structure needs growing */
if (num_threads >= max_threads) {
/* Back to square one, this time with more memory */
NO_INTR(sys_close(proc));
goto detach_threads;
}
/* Attaching to thread suspends it */
pids[num_threads++] = pid;
sig_num_threads = num_threads;
if (sys_ptrace(PTRACE_ATTACH, pid, (void *)0,
(void *)0) < 0) {
/* If operation failed, ignore thread. Maybe it
* just died? There might also be a race
* condition with a concurrent core dumper or
* with a debugger. In that case, we will just
* make a best effort, rather than failing
* entirely.
*/
num_threads--;
sig_num_threads = num_threads;
goto next_entry;
}
while (sys_waitpid(pid, (int *)0, __WALL) < 0) {
if (errno != EINTR) {
sys_ptrace_detach(pid);
num_threads--;
sig_num_threads = num_threads;
goto next_entry;
}
}
if (sys_ptrace(PTRACE_PEEKDATA, pid, &i, &j) || i++ != j ||
sys_ptrace(PTRACE_PEEKDATA, pid, &i, &j) || i != j) {
/* Address spaces are distinct, even though both
* processes show the "marker". This is probably
* a forked child process rather than a thread.
*/
sys_ptrace_detach(pid);
num_threads--;
sig_num_threads = num_threads;
} else {
found_parent |= pid == ppid;
added_entries++;
}
}
}
}
next_entry:;
}
}
NO_INTR(sys_close(proc));
sig_proc = proc = -1;
/* If we failed to find any threads, try looking somewhere else in
* /proc. Maybe, threads are reported differently on this system.
*/
if (num_threads > 1 || !*++proc_path) {
NO_INTR(sys_close(marker));
sig_marker = marker = -1;
/* If we never found the parent process, something is very wrong.
 * Most likely, we are running in a debugger. Any attempt to operate
* on the threads would be very incomplete. Let's just report an
* error to the caller.
*/
if (!found_parent) {
TCMalloc_ResumeAllProcessThreads(num_threads, pids);
sys__exit(3);
}
/* Now we are ready to call the callback,
* which takes care of resuming the threads for us.
*/
args->result = args->callback(args->parameter, num_threads,
pids, args->ap);
args->err = errno;
/* Callback should have resumed threads, but better safe than sorry */
if (TCMalloc_ResumeAllProcessThreads(num_threads, pids)) {
/* Callback forgot to resume at least one thread, report error */
args->err = EINVAL;
args->result = -1;
}
sys__exit(0);
}
detach_threads:
/* Resume all threads prior to retrying the operation */
TCMalloc_ResumeAllProcessThreads(num_threads, pids);
sig_pids = NULL;
num_threads = 0;
sig_num_threads = num_threads;
max_threads += 100;
}
}
}
/* This function gets the list of all Linux threads of the current process
 * and passes them to the 'callback' along with the 'parameter' pointer; at
 * the time the callback is invoked, all of the threads are paused via
 * PTRACE_ATTACH.
 * The callback is executed from a separate thread which shares only the
 * address space, the filesystem, and the filehandles with the caller. Most
 * notably, it does not share the same pid and ppid; and if it terminates,
 * the rest of the application is still there. 'callback' is supposed to
 * call, or arrange for a later call to, TCMalloc_ResumeAllProcessThreads.
 * This happens automatically, if
* the thread raises a synchronous signal (e.g. SIGSEGV); asynchronous
* signals are blocked. If the 'callback' decides to unblock them, it must
* ensure that they cannot terminate the application, or that
* TCMalloc_ResumeAllProcessThreads will get called.
* It is an error for the 'callback' to make any library calls that could
* acquire locks. Most notably, this means that most system calls have to
* avoid going through libc. Also, this means that it is not legal to call
* exit() or abort().
* We return -1 on error and the return value of 'callback' on success.
*/
int TCMalloc_ListAllProcessThreads(void *parameter,
ListAllProcessThreadsCallBack callback, ...) {
char altstack_mem[ALT_STACKSIZE];
struct ListerParams args;
pid_t clone_pid;
int dumpable = 1, sig;
struct kernel_sigset_t sig_blocked, sig_old;
sem_t lock;
va_start(args.ap, callback);
/* If we are short on virtual memory, initializing the alternate stack
* might trigger a SIGSEGV. Let's do this early, before it could get us
* into more trouble (i.e. before signal handlers try to use the alternate
* stack, and before we attach to other threads).
*/
memset(altstack_mem, 0, sizeof(altstack_mem));
/* Some of our cleanup functions could conceivably use more stack space.
 * Try to touch the stack right now. This could be defeated by the compiler
 * being too smart for its own good, so try really hard.
*/
DirtyStack(32768);
/* Make this process "dumpable". This is necessary in order to ptrace()
* after having called setuid().
*/
dumpable = sys_prctl(PR_GET_DUMPABLE, 0);
if (!dumpable)
sys_prctl(PR_SET_DUMPABLE, 1);
/* Fill in argument block for dumper thread */
args.result = -1;
args.err = 0;
args.altstack_mem = altstack_mem;
args.parameter = parameter;
args.callback = callback;
args.lock = &lock;
/* Before cloning the thread lister, block all asynchronous signals, as we */
/* are not prepared to handle them. */
sys_sigfillset(&sig_blocked);
for (sig = 0; sig < sizeof(sync_signals)/sizeof(*sync_signals); sig++) {
sys_sigdelset(&sig_blocked, sync_signals[sig]);
}
if (sys_sigprocmask(SIG_BLOCK, &sig_blocked, &sig_old)) {
args.err = errno;
args.result = -1;
goto failed;
}
/* scope */ {
/* After cloning, both the parent and the child share the same instance
* of errno. We must make sure that at least one of these processes
* (in our case, the parent) uses modified syscall macros that update
* a local copy of errno, instead.
*/
#ifdef __cplusplus
#define sys0_sigprocmask sys.sigprocmask
#define sys0_waitpid sys.waitpid
SysCalls sys;
#else
int my_errno;
#define SYS_ERRNO my_errno
#define SYS_INLINE inline
#define SYS_PREFIX 0
#undef SYS_LINUX_SYSCALL_SUPPORT_H
#include "linux_syscall_support.h"
#endif
/* Lock before clone so that parent can set
* ptrace permissions (if necessary) prior
* to ListerThread actually executing
*/
if (sem_init(&lock, 0, 0) == 0) {
int clone_errno;
clone_pid = local_clone((int (*)(void *))ListerThread, &args);
clone_errno = errno;
sys_sigprocmask(SIG_SETMASK, &sig_old, &sig_old);
if (clone_pid >= 0) {
#ifdef PR_SET_PTRACER
/* In newer versions of glibc permission must explicitly
* be given to allow for ptrace.
*/
prctl(PR_SET_PTRACER, clone_pid, 0, 0, 0);
#endif
/* Releasing the lock here allows the
* ListerThread to execute and ptrace us.
*/
sem_post(&lock);
int status, rc;
while ((rc = sys0_waitpid(clone_pid, &status, __WALL)) < 0 &&
ERRNO == EINTR) {
/* Keep waiting */
}
if (rc < 0) {
args.err = ERRNO;
args.result = -1;
} else if (WIFEXITED(status)) {
switch (WEXITSTATUS(status)) {
case 0: break; /* Normal process termination */
case 2: args.err = EFAULT; /* Some fault (e.g. SIGSEGV) detected */
args.result = -1;
break;
case 3: args.err = EPERM; /* Process is already being traced */
args.result = -1;
break;
default:args.err = ECHILD; /* Child died unexpectedly */
args.result = -1;
break;
}
} else if (!WIFEXITED(status)) {
args.err = EFAULT; /* Terminated due to an unhandled signal*/
args.result = -1;
}
sem_destroy(&lock);
} else {
args.result = -1;
args.err = clone_errno;
}
} else {
args.result = -1;
args.err = errno;
}
}
/* Restore the "dumpable" state of the process */
failed:
if (!dumpable)
sys_prctl(PR_SET_DUMPABLE, dumpable);
va_end(args.ap);
errno = args.err;
return args.result;
}
/* This function resumes the list of all linux threads that
* TCMalloc_ListAllProcessThreads pauses before giving to its callback.
* The function returns non-zero if at least one thread was
* suspended and has now been resumed.
*/
int TCMalloc_ResumeAllProcessThreads(int num_threads, pid_t *thread_pids) {
int detached_at_least_one = 0;
while (num_threads-- > 0) {
detached_at_least_one |= sys_ptrace_detach(thread_pids[num_threads]) >= 0;
}
return detached_at_least_one;
}
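/* A hedged usage sketch, not part of the original file. It assumes the
 * callback signature implied by the invocation inside ListerThread() above:
 * int (*)(void *parameter, int num_threads, pid_t *pids, va_list ap).
 * The names below are illustrative only.
 */
static int CountThreadsCallback(void *parameter, int num_threads,
                                pid_t *thread_pids, va_list ap) {
  /* The contract above requires the callback to resume every thread. */
  TCMalloc_ResumeAllProcessThreads(num_threads, thread_pids);
  return num_threads;
}
static int CountThreads(void) {
  /* Returns the callback's return value (here: the thread count), or -1
   * with errno set on error. */
  return TCMalloc_ListAllProcessThreads(NULL, CountThreadsCallback);
}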
#ifdef __cplusplus
}
#endif
#endif

View File

@ -1,54 +0,0 @@
/* Copyright (c) 2005-2007, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ---
* Author: Markus Gutschke
*/
#ifndef _LINUXTHREADS_H
#define _LINUXTHREADS_H
/* Include thread_lister.h to get the interface that we implement for linux.
*/
/* We currently only support certain platforms on Linux. Porting to other
* related platforms should not be difficult.
*/
#if (defined(__i386__) || defined(__x86_64__) || defined(__arm__) || \
defined(__mips__) || defined(__PPC__) || defined(__aarch64__) || \
defined(__s390__)) && defined(__linux)
/* Define the THREADS symbol to make sure that there is exactly one core dumper
* built into the library.
*/
#define THREADS "Linux /proc"
#endif
#endif /* _LINUXTHREADS_H */

View File

@ -1,108 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2007, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
// This file just provides storage for FLAGS_verbose.
#include "../config.h"
#include "base/logging.h"
#include "base/commandlineflags.h"
DEFINE_int32(verbose, EnvToInt("PERFTOOLS_VERBOSE", 0),
"Set to numbers >0 for more verbose output, or <0 for less. "
"--verbose == -4 means we log fatal errors only.");
#if defined(_WIN32) || defined(__CYGWIN__) || defined(__CYGWIN32__)
// While windows does have a POSIX-compatible API
// (_open/_write/_close), it allocates memory. Using this lower-level
// windows API is the closest we can get to being "raw".
RawFD RawOpenForWriting(const char* filename) {
// CreateFile allocates memory if file_name isn't absolute, so if
// that ever becomes a problem then we ought to compute the absolute
// path on its behalf (perhaps the ntdll/kernel function isn't aware
// of the working directory?)
RawFD fd = CreateFileA(filename, GENERIC_WRITE, 0, NULL,
CREATE_ALWAYS, 0, NULL);
if (fd != kIllegalRawFD && GetLastError() == ERROR_ALREADY_EXISTS)
SetEndOfFile(fd); // truncate the existing file
return fd;
}
void RawWrite(RawFD handle, const char* buf, size_t len) {
while (len > 0) {
DWORD wrote;
BOOL ok = WriteFile(handle, buf, len, &wrote, NULL);
// We do not use an asynchronous file handle, so ok==false means an error
if (!ok) break;
buf += wrote;
len -= wrote;
}
}
void RawClose(RawFD handle) {
CloseHandle(handle);
}
#else // _WIN32 || __CYGWIN__ || __CYGWIN32__
#ifdef HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#ifdef HAVE_FCNTL_H
#include <fcntl.h>
#endif
// Re-run fn until it doesn't cause EINTR.
#define NO_INTR(fn) do {} while ((fn) < 0 && errno == EINTR)
RawFD RawOpenForWriting(const char* filename) {
return open(filename, O_WRONLY|O_CREAT|O_TRUNC, 0664);
}
void RawWrite(RawFD fd, const char* buf, size_t len) {
while (len > 0) {
ssize_t r;
NO_INTR(r = write(fd, buf, len));
if (r <= 0) break;
buf += r;
len -= r;
}
}
void RawClose(RawFD fd) {
NO_INTR(close(fd));
}
#endif // _WIN32 || __CYGWIN__ || __CYGWIN32__

View File

@ -1,259 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2005, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
// This file contains #include information about logging-related stuff.
// Pretty much everybody needs to #include this file so that they can
// log various happenings.
//
#ifndef _LOGGING_H_
#define _LOGGING_H_
#include "../config.h"
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#ifdef HAVE_UNISTD_H
#include <unistd.h> // for write()
#endif
#include <string.h> // for strlen(), strcmp()
#include <assert.h>
#include <errno.h> // for errno
#include "base/commandlineflags.h"
// On some systems (like freebsd), we can't call write() at all in a
// global constructor, perhaps because errno hasn't been set up.
// (In windows, we can't call it because it might call malloc.)
// Calling the write syscall is safer (it doesn't set errno), so we
// prefer that. Note we don't care about errno for logging: we just
// do logging on a best-effort basis.
#if defined(_MSC_VER)
#define WRITE_TO_STDERR(buf, len) WriteToStderr(buf, len); // in port.cc
#elif defined(HAVE_SYS_SYSCALL_H)
#include <sys/syscall.h>
#define WRITE_TO_STDERR(buf, len) syscall(SYS_write, STDERR_FILENO, buf, len)
#else
#define WRITE_TO_STDERR(buf, len) write(STDERR_FILENO, buf, len)
#endif
// MSVC and mingw define their own, safe version of vsnprintf (the
// windows one is broken) in port.cc. Everyone else can use the
// version here. We had to give it a unique name for windows.
#ifndef _WIN32
# define perftools_vsnprintf vsnprintf
#endif
// We log all messages at this log-level and below.
// INFO == -1, WARNING == -2, ERROR == -3, FATAL == -4
DECLARE_int32(verbose);
// CHECK dies with a fatal error if condition is not true. It is *not*
// controlled by NDEBUG, so the check will be executed regardless of
// compilation mode. Therefore, it is safe to do things like:
// CHECK(fp->Write(x) == 4)
// Note we use write instead of printf/puts to avoid the risk we'll
// call malloc().
#define CHECK(condition) \
do { \
if (!(condition)) { \
WRITE_TO_STDERR("Check failed: " #condition "\n", \
sizeof("Check failed: " #condition "\n")-1); \
abort(); \
} \
} while (0)
// This takes a message to print. The name is historical.
#define RAW_CHECK(condition, message) \
do { \
if (!(condition)) { \
WRITE_TO_STDERR("Check failed: " #condition ": " message "\n", \
sizeof("Check failed: " #condition ": " message "\n")-1);\
abort(); \
} \
} while (0)
// This is like RAW_CHECK, but only in debug-mode
#ifdef NDEBUG
enum { DEBUG_MODE = 0 };
#define RAW_DCHECK(condition, message)
#else
enum { DEBUG_MODE = 1 };
#define RAW_DCHECK(condition, message) RAW_CHECK(condition, message)
#endif
// This prints errno as well. Note we use write instead of printf/puts to
// avoid the risk we'll call malloc().
#define PCHECK(condition) \
do { \
if (!(condition)) { \
const int err_no = errno; \
WRITE_TO_STDERR("Check failed: " #condition ": ", \
sizeof("Check failed: " #condition ": ")-1); \
WRITE_TO_STDERR(strerror(err_no), strlen(strerror(err_no))); \
WRITE_TO_STDERR("\n", sizeof("\n")-1); \
abort(); \
} \
} while (0)
// Helper macro for binary operators; prints the two values on error
// Don't use this macro directly in your code, use CHECK_EQ et al below
// WARNING: These don't compile correctly if one of the arguments is a pointer
// and the other is NULL. To work around this, simply static_cast NULL to the
// type of the desired pointer.
// TODO(jandrews): Also print the values in case of failure. Requires some
// sort of type-sensitive ToString() function.
#define CHECK_OP(op, val1, val2) \
do { \
if (!((val1) op (val2))) { \
fprintf(stderr, "Check failed: %s %s %s\n", #val1, #op, #val2); \
abort(); \
} \
} while (0)
#define CHECK_EQ(val1, val2) CHECK_OP(==, val1, val2)
#define CHECK_NE(val1, val2) CHECK_OP(!=, val1, val2)
#define CHECK_LE(val1, val2) CHECK_OP(<=, val1, val2)
#define CHECK_LT(val1, val2) CHECK_OP(< , val1, val2)
#define CHECK_GE(val1, val2) CHECK_OP(>=, val1, val2)
#define CHECK_GT(val1, val2) CHECK_OP(> , val1, val2)
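// An example of the NULL workaround from the warning above ("p" is a
// hypothetical pointer introduced here for illustration):
//   char *p = SomeLookup();
//   CHECK_EQ(p, static_cast<char*>(NULL));   // instead of CHECK_EQ(p, NULL)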
// Synonyms for CHECK_* that are used in some unittests.
#define EXPECT_EQ(val1, val2) CHECK_EQ(val1, val2)
#define EXPECT_NE(val1, val2) CHECK_NE(val1, val2)
#define EXPECT_LE(val1, val2) CHECK_LE(val1, val2)
#define EXPECT_LT(val1, val2) CHECK_LT(val1, val2)
#define EXPECT_GE(val1, val2) CHECK_GE(val1, val2)
#define EXPECT_GT(val1, val2) CHECK_GT(val1, val2)
#define ASSERT_EQ(val1, val2) EXPECT_EQ(val1, val2)
#define ASSERT_NE(val1, val2) EXPECT_NE(val1, val2)
#define ASSERT_LE(val1, val2) EXPECT_LE(val1, val2)
#define ASSERT_LT(val1, val2) EXPECT_LT(val1, val2)
#define ASSERT_GE(val1, val2) EXPECT_GE(val1, val2)
#define ASSERT_GT(val1, val2) EXPECT_GT(val1, val2)
// As are these variants.
#define EXPECT_TRUE(cond) CHECK(cond)
#define EXPECT_FALSE(cond) CHECK(!(cond))
#define EXPECT_STREQ(a, b) CHECK(strcmp(a, b) == 0)
#define ASSERT_TRUE(cond) EXPECT_TRUE(cond)
#define ASSERT_FALSE(cond) EXPECT_FALSE(cond)
#define ASSERT_STREQ(a, b) EXPECT_STREQ(a, b)
// Used for (libc) functions that return -1 and set errno
#define CHECK_ERR(invocation) PCHECK((invocation) != -1)
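// A hedged example of CHECK_ERR ("/tmp/scratch" is an arbitrary path used
// only for illustration):
//   CHECK_ERR(unlink("/tmp/scratch"));  // aborts with strerror(errno) on -1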
// A few more checks that only happen in debug mode
#ifdef NDEBUG
#define DCHECK_EQ(val1, val2)
#define DCHECK_NE(val1, val2)
#define DCHECK_LE(val1, val2)
#define DCHECK_LT(val1, val2)
#define DCHECK_GE(val1, val2)
#define DCHECK_GT(val1, val2)
#else
#define DCHECK_EQ(val1, val2) CHECK_EQ(val1, val2)
#define DCHECK_NE(val1, val2) CHECK_NE(val1, val2)
#define DCHECK_LE(val1, val2) CHECK_LE(val1, val2)
#define DCHECK_LT(val1, val2) CHECK_LT(val1, val2)
#define DCHECK_GE(val1, val2) CHECK_GE(val1, val2)
#define DCHECK_GT(val1, val2) CHECK_GT(val1, val2)
#endif
#ifdef ERROR
#undef ERROR // may conflict with ERROR macro on windows
#endif
enum LogSeverity {INFO = -1, WARNING = -2, ERROR = -3, FATAL = -4};
// NOTE: we add a newline to the end of the output if it's not there already
inline void LogPrintf(int severity, const char* pat, va_list ap) {
// We write directly to the stderr file descriptor and avoid FILE
// buffering because that may invoke malloc()
char buf[600];
perftools_vsnprintf(buf, sizeof(buf)-1, pat, ap);
if (buf[0] != '\0' && buf[strlen(buf)-1] != '\n') {
assert(strlen(buf)+1 < sizeof(buf));
strcat(buf, "\n");
}
WRITE_TO_STDERR(buf, strlen(buf));
if ((severity) == FATAL)
abort(); // LOG(FATAL) indicates a big problem, so don't run atexit() calls
}
// Note that since the order of global constructors is unspecified,
// global code that calls RAW_LOG may execute before FLAGS_verbose is set.
// Such code will run with verbosity == 0 no matter what.
#define VLOG_IS_ON(severity) (FLAGS_verbose >= severity)
// In a better world, we'd use __VA_ARGS__, but VC++ 7 doesn't support it.
#define LOG_PRINTF(severity, pat) do { \
if (VLOG_IS_ON(severity)) { \
va_list ap; \
va_start(ap, pat); \
LogPrintf(severity, pat, ap); \
va_end(ap); \
} \
} while (0)
// RAW_LOG is the main function; some synonyms are used in unittests.
inline void RAW_LOG(int lvl, const char* pat, ...) { LOG_PRINTF(lvl, pat); }
inline void RAW_VLOG(int lvl, const char* pat, ...) { LOG_PRINTF(lvl, pat); }
inline void LOG(int lvl, const char* pat, ...) { LOG_PRINTF(lvl, pat); }
inline void VLOG(int lvl, const char* pat, ...) { LOG_PRINTF(lvl, pat); }
inline void LOG_IF(int lvl, bool cond, const char* pat, ...) {
if (cond) LOG_PRINTF(lvl, pat);
}
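// A hedged usage sketch of the logging functions above (the format
// arguments are illustrative):
//   RAW_LOG(INFO, "loaded %d symbols", num_symbols);
//   RAW_VLOG(10, "detail shown only when FLAGS_verbose >= 10");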
// This isn't technically logging, but it's also IO and also is an
// attempt to be "raw" -- that is, to not use any higher-level libc
// routines that might allocate memory or (ideally) try to allocate
// locks. We use an opaque file handle (not necessarily an int)
// to allow even more low-level stuff in the future.
// Like other "raw" routines, these functions are best effort, and
// thus don't return error codes (except RawOpenForWriting()).
#if defined(_WIN32) || defined(__CYGWIN__) || defined(__CYGWIN32__)
#ifndef NOMINMAX
#define NOMINMAX // @#!$& windows
#endif
#include <windows.h>
typedef HANDLE RawFD;
const RawFD kIllegalRawFD = INVALID_HANDLE_VALUE;
#else
typedef int RawFD;
const RawFD kIllegalRawFD = -1; // what open returns if it fails
#endif // defined(_WIN32) || defined(__CYGWIN__) || defined(__CYGWIN32__)
RawFD RawOpenForWriting(const char* filename); // uses default permissions
void RawWrite(RawFD fd, const char* buf, size_t len);
void RawClose(RawFD fd);
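// A hedged usage sketch of the raw file API above ("/tmp/raw.out" is an
// arbitrary illustrative path; errors beyond the open are deliberately
// ignored, since these routines are best effort):
//   RawFD fd = RawOpenForWriting("/tmp/raw.out");
//   if (fd != kIllegalRawFD) {
//     RawWrite(fd, "hello\n", 6);
//     RawClose(fd);
//   }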
#endif // _LOGGING_H_

View File

@ -1,582 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2006, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// A low-level allocator that can be used by other low-level
// modules without introducing dependency cycles.
// This allocator is slow and wasteful of memory;
// it should not be used when performance is key.
#include "base/low_level_alloc.h"
#include "base/dynamic_annotations.h"
#include "base/spinlock.h"
#include "base/logging.h"
#include "malloc_hook-inl.h"
#include <gperftools/malloc_hook.h>
#include <errno.h>
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#ifdef HAVE_MMAP
#include <sys/mman.h>
#endif
#include <new> // for placement-new
// On systems (like freebsd) that don't define MAP_ANONYMOUS, use the old
// form of the name instead.
#ifndef MAP_ANONYMOUS
# define MAP_ANONYMOUS MAP_ANON
#endif
// A first-fit allocator with amortized logarithmic free() time.
LowLevelAlloc::PagesAllocator::~PagesAllocator() {
}
// ---------------------------------------------------------------------------
static const int kMaxLevel = 30;
// We put this class-only struct in a namespace to avoid polluting the
// global namespace with this struct name (thus risking an ODR violation).
namespace low_level_alloc_internal {
// This struct describes one allocated block, or one free block.
struct AllocList {
struct Header {
intptr_t size; // size of entire region, including this field. Must be
// first. Valid in both allocated and unallocated blocks
intptr_t magic; // kMagicAllocated or kMagicUnallocated xor this
LowLevelAlloc::Arena *arena; // pointer to parent arena
void *dummy_for_alignment; // aligns regions to 0 mod 2*sizeof(void*)
} header;
// Next two fields: in unallocated blocks: freelist skiplist data
// in allocated blocks: overlaps with client data
int levels; // levels in skiplist used
AllocList *next[kMaxLevel]; // actually has levels elements.
// The AllocList node may not have room for
// all kMaxLevel entries. See max_fit in
// LLA_SkiplistLevels()
};
}
using low_level_alloc_internal::AllocList;
// ---------------------------------------------------------------------------
// A trivial skiplist implementation. This is used to keep the freelist
// in address order while taking only logarithmic time per insert and delete.
// An integer approximation of log2(size/base)
// Requires size >= base.
static int IntLog2(size_t size, size_t base) {
int result = 0;
for (size_t i = size; i > base; i >>= 1) { // i == floor(size/2**result)
result++;
}
// floor(size / 2**result) <= base < floor(size / 2**(result-1))
// => log2(size/(base+1)) <= result < 1+log2(size/base)
// => result ~= log2(size/base)
return result;
}
// Return a random integer n: p(n)=1/(2**n) if 1 <= n; p(n)=0 if n < 1.
static int Random() {
static uint32 r = 1; // no locking---it's not critical
ANNOTATE_BENIGN_RACE(&r, "benign race, not critical.");
int result = 1;
while ((((r = r*1103515245 + 12345) >> 30) & 1) == 0) {
result++;
}
return result;
}
// Return a number of skiplist levels for a node of size bytes, where
// base is the minimum node size. Compute level=log2(size / base)+n
// where n is 1 if random is false and otherwise a random number generated with
// the standard distribution for a skiplist: See Random() above.
// Bigger nodes tend to have more skiplist levels due to the log2(size / base)
// term, so first-fit searches touch fewer nodes. "level" is clipped so
// level<kMaxLevel and next[level-1] will fit in the node.
// 0 < LLA_SkiplistLevels(x,y,false) <= LLA_SkiplistLevels(x,y,true) < kMaxLevel
static int LLA_SkiplistLevels(size_t size, size_t base, bool random) {
// max_fit is the maximum number of levels that will fit in a node for the
// given size. We can't return more than max_fit, no matter what the
// random number generator says.
int max_fit = (size-OFFSETOF_MEMBER(AllocList, next)) / sizeof (AllocList *);
int level = IntLog2(size, base) + (random? Random() : 1);
if (level > max_fit) level = max_fit;
if (level > kMaxLevel-1) level = kMaxLevel - 1;
RAW_CHECK(level >= 1, "block not big enough for even one level");
return level;
}
// Return "atleast", the first element of AllocList *head s.t. *atleast >= *e.
// For 0 <= i < head->levels, set prev[i] to "no_greater", where no_greater
// points to the last element at level i in the AllocList less than *e, or is
// head if no such element exists.
static AllocList *LLA_SkiplistSearch(AllocList *head,
AllocList *e, AllocList **prev) {
AllocList *p = head;
for (int level = head->levels - 1; level >= 0; level--) {
for (AllocList *n; (n = p->next[level]) != 0 && n < e; p = n) {
}
prev[level] = p;
}
return (head->levels == 0) ? 0 : prev[0]->next[0];
}
// Insert element *e into AllocList *head. Set prev[] as LLA_SkiplistSearch.
// Requires that e->levels be previously set by the caller (using
// LLA_SkiplistLevels())
static void LLA_SkiplistInsert(AllocList *head, AllocList *e,
AllocList **prev) {
LLA_SkiplistSearch(head, e, prev);
for (; head->levels < e->levels; head->levels++) { // extend prev pointers
prev[head->levels] = head; // to all *e's levels
}
for (int i = 0; i != e->levels; i++) { // add element to list
e->next[i] = prev[i]->next[i];
prev[i]->next[i] = e;
}
}
// Remove element *e from AllocList *head. Set prev[] as LLA_SkiplistSearch().
// Requires that e->levels be previous set by the caller (using
// LLA_SkiplistLevels())
static void LLA_SkiplistDelete(AllocList *head, AllocList *e,
AllocList **prev) {
AllocList *found = LLA_SkiplistSearch(head, e, prev);
RAW_CHECK(e == found, "element not in freelist");
for (int i = 0; i != e->levels && prev[i]->next[i] == e; i++) {
prev[i]->next[i] = e->next[i];
}
while (head->levels > 0 && head->next[head->levels - 1] == 0) {
head->levels--; // reduce head->levels if level unused
}
}
// ---------------------------------------------------------------------------
// Arena implementation
struct LowLevelAlloc::Arena {
Arena() : mu(SpinLock::LINKER_INITIALIZED) {} // does nothing; for static init
explicit Arena(int) : pagesize(0) {} // set pagesize to zero explicitly
// for non-static init
SpinLock mu; // protects freelist, allocation_count,
// pagesize, roundup, min_size
AllocList freelist; // head of free list; sorted by addr (under mu)
int32 allocation_count; // count of allocated blocks (under mu)
int32 flags; // flags passed to NewArena (ro after init)
size_t pagesize; // ==getpagesize() (init under mu, then ro)
size_t roundup; // lowest power of 2 >= max(16,sizeof (AllocList))
// (init under mu, then ro)
size_t min_size; // smallest allocation block size
// (init under mu, then ro)
PagesAllocator *allocator;
};
// The default arena, which is used when 0 is passed instead of an Arena
// pointer.
static struct LowLevelAlloc::Arena default_arena;
// Non-malloc-hooked arenas: used only to allocate metadata for arenas that
// do not want malloc hook reporting, so that for them there's no malloc hook
// reporting even during arena creation.
static struct LowLevelAlloc::Arena unhooked_arena;
static struct LowLevelAlloc::Arena unhooked_async_sig_safe_arena;
namespace {
class DefaultPagesAllocator : public LowLevelAlloc::PagesAllocator {
public:
virtual ~DefaultPagesAllocator() {}
virtual void *MapPages(int32 flags, size_t size);
virtual void UnMapPages(int32 flags, void *addr, size_t size);
};
}
// magic numbers to identify allocated and unallocated blocks
static const intptr_t kMagicAllocated = 0x4c833e95;
static const intptr_t kMagicUnallocated = ~kMagicAllocated;
namespace {
class SCOPED_LOCKABLE ArenaLock {
public:
explicit ArenaLock(LowLevelAlloc::Arena *arena)
EXCLUSIVE_LOCK_FUNCTION(arena->mu)
: left_(false), mask_valid_(false), arena_(arena) {
if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
// We've decided not to support async-signal-safe arena use until
// there is a demonstrated need. Here's how one could do it though
// (would need to be made more portable).
#if 0
sigset_t all;
sigfillset(&all);
this->mask_valid_ =
(pthread_sigmask(SIG_BLOCK, &all, &this->mask_) == 0);
#else
RAW_CHECK(false, "We do not yet support async-signal-safe arena.");
#endif
}
this->arena_->mu.Lock();
}
~ArenaLock() { RAW_CHECK(this->left_, "haven't left Arena region"); }
void Leave() /*UNLOCK_FUNCTION()*/ {
this->arena_->mu.Unlock();
#if 0
if (this->mask_valid_) {
pthread_sigmask(SIG_SETMASK, &this->mask_, 0);
}
#endif
this->left_ = true;
}
private:
bool left_; // whether left region
bool mask_valid_;
#if 0
sigset_t mask_; // old mask of blocked signals
#endif
LowLevelAlloc::Arena *arena_;
DISALLOW_COPY_AND_ASSIGN(ArenaLock);
};
} // anonymous namespace
// create an appropriate magic number for an object at "ptr"
// "magic" should be kMagicAllocated or kMagicUnallocated
inline static intptr_t Magic(intptr_t magic, AllocList::Header *ptr) {
return magic ^ reinterpret_cast<intptr_t>(ptr);
}
// Initialize the fields of an Arena
static void ArenaInit(LowLevelAlloc::Arena *arena) {
if (arena->pagesize == 0) {
arena->pagesize = getpagesize();
// Round up block sizes to a power of two close to the header size.
arena->roundup = 16;
while (arena->roundup < sizeof (arena->freelist.header)) {
arena->roundup += arena->roundup;
}
// Don't allocate blocks less than twice the roundup size to avoid tiny
// free blocks.
arena->min_size = 2 * arena->roundup;
arena->freelist.header.size = 0;
arena->freelist.header.magic =
Magic(kMagicUnallocated, &arena->freelist.header);
arena->freelist.header.arena = arena;
arena->freelist.levels = 0;
memset(arena->freelist.next, 0, sizeof (arena->freelist.next));
arena->allocation_count = 0;
if (arena == &default_arena) {
// Default arena should be hooked, e.g. for heap-checker to trace
// pointer chains through objects in the default arena.
arena->flags = LowLevelAlloc::kCallMallocHook;
} else if (arena == &unhooked_async_sig_safe_arena) {
arena->flags = LowLevelAlloc::kAsyncSignalSafe;
} else {
arena->flags = 0; // other arenas' flags may be overridden by client,
// but unhooked_arena will have 0 in 'flags'.
}
arena->allocator = LowLevelAlloc::GetDefaultPagesAllocator();
}
}
// L < meta_data_arena->mu
LowLevelAlloc::Arena *LowLevelAlloc::NewArena(int32 flags,
Arena *meta_data_arena) {
return NewArenaWithCustomAlloc(flags, meta_data_arena, NULL);
}
// L < meta_data_arena->mu
LowLevelAlloc::Arena *LowLevelAlloc::NewArenaWithCustomAlloc(int32 flags,
Arena *meta_data_arena,
PagesAllocator *allocator) {
RAW_CHECK(meta_data_arena != 0, "must pass a valid arena");
if (meta_data_arena == &default_arena) {
if ((flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
meta_data_arena = &unhooked_async_sig_safe_arena;
} else if ((flags & LowLevelAlloc::kCallMallocHook) == 0) {
meta_data_arena = &unhooked_arena;
}
}
// Arena(0) uses the constructor for non-static contexts
Arena *result =
new (AllocWithArena(sizeof (*result), meta_data_arena)) Arena(0);
ArenaInit(result);
result->flags = flags;
if (allocator) {
result->allocator = allocator;
}
return result;
}
// L < arena->mu, L < arena->arena->mu
bool LowLevelAlloc::DeleteArena(Arena *arena) {
RAW_CHECK(arena != 0 && arena != &default_arena && arena != &unhooked_arena,
"may not delete default arena");
ArenaLock section(arena);
bool empty = (arena->allocation_count == 0);
section.Leave();
if (empty) {
while (arena->freelist.next[0] != 0) {
AllocList *region = arena->freelist.next[0];
size_t size = region->header.size;
arena->freelist.next[0] = region->next[0];
RAW_CHECK(region->header.magic ==
Magic(kMagicUnallocated, &region->header),
"bad magic number in DeleteArena()");
RAW_CHECK(region->header.arena == arena,
"bad arena pointer in DeleteArena()");
RAW_CHECK(size % arena->pagesize == 0,
"empty arena has non-page-aligned block size");
RAW_CHECK(reinterpret_cast<intptr_t>(region) % arena->pagesize == 0,
"empty arena has non-page-aligned block");
int munmap_result;
if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) == 0) {
munmap_result = munmap(region, size);
} else {
munmap_result = MallocHook::UnhookedMUnmap(region, size);
}
RAW_CHECK(munmap_result == 0,
"LowLevelAlloc::DeleteArena: munmap failed address");
}
Free(arena);
}
return empty;
}
// ---------------------------------------------------------------------------
// Return value rounded up to next multiple of align.
// align must be a power of two.
static intptr_t RoundUp(intptr_t addr, intptr_t align) {
return (addr + align - 1) & ~(align - 1);
}
// Equivalent to "return prev->next[i]" but with sanity checking
// that the freelist is in the correct order, that it
// consists of regions marked "unallocated", and that no two regions
// are adjacent in memory (they should have been coalesced).
// L < arena->mu
static AllocList *Next(int i, AllocList *prev, LowLevelAlloc::Arena *arena) {
RAW_CHECK(i < prev->levels, "too few levels in Next()");
AllocList *next = prev->next[i];
if (next != 0) {
RAW_CHECK(next->header.magic == Magic(kMagicUnallocated, &next->header),
"bad magic number in Next()");
RAW_CHECK(next->header.arena == arena,
"bad arena pointer in Next()");
if (prev != &arena->freelist) {
RAW_CHECK(prev < next, "unordered freelist");
RAW_CHECK(reinterpret_cast<char *>(prev) + prev->header.size <
reinterpret_cast<char *>(next), "malformed freelist");
}
}
return next;
}
// Coalesce list item "a" with its successor if they are adjacent.
static void Coalesce(AllocList *a) {
AllocList *n = a->next[0];
if (n != 0 && reinterpret_cast<char *>(a) + a->header.size ==
reinterpret_cast<char *>(n)) {
LowLevelAlloc::Arena *arena = a->header.arena;
a->header.size += n->header.size;
n->header.magic = 0;
n->header.arena = 0;
AllocList *prev[kMaxLevel];
LLA_SkiplistDelete(&arena->freelist, n, prev);
LLA_SkiplistDelete(&arena->freelist, a, prev);
a->levels = LLA_SkiplistLevels(a->header.size, arena->min_size, true);
LLA_SkiplistInsert(&arena->freelist, a, prev);
}
}
// Adds block at location "v" to the free list
// L >= arena->mu
static void AddToFreelist(void *v, LowLevelAlloc::Arena *arena) {
AllocList *f = reinterpret_cast<AllocList *>(
reinterpret_cast<char *>(v) - sizeof (f->header));
RAW_CHECK(f->header.magic == Magic(kMagicAllocated, &f->header),
"bad magic number in AddToFreelist()");
RAW_CHECK(f->header.arena == arena,
"bad arena pointer in AddToFreelist()");
f->levels = LLA_SkiplistLevels(f->header.size, arena->min_size, true);
AllocList *prev[kMaxLevel];
LLA_SkiplistInsert(&arena->freelist, f, prev);
f->header.magic = Magic(kMagicUnallocated, &f->header);
Coalesce(f); // maybe coalesce with successor
Coalesce(prev[0]); // maybe coalesce with predecessor
}
// Frees storage allocated by LowLevelAlloc::Alloc().
// L < arena->mu
void LowLevelAlloc::Free(void *v) {
if (v != 0) {
AllocList *f = reinterpret_cast<AllocList *>(
reinterpret_cast<char *>(v) - sizeof (f->header));
RAW_CHECK(f->header.magic == Magic(kMagicAllocated, &f->header),
"bad magic number in Free()");
LowLevelAlloc::Arena *arena = f->header.arena;
if ((arena->flags & kCallMallocHook) != 0) {
MallocHook::InvokeDeleteHook(v);
}
ArenaLock section(arena);
AddToFreelist(v, arena);
RAW_CHECK(arena->allocation_count > 0, "nothing in arena to free");
arena->allocation_count--;
section.Leave();
}
}
// allocates and returns a block of size bytes, to be freed with Free()
// L < arena->mu
static void *DoAllocWithArena(size_t request, LowLevelAlloc::Arena *arena) {
void *result = 0;
if (request != 0) {
AllocList *s; // will point to region that satisfies request
ArenaLock section(arena);
ArenaInit(arena);
// round up with header
size_t req_rnd = RoundUp(request + sizeof (s->header), arena->roundup);
for (;;) { // loop until we find a suitable region
// find the minimum levels that a block of this size must have
int i = LLA_SkiplistLevels(req_rnd, arena->min_size, false) - 1;
if (i < arena->freelist.levels) { // potential blocks exist
AllocList *before = &arena->freelist; // predecessor of s
while ((s = Next(i, before, arena)) != 0 && s->header.size < req_rnd) {
before = s;
}
if (s != 0) { // we found a region
break;
}
}
// we unlock before mmap() both because mmap() may call a callback hook,
// and because it may be slow.
arena->mu.Unlock();
      // mmap in generous chunks (16 pages, i.e. 64K with 4K pages) to decrease
      // the chances/impact of fragmentation:
size_t new_pages_size = RoundUp(req_rnd, arena->pagesize * 16);
void *new_pages = arena->allocator->MapPages(arena->flags, new_pages_size);
arena->mu.Lock();
s = reinterpret_cast<AllocList *>(new_pages);
s->header.size = new_pages_size;
// Pretend the block is allocated; call AddToFreelist() to free it.
s->header.magic = Magic(kMagicAllocated, &s->header);
s->header.arena = arena;
AddToFreelist(&s->levels, arena); // insert new region into free list
}
AllocList *prev[kMaxLevel];
LLA_SkiplistDelete(&arena->freelist, s, prev); // remove from free list
// s points to the first free region that's big enough
if (req_rnd + arena->min_size <= s->header.size) { // big enough to split
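      // Worked example (illustrative): with req_rnd == 512 and a 4096-byte
      // region, s keeps 512 bytes and the 3584-byte remainder n is returned
      // to the free list below.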
AllocList *n = reinterpret_cast<AllocList *>
(req_rnd + reinterpret_cast<char *>(s));
n->header.size = s->header.size - req_rnd;
n->header.magic = Magic(kMagicAllocated, &n->header);
n->header.arena = arena;
s->header.size = req_rnd;
AddToFreelist(&n->levels, arena);
}
s->header.magic = Magic(kMagicAllocated, &s->header);
RAW_CHECK(s->header.arena == arena, "");
arena->allocation_count++;
section.Leave();
result = &s->levels;
}
ANNOTATE_NEW_MEMORY(result, request);
return result;
}
void *LowLevelAlloc::Alloc(size_t request) {
void *result = DoAllocWithArena(request, &default_arena);
if ((default_arena.flags & kCallMallocHook) != 0) {
// this call must be directly in the user-called allocator function
// for MallocHook::GetCallerStackTrace to work properly
MallocHook::InvokeNewHook(result, request);
}
return result;
}
void *LowLevelAlloc::AllocWithArena(size_t request, Arena *arena) {
RAW_CHECK(arena != 0, "must pass a valid arena");
void *result = DoAllocWithArena(request, arena);
if ((arena->flags & kCallMallocHook) != 0) {
// this call must be directly in the user-called allocator function
// for MallocHook::GetCallerStackTrace to work properly
MallocHook::InvokeNewHook(result, request);
}
return result;
}
LowLevelAlloc::Arena *LowLevelAlloc::DefaultArena() {
return &default_arena;
}
static DefaultPagesAllocator *default_pages_allocator;
static union {
char chars[sizeof(DefaultPagesAllocator)];
void *ptr;
} debug_pages_allocator_space;
LowLevelAlloc::PagesAllocator *LowLevelAlloc::GetDefaultPagesAllocator(void) {
if (default_pages_allocator) {
return default_pages_allocator;
}
default_pages_allocator = new (debug_pages_allocator_space.chars) DefaultPagesAllocator();
return default_pages_allocator;
}
void *DefaultPagesAllocator::MapPages(int32 flags, size_t size) {
void *new_pages;
if ((flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
new_pages = MallocHook::UnhookedMMap(0, size,
PROT_WRITE|PROT_READ,
MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
} else {
new_pages = mmap(0, size,
PROT_WRITE|PROT_READ,
MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
}
RAW_CHECK(new_pages != MAP_FAILED, "mmap error");
return new_pages;
}
void DefaultPagesAllocator::UnMapPages(int32 flags, void *region, size_t size) {
int munmap_result;
if ((flags & LowLevelAlloc::kAsyncSignalSafe) == 0) {
munmap_result = munmap(region, size);
} else {
munmap_result = MallocHook::UnhookedMUnmap(region, size);
}
RAW_CHECK(munmap_result == 0,
"LowLevelAlloc::DeleteArena: munmap failed address");
}

View File

@ -1,120 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2006, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#if !defined(_BASE_LOW_LEVEL_ALLOC_H_)
#define _BASE_LOW_LEVEL_ALLOC_H_
// A simple thread-safe memory allocator that does not depend on
// mutexes or thread-specific data. It is intended to be used
// sparingly, and only when malloc() would introduce an unwanted
// dependency, such as inside the heap-checker.
#include "../config.h"
#include <stddef.h> // for size_t
#include "base/basictypes.h"
class LowLevelAlloc {
public:
class PagesAllocator {
public:
virtual ~PagesAllocator();
virtual void *MapPages(int32 flags, size_t size) = 0;
virtual void UnMapPages(int32 flags, void *addr, size_t size) = 0;
};
static PagesAllocator *GetDefaultPagesAllocator(void);
struct Arena; // an arena from which memory may be allocated
// Returns a pointer to a block of at least "request" bytes
// that have been newly allocated from the specific arena.
  // For Alloc() calls, the DefaultArena() is used.
// Returns 0 if passed request==0.
// Does not return 0 under other circumstances; it crashes if memory
// is not available.
static void *Alloc(size_t request)
ATTRIBUTE_SECTION(malloc_hook);
static void *AllocWithArena(size_t request, Arena *arena)
ATTRIBUTE_SECTION(malloc_hook);
// Deallocates a region of memory that was previously allocated with
// Alloc(). Does nothing if passed 0. "s" must be either 0,
// or must have been returned from a call to Alloc() and not yet passed to
// Free() since that call to Alloc(). The space is returned to the arena
// from which it was allocated.
static void Free(void *s) ATTRIBUTE_SECTION(malloc_hook);
  // The ATTRIBUTE_SECTION(malloc_hook) annotations on Alloc* and Free
  // put all callers of MallocHook::Invoke* in this module into a special
  // section, so that MallocHook::GetCallerStackTrace can function accurately.
// Create a new arena.
// The root metadata for the new arena is allocated in the
// meta_data_arena; the DefaultArena() can be passed for meta_data_arena.
  // These values may be ORed into flags:
enum {
// Report calls to Alloc() and Free() via the MallocHook interface.
// Set in the DefaultArena.
kCallMallocHook = 0x0001,
    // Make calls to Alloc() and Free() async-signal-safe. Not set in
// DefaultArena().
kAsyncSignalSafe = 0x0002,
// When used with DefaultArena(), the NewArena() and DeleteArena() calls
// obey the flags given explicitly in the NewArena() call, even if those
// flags differ from the settings in DefaultArena(). So the call
// NewArena(kAsyncSignalSafe, DefaultArena()) is itself async-signal-safe,
    // as well as generating an arena that provides async-signal-safe
// Alloc/Free.
};
static Arena *NewArena(int32 flags, Arena *meta_data_arena);
  // Note: the pages allocator is never destroyed and allocated pages are never freed.
  // When allocator is NULL, this is the same as NewArena().
static Arena *NewArenaWithCustomAlloc(int32 flags, Arena *meta_data_arena, PagesAllocator *allocator);
// Destroys an arena allocated by NewArena and returns true,
// provided no allocated blocks remain in the arena.
// If allocated blocks remain in the arena, does nothing and
// returns false.
// It is illegal to attempt to destroy the DefaultArena().
static bool DeleteArena(Arena *arena);
// The default arena that always exists.
static Arena *DefaultArena();
private:
LowLevelAlloc(); // no instances
};
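// --- Illustrative usage sketch; not part of the original header. ---
// Exercises the arena lifecycle documented above, under the stated
// contracts; the function name is hypothetical.
inline void LowLevelAllocExample() {
  LowLevelAlloc::Arena *arena =
      LowLevelAlloc::NewArena(0, LowLevelAlloc::DefaultArena());
  void *p = LowLevelAlloc::AllocWithArena(64, arena);  // never 0 for request > 0
  LowLevelAlloc::Free(p);                              // returned to `arena`
  bool deleted = LowLevelAlloc::DeleteArena(arena);    // true: no blocks remain
  (void) deleted;
}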
#endif

View File

@ -1,332 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2007, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// ---
// Author: Craig Silverstein.
//
// A simple mutex wrapper, supporting locks and read-write locks.
// You should assume the locks are *not* re-entrant.
//
// To use: you should define the following macros in your configure.ac:
// ACX_PTHREAD
// AC_RWLOCK
// The latter is defined in ../autoconf.
//
// This class is meant to be internal-only and should be wrapped by an
// internal namespace. Before you use this module, please give the
// name of your internal namespace for this module. Or, if you want
// to expose it, you'll want to move it to the Google namespace. We
// cannot put this class in global namespace because there can be some
// problems when we have multiple versions of Mutex in each shared object.
//
// NOTE: TryLock() is broken for NO_THREADS mode, at least in NDEBUG
// mode.
//
// CYGWIN NOTE: Cygwin support for rwlock seems to be buggy:
// http://www.cygwin.com/ml/cygwin/2008-12/msg00017.html
// Because of that, we might as well use windows locks for
// cygwin. They seem to be more reliable than the cygwin pthreads layer.
//
// TRICKY IMPLEMENTATION NOTE:
// This class is designed to be safe to use during
// dynamic-initialization -- that is, by global constructors that are
// run before main() starts. The issue in this case is that
// dynamic-initialization happens in an unpredictable order, and it
// could be that someone else's dynamic initializer could call a
// function that tries to acquire this mutex -- but that all happens
// before this mutex's constructor has run. (This can happen even if
// the mutex and the function that uses the mutex are in the same .cc
// file.) Basically, because Mutex does non-trivial work in its
// constructor, it's not, in the naive implementation, safe to use
// before dynamic initialization has run on it.
//
// The solution used here is to pair the actual mutex primitive with a
// bool that is set to true when the mutex is dynamically initialized.
// (Before that it's false.) Then we modify all mutex routines to
// look at the bool, and not try to lock/unlock until the bool makes
// it to true (which happens after the Mutex constructor has run.)
//
// This works because before main() starts -- particularly, during
// dynamic initialization -- there are no threads, so a) it's ok that
// the mutex operations are a no-op, since we don't need locking then
// anyway; and b) we can be quite confident our bool won't change
// state between a call to Lock() and a call to Unlock() (that would
// require a global constructor in one translation unit to call Lock()
// and another global constructor in another translation unit to call
// Unlock() later, which is pretty perverse).
//
// That said, it's tricky, and can conceivably fail; it's safest to
// avoid trying to acquire a mutex in a global constructor, if you
// can. One way it can fail is that a really smart compiler might
// initialize the bool to true at static-initialization time (too
// early) rather than at dynamic-initialization time. To discourage
// that, we set is_safe_ to true in code (not the constructor
// colon-initializer) and set it to true via a function that always
// evaluates to true, but that the compiler can't know always
// evaluates to true. This should be good enough.
//
// A related issue is code that could try to access the mutex
// after it's been destroyed in the global destructors (because
// the Mutex global destructor runs before some other global
// destructor, that tries to acquire the mutex). The way we
// deal with this is by taking a constructor arg that global
// mutexes should pass in, that causes the destructor to do no
// work. We still depend on the compiler not doing anything
// weird to a Mutex's memory after it is destroyed, but for a
// static global variable, that's pretty safe.
#ifndef GOOGLE_MUTEX_H_
#define GOOGLE_MUTEX_H_
#include "../config.h"
#if defined(NO_THREADS)
typedef int MutexType; // to keep a lock-count
#elif defined(_WIN32) || defined(__CYGWIN__) || defined(__CYGWIN32__)
# ifndef WIN32_LEAN_AND_MEAN
# define WIN32_LEAN_AND_MEAN // We only need minimal includes
# endif
// We need Windows NT or later for TryEnterCriticalSection(). If you
// don't need that functionality, you can remove these _WIN32_WINNT
// lines, and change TryLock() to assert(0) or something.
# ifndef _WIN32_WINNT
# define _WIN32_WINNT 0x0400
# endif
# include <windows.h>
typedef CRITICAL_SECTION MutexType;
#elif defined(HAVE_PTHREAD) && defined(HAVE_RWLOCK)
// Needed for pthread_rwlock_*. If it causes problems, you could take it
// out, but then you'd have to unset HAVE_RWLOCK (at least on linux -- it
// *does* cause problems for FreeBSD or MacOSX, but isn't needed
// for locking there.)
# ifdef __linux__
# define _XOPEN_SOURCE 500 // may be needed to get the rwlock calls
# endif
# include <pthread.h>
typedef pthread_rwlock_t MutexType;
#elif defined(HAVE_PTHREAD)
# include <pthread.h>
typedef pthread_mutex_t MutexType;
#else
# error Need to implement mutex.h for your architecture, or #define NO_THREADS
#endif
#include <assert.h>
#include <stdlib.h> // for abort()
#define MUTEX_NAMESPACE perftools_mutex_namespace
namespace MUTEX_NAMESPACE {
class Mutex {
public:
// This is used for the single-arg constructor
enum LinkerInitialized { LINKER_INITIALIZED };
// Create a Mutex that is not held by anybody. This constructor is
// typically used for Mutexes allocated on the heap or the stack.
inline Mutex();
  // This constructor should be used for global, static Mutex objects.
  // It inhibits work being done by the destructor, which makes it
  // safer for code that tries to acquire this mutex in its global
  // destructor.
inline Mutex(LinkerInitialized);
// Destructor
inline ~Mutex();
inline void Lock(); // Block if needed until free then acquire exclusively
inline void Unlock(); // Release a lock acquired via Lock()
inline bool TryLock(); // If free, Lock() and return true, else return false
// Note that on systems that don't support read-write locks, these may
// be implemented as synonyms to Lock() and Unlock(). So you can use
// these for efficiency, but don't use them anyplace where being able
// to do shared reads is necessary to avoid deadlock.
inline void ReaderLock(); // Block until free or shared then acquire a share
inline void ReaderUnlock(); // Release a read share of this Mutex
inline void WriterLock() { Lock(); } // Acquire an exclusive lock
inline void WriterUnlock() { Unlock(); } // Release a lock from WriterLock()
private:
MutexType mutex_;
  // We want to make sure that the compiler sets is_safe_ to true only
  // when we tell it to, and never assumes is_safe_ is always true.
  // volatile is the most reliable way to do that.
volatile bool is_safe_;
// This indicates which constructor was called.
bool destroy_;
inline void SetIsSafe() { is_safe_ = true; }
// Catch the error of writing Mutex when intending MutexLock.
Mutex(Mutex* /*ignored*/) {}
// Disallow "evil" constructors
Mutex(const Mutex&);
void operator=(const Mutex&);
};
// Now the implementation of Mutex for various systems
#if defined(NO_THREADS)
// When we don't have threads, we can be either reading or writing,
// but not both. We can have lots of readers at once (in no-threads
// mode, that's most likely to happen in recursive function calls),
// but only one writer. We represent this by having mutex_ be -1 when
// writing and a number > 0 when reading (and 0 when no lock is held).
//
// In debug mode, we assert these invariants, while in non-debug mode
// we do nothing, for efficiency. That's why everything is in an
// assert.
Mutex::Mutex() : mutex_(0) { }
Mutex::Mutex(Mutex::LinkerInitialized) : mutex_(0) { }
Mutex::~Mutex() { assert(mutex_ == 0); }
void Mutex::Lock() { assert(--mutex_ == -1); }
void Mutex::Unlock() { assert(mutex_++ == -1); }
bool Mutex::TryLock() { if (mutex_) return false; Lock(); return true; }
void Mutex::ReaderLock() { assert(++mutex_ > 0); }
void Mutex::ReaderUnlock() { assert(mutex_-- > 0); }
#elif defined(_WIN32) || defined(__CYGWIN__) || defined(__CYGWIN32__)
Mutex::Mutex() : destroy_(true) {
InitializeCriticalSection(&mutex_);
SetIsSafe();
}
Mutex::Mutex(LinkerInitialized) : destroy_(false) {
InitializeCriticalSection(&mutex_);
SetIsSafe();
}
Mutex::~Mutex() { if (destroy_) DeleteCriticalSection(&mutex_); }
void Mutex::Lock() { if (is_safe_) EnterCriticalSection(&mutex_); }
void Mutex::Unlock() { if (is_safe_) LeaveCriticalSection(&mutex_); }
bool Mutex::TryLock() { return is_safe_ ?
TryEnterCriticalSection(&mutex_) != 0 : true; }
void Mutex::ReaderLock() { Lock(); } // we don't have read-write locks
void Mutex::ReaderUnlock() { Unlock(); }
#elif defined(HAVE_PTHREAD) && defined(HAVE_RWLOCK)
#define SAFE_PTHREAD(fncall) do { /* run fncall if is_safe_ is true */ \
if (is_safe_ && fncall(&mutex_) != 0) abort(); \
} while (0)
Mutex::Mutex() : destroy_(true) {
SetIsSafe();
if (is_safe_ && pthread_rwlock_init(&mutex_, NULL) != 0) abort();
}
Mutex::Mutex(Mutex::LinkerInitialized) : destroy_(false) {
SetIsSafe();
if (is_safe_ && pthread_rwlock_init(&mutex_, NULL) != 0) abort();
}
Mutex::~Mutex() { if (destroy_) SAFE_PTHREAD(pthread_rwlock_destroy); }
void Mutex::Lock() { SAFE_PTHREAD(pthread_rwlock_wrlock); }
void Mutex::Unlock() { SAFE_PTHREAD(pthread_rwlock_unlock); }
bool Mutex::TryLock() { return is_safe_ ?
pthread_rwlock_trywrlock(&mutex_) == 0 : true; }
void Mutex::ReaderLock() { SAFE_PTHREAD(pthread_rwlock_rdlock); }
void Mutex::ReaderUnlock() { SAFE_PTHREAD(pthread_rwlock_unlock); }
#undef SAFE_PTHREAD
#elif defined(HAVE_PTHREAD)
#define SAFE_PTHREAD(fncall) do { /* run fncall if is_safe_ is true */ \
if (is_safe_ && fncall(&mutex_) != 0) abort(); \
} while (0)
Mutex::Mutex() : destroy_(true) {
SetIsSafe();
if (is_safe_ && pthread_mutex_init(&mutex_, NULL) != 0) abort();
}
Mutex::Mutex(Mutex::LinkerInitialized) : destroy_(false) {
SetIsSafe();
if (is_safe_ && pthread_mutex_init(&mutex_, NULL) != 0) abort();
}
Mutex::~Mutex() { if (destroy_) SAFE_PTHREAD(pthread_mutex_destroy); }
void Mutex::Lock() { SAFE_PTHREAD(pthread_mutex_lock); }
void Mutex::Unlock() { SAFE_PTHREAD(pthread_mutex_unlock); }
bool Mutex::TryLock() { return is_safe_ ?
pthread_mutex_trylock(&mutex_) == 0 : true; }
void Mutex::ReaderLock() { Lock(); }
void Mutex::ReaderUnlock() { Unlock(); }
#undef SAFE_PTHREAD
#endif
// --------------------------------------------------------------------------
// Some helper classes
// MutexLock(mu) acquires mu when constructed and releases it when destroyed.
class MutexLock {
public:
explicit MutexLock(Mutex *mu) : mu_(mu) { mu_->Lock(); }
~MutexLock() { mu_->Unlock(); }
private:
Mutex * const mu_;
// Disallow "evil" constructors
MutexLock(const MutexLock&);
void operator=(const MutexLock&);
};
// ReaderMutexLock and WriterMutexLock do the same, for rwlocks
class ReaderMutexLock {
public:
explicit ReaderMutexLock(Mutex *mu) : mu_(mu) { mu_->ReaderLock(); }
~ReaderMutexLock() { mu_->ReaderUnlock(); }
private:
Mutex * const mu_;
// Disallow "evil" constructors
ReaderMutexLock(const ReaderMutexLock&);
void operator=(const ReaderMutexLock&);
};
class WriterMutexLock {
public:
explicit WriterMutexLock(Mutex *mu) : mu_(mu) { mu_->WriterLock(); }
~WriterMutexLock() { mu_->WriterUnlock(); }
private:
Mutex * const mu_;
// Disallow "evil" constructors
WriterMutexLock(const WriterMutexLock&);
void operator=(const WriterMutexLock&);
};
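// Illustrative sketch; not part of the original header. Scoped use of the
// lockers above; `g_mu`, `g_value` and the two functions are hypothetical.
static Mutex g_mu(Mutex::LINKER_INITIALIZED);  // safe as a global (see notes above)
static int g_value = 0;
inline int ReadValue()        { ReaderMutexLock l(&g_mu); return g_value; }
inline void WriteValue(int v) { WriterMutexLock l(&g_mu); g_value = v; }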
// Catch bug where variable name is omitted, e.g. MutexLock (&mu);
#define MutexLock(x) COMPILE_ASSERT(0, mutex_lock_decl_missing_var_name)
#define ReaderMutexLock(x) COMPILE_ASSERT(0, rmutex_lock_decl_missing_var_name)
#define WriterMutexLock(x) COMPILE_ASSERT(0, wmutex_lock_decl_missing_var_name)
} // namespace MUTEX_NAMESPACE
using namespace MUTEX_NAMESPACE;
#undef MUTEX_NAMESPACE
#endif /* GOOGLE_MUTEX_H_ */

View File

@ -1,129 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2006, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ---
* Author: Sanjay Ghemawat
*/
#include "../config.h"
#include "base/spinlock.h"
#include "base/spinlock_internal.h"
#include "base/sysinfo.h" /* for GetSystemCPUsCount() */
// NOTE on the Lock-state values:
//
// kSpinLockFree represents the unlocked state
// kSpinLockHeld represents the locked state with no waiters
// kSpinLockSleeper represents the locked state with waiters
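//
// Illustrative transition sketch (not in the original source):
//   Lock() fast path:  kSpinLockFree -> kSpinLockHeld     (CAS)
//   SlowLock():        kSpinLockHeld -> kSpinLockSleeper  (mark, then wait)
//                      kSpinLockFree -> kSpinLockSleeper  (acquire; waiters assumed)
//   Unlock():          kSpinLockHeld -> kSpinLockFree     (no wakeup needed)
//                      kSpinLockSleeper -> kSpinLockFree  (+ SlowUnlock() wake)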
static int adaptive_spin_count = 0;
const base::LinkerInitialized SpinLock::LINKER_INITIALIZED =
base::LINKER_INITIALIZED;
namespace {
struct SpinLock_InitHelper {
SpinLock_InitHelper() {
// On multi-cpu machines, spin for longer before yielding
// the processor or sleeping. Reduces idle time significantly.
if (GetSystemCPUsCount() > 1) {
adaptive_spin_count = 1000;
}
}
};
// Hook into global constructor execution:
// We do not do adaptive spinning before that,
// but nothing lock-intensive should be going on at that time.
static SpinLock_InitHelper init_helper;
inline void SpinlockPause(void) {
#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
__asm__ __volatile__("rep; nop" : : );
#endif
}
} // unnamed namespace
// Monitor the lock to see if its value changes within some time
// period (adaptive_spin_count loop iterations). The last value read
// from the lock is returned from the method.
Atomic32 SpinLock::SpinLoop() {
int c = adaptive_spin_count;
while (base::subtle::NoBarrier_Load(&lockword_) != kSpinLockFree && --c > 0) {
SpinlockPause();
}
return base::subtle::Acquire_CompareAndSwap(&lockword_, kSpinLockFree,
kSpinLockSleeper);
}
void SpinLock::SlowLock() {
Atomic32 lock_value = SpinLoop();
int lock_wait_call_count = 0;
while (lock_value != kSpinLockFree) {
// If the lock is currently held, but not marked as having a sleeper, mark
// it as having a sleeper.
if (lock_value == kSpinLockHeld) {
// Here, just "mark" that the thread is going to sleep. Don't store the
// lock wait time in the lock as that will cause the current lock
// owner to think it experienced contention.
lock_value = base::subtle::Acquire_CompareAndSwap(&lockword_,
kSpinLockHeld,
kSpinLockSleeper);
if (lock_value == kSpinLockHeld) {
// Successfully transitioned to kSpinLockSleeper. Pass
// kSpinLockSleeper to the SpinLockDelay routine to properly indicate
// the last lock_value observed.
lock_value = kSpinLockSleeper;
} else if (lock_value == kSpinLockFree) {
        // Lock is free again, so try and acquire it before sleeping. If
        // this thread obtains the lock, the new lock state will be
        // kSpinLockSleeper, conservatively assuming other waiters remain.
lock_value = base::subtle::Acquire_CompareAndSwap(&lockword_,
kSpinLockFree,
kSpinLockSleeper);
continue; // skip the delay at the end of the loop
}
}
// Wait for an OS specific delay.
base::internal::SpinLockDelay(&lockword_, lock_value,
++lock_wait_call_count);
// Spin again after returning from the wait routine to give this thread
// some chance of obtaining the lock.
lock_value = SpinLoop();
}
}
void SpinLock::SlowUnlock() {
// wake waiter if necessary
base::internal::SpinLockWake(&lockword_, false);
}

View File

@ -1,143 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2006, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ---
* Author: Sanjay Ghemawat
*/
// SpinLock is async signal safe.
// If used within a signal handler, all lock holders
// should block the signal even outside the signal handler.
#ifndef BASE_SPINLOCK_H_
#define BASE_SPINLOCK_H_
#include "../config.h"
#include "base/atomicops.h"
#include "base/basictypes.h"
#include "base/dynamic_annotations.h"
#include "base/thread_annotations.h"
class LOCKABLE SpinLock {
public:
SpinLock() : lockword_(kSpinLockFree) { }
// Special constructor for use with static SpinLock objects. E.g.,
//
// static SpinLock lock(base::LINKER_INITIALIZED);
//
  // When initialized using this constructor, we depend on the fact
// that the linker has already initialized the memory appropriately.
// A SpinLock constructed like this can be freely used from global
// initializers without worrying about the order in which global
// initializers run.
explicit SpinLock(base::LinkerInitialized /*x*/) {
// Does nothing; lockword_ is already initialized
}
// Acquire this SpinLock.
// TODO(csilvers): uncomment the annotation when we figure out how to
// support this macro with 0 args (see thread_annotations.h)
inline void Lock() /*EXCLUSIVE_LOCK_FUNCTION()*/ {
if (base::subtle::Acquire_CompareAndSwap(&lockword_, kSpinLockFree,
kSpinLockHeld) != kSpinLockFree) {
SlowLock();
}
ANNOTATE_RWLOCK_ACQUIRED(this, 1);
}
// Try to acquire this SpinLock without blocking and return true if the
// acquisition was successful. If the lock was not acquired, false is
// returned. If this SpinLock is free at the time of the call, TryLock
// will return true with high probability.
inline bool TryLock() EXCLUSIVE_TRYLOCK_FUNCTION(true) {
bool res =
(base::subtle::Acquire_CompareAndSwap(&lockword_, kSpinLockFree,
kSpinLockHeld) == kSpinLockFree);
if (res) {
ANNOTATE_RWLOCK_ACQUIRED(this, 1);
}
return res;
}
// Release this SpinLock, which must be held by the calling thread.
// TODO(csilvers): uncomment the annotation when we figure out how to
// support this macro with 0 args (see thread_annotations.h)
inline void Unlock() /*UNLOCK_FUNCTION()*/ {
ANNOTATE_RWLOCK_RELEASED(this, 1);
uint64 prev_value = static_cast<uint64>(
base::subtle::Release_AtomicExchange(&lockword_, kSpinLockFree));
if (prev_value != kSpinLockHeld) {
// Speed the wakeup of any waiter.
SlowUnlock();
}
}
// Determine if the lock is held. When the lock is held by the invoking
// thread, true will always be returned. Intended to be used as
// CHECK(lock.IsHeld()).
inline bool IsHeld() const {
return base::subtle::NoBarrier_Load(&lockword_) != kSpinLockFree;
}
static const base::LinkerInitialized LINKER_INITIALIZED; // backwards compat
private:
enum { kSpinLockFree = 0 };
enum { kSpinLockHeld = 1 };
enum { kSpinLockSleeper = 2 };
volatile Atomic32 lockword_;
void SlowLock();
void SlowUnlock();
Atomic32 SpinLoop();
DISALLOW_COPY_AND_ASSIGN(SpinLock);
};
// Corresponding locker object that arranges to acquire a spinlock for
// the duration of a C++ scope.
class SCOPED_LOCKABLE SpinLockHolder {
private:
SpinLock* lock_;
public:
inline explicit SpinLockHolder(SpinLock* l) EXCLUSIVE_LOCK_FUNCTION(l)
: lock_(l) {
l->Lock();
}
// TODO(csilvers): uncomment the annotation when we figure out how to
// support this macro with 0 args (see thread_annotations.h)
inline ~SpinLockHolder() /*UNLOCK_FUNCTION()*/ { lock_->Unlock(); }
};
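// Illustrative sketch; not part of the original header. Guards a counter with
// the scoped holder above; `g_counter_lock`/`g_counter` are hypothetical names.
static SpinLock g_counter_lock(base::LINKER_INITIALIZED);
static int g_counter = 0;
inline void IncrementCounter() {
  SpinLockHolder h(&g_counter_lock);  // Lock() now, Unlock() at scope exit
  ++g_counter;
}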
// Catch bug where variable name is omitted, e.g. SpinLockHolder (&lock);
#define SpinLockHolder(x) COMPILE_ASSERT(0, spin_lock_decl_missing_var_name)
#endif // BASE_SPINLOCK_H_

View File

@ -1,102 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2010, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// The OS-specific header included below must provide two calls:
// base::internal::SpinLockDelay() and base::internal::SpinLockWake().
// See spinlock_internal.h for the spec of SpinLockWake().
// void SpinLockDelay(volatile Atomic32 *w, int32 value, int loop)
// SpinLockDelay() generates an appropriate spin delay on iteration "loop" of a
// spin loop on location *w, whose previously observed value was "value".
// SpinLockDelay() may do nothing, may yield the CPU, may sleep a clock tick,
// or may wait for a delay that can be truncated by a call to SpinLockWake(w).
// In all cases, it must return in bounded time even if SpinLockWake() is not
// called.
#include "base/spinlock_internal.h"
// forward declaration for use by spinlock_*-inl.h
namespace base { namespace internal { static int SuggestedDelayNS(int loop); }}
#if defined(_WIN32)
#include "base/spinlock_win32-inl.h"
#elif defined(__linux__)
#include "base/spinlock_linux-inl.h"
#else
#include "base/spinlock_posix-inl.h"
#endif
namespace base {
namespace internal {
// Return a suggested delay in nanoseconds for iteration number "loop"
static int SuggestedDelayNS(int loop) {
// Weak pseudo-random number generator to get some spread between threads
// when many are spinning.
#ifdef BASE_HAS_ATOMIC64
static base::subtle::Atomic64 rand;
uint64 r = base::subtle::NoBarrier_Load(&rand);
r = 0x5deece66dLL * r + 0xb; // numbers from nrand48()
base::subtle::NoBarrier_Store(&rand, r);
r <<= 16; // 48-bit random number now in top 48-bits.
if (loop < 0 || loop > 32) { // limit loop to 0..32
loop = 32;
}
// loop>>3 cannot exceed 4 because loop cannot exceed 32.
// Select top 20..24 bits of lower 48 bits,
// giving approximately 0ms to 16ms.
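  // (Worked example, illustrative: loop == 32 gives shift 44 - (32>>3) == 40,
  // so r >> 40 keeps the top 24 bits, i.e. 0..2^24 ns, roughly 0..16.8ms.)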
// Mean is exponential in loop for first 32 iterations, then 8ms.
// The futex path multiplies this by 16, since we expect explicit wakeups
// almost always on that path.
return r >> (44 - (loop >> 3));
#else
static Atomic32 rand;
uint32 r = base::subtle::NoBarrier_Load(&rand);
r = 0x343fd * r + 0x269ec3; // numbers from MSVC++
base::subtle::NoBarrier_Store(&rand, r);
r <<= 1; // 31-bit random number now in top 31-bits.
if (loop < 0 || loop > 32) { // limit loop to 0..32
loop = 32;
}
// loop>>3 cannot exceed 4 because loop cannot exceed 32.
// Select top 20..24 bits of lower 31 bits,
// giving approximately 0ms to 16ms.
// Mean is exponential in loop for first 32 iterations, then 8ms.
// The futex path multiplies this by 16, since we expect explicit wakeups
// almost always on that path.
return r >> (12 - (loop >> 3));
#endif
}
} // namespace internal
} // namespace base

View File

@ -1,51 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2010, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ---
 * This file is an internal part of spinlock.cc and once.cc
* It may not be used directly by code outside of //base.
*/
#ifndef BASE_SPINLOCK_INTERNAL_H_
#define BASE_SPINLOCK_INTERNAL_H_
#include "../config.h"
#include "base/basictypes.h"
#include "base/atomicops.h"
namespace base {
namespace internal {
void SpinLockWake(volatile Atomic32 *w, bool all);
void SpinLockDelay(volatile Atomic32 *w, int32 value, int loop);
} // namespace internal
} // namespace base
#endif

View File

@ -1,101 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2009, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ---
* This file is a Linux-specific part of spinlock_internal.cc
*/
#include <errno.h>
#include <sched.h>
#include <time.h>
#include <limits.h>
#include "base/linux_syscall_support.h"
#define FUTEX_WAIT 0
#define FUTEX_WAKE 1
#define FUTEX_PRIVATE_FLAG 128
static bool have_futex;
static int futex_private_flag = FUTEX_PRIVATE_FLAG;
namespace {
static struct InitModule {
InitModule() {
int x = 0;
// futexes are ints, so we can use them only when
// that's the same size as the lockword_ in SpinLock.
have_futex = (sizeof (Atomic32) == sizeof (int) &&
sys_futex(&x, FUTEX_WAKE, 1, NULL, NULL, 0) >= 0);
if (have_futex &&
sys_futex(&x, FUTEX_WAKE | futex_private_flag, 1, NULL, NULL, 0) < 0) {
futex_private_flag = 0;
}
}
} init_module;
} // anonymous namespace
namespace base {
namespace internal {
void SpinLockDelay(volatile Atomic32 *w, int32 value, int loop) {
if (loop != 0) {
int save_errno = errno;
struct timespec tm;
tm.tv_sec = 0;
if (have_futex) {
tm.tv_nsec = base::internal::SuggestedDelayNS(loop);
} else {
tm.tv_nsec = 2000001; // above 2ms so linux 2.4 doesn't spin
}
if (have_futex) {
tm.tv_nsec *= 16; // increase the delay; we expect explicit wakeups
sys_futex(reinterpret_cast<int *>(const_cast<Atomic32 *>(w)),
FUTEX_WAIT | futex_private_flag,
value, reinterpret_cast<struct kernel_timespec *>(&tm),
NULL, 0);
} else {
nanosleep(&tm, NULL);
}
errno = save_errno;
}
}
void SpinLockWake(volatile Atomic32 *w, bool all) {
if (have_futex) {
sys_futex(reinterpret_cast<int *>(const_cast<Atomic32 *>(w)),
FUTEX_WAKE | futex_private_flag, all? INT_MAX : 1,
NULL, NULL, 0);
}
}
} // namespace internal
} // namespace base

View File

@ -1,63 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2009, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ---
* This file is a Posix-specific part of spinlock_internal.cc
*/
#include "../config.h"
#include <errno.h>
#ifdef HAVE_SCHED_H
#include <sched.h> /* For sched_yield() */
#endif
#include <time.h> /* For nanosleep() */
namespace base {
namespace internal {
void SpinLockDelay(volatile Atomic32 *w, int32 value, int loop) {
int save_errno = errno;
if (loop == 0) {
} else if (loop == 1) {
sched_yield();
} else {
struct timespec tm;
tm.tv_sec = 0;
tm.tv_nsec = base::internal::SuggestedDelayNS(loop);
nanosleep(&tm, NULL);
}
errno = save_errno;
}
void SpinLockWake(volatile Atomic32 *w, bool all) {
}
} // namespace internal
} // namespace base

View File

@ -1,54 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2009, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ---
* This file is a Win32-specific part of spinlock_internal.cc
*/
#include <windows.h>
namespace base {
namespace internal {
void SpinLockDelay(volatile Atomic32 *w, int32 value, int loop) {
if (loop == 0) {
} else if (loop == 1) {
Sleep(0);
} else {
Sleep(base::internal::SuggestedDelayNS(loop) / 1000000);
}
}
void SpinLockWake(volatile Atomic32 *w, bool all) {
}
} // namespace internal
} // namespace base

View File

@ -1,98 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2006, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ---
* Author: Maxim Lifantsev
*/
#ifndef BASE_STL_ALLOCATOR_H_
#define BASE_STL_ALLOCATOR_H_
#include "../config.h"
#include <stddef.h> // for ptrdiff_t
#include <limits>
#include "base/logging.h"
// Generic allocator class for STL objects
// that uses a given type-less allocator Alloc, which must provide:
// static void* Alloc::Allocate(size_t size);
// static void Alloc::Free(void* ptr, size_t size);
//
// STL_Allocator<T, MyAlloc> provides the same thread-safety
// guarantees as MyAlloc.
//
// Usage example:
// set<T, less<T>, STL_Allocator<T, MyAlloc> > my_set;
// CAVEAT: Parts of the code below are probably specific
// to the STL version(s) we are using.
// The code is simply lifted from what std::allocator<> provides.
template <typename T, class Alloc>
class STL_Allocator {
public:
typedef size_t size_type;
typedef ptrdiff_t difference_type;
typedef T* pointer;
typedef const T* const_pointer;
typedef T& reference;
typedef const T& const_reference;
typedef T value_type;
template <class T1> struct rebind {
typedef STL_Allocator<T1, Alloc> other;
};
STL_Allocator() { }
STL_Allocator(const STL_Allocator&) { }
template <class T1> STL_Allocator(const STL_Allocator<T1, Alloc>&) { }
~STL_Allocator() { }
pointer address(reference x) const { return &x; }
const_pointer address(const_reference x) const { return &x; }
pointer allocate(size_type n, const void* = 0) {
RAW_DCHECK((n * sizeof(T)) / sizeof(T) == n, "n is too big to allocate");
return static_cast<T*>(Alloc::Allocate(n * sizeof(T)));
}
void deallocate(pointer p, size_type n) { Alloc::Free(p, n * sizeof(T)); }
size_type max_size() const { return size_t(-1) / sizeof(T); }
void construct(pointer p, const T& val) { ::new(p) T(val); }
void construct(pointer p) { ::new(p) T(); }
void destroy(pointer p) { p->~T(); }
// There's no state, so these allocators are always equal
bool operator==(const STL_Allocator&) const { return true; }
};
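// --- Illustrative sketch; not part of the original header. ---
// A minimal type-less allocator satisfying the Alloc contract documented
// above; `MallocAlloc` is a hypothetical name, backed by malloc()/free().
#include <stdlib.h>  // for malloc(), free() (sketch only)
struct MallocAlloc {
  static void* Allocate(size_t size) { return malloc(size); }
  static void Free(void* ptr, size_t /*size*/) { free(ptr); }
};
// Usage, as in the comment at the top of this header:
//   set<int, less<int>, STL_Allocator<int, MallocAlloc> > my_set;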
#endif // BASE_STL_ALLOCATOR_H_

View File

@ -1,860 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2006, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "../config.h"
#if (defined(_WIN32) || defined(__MINGW32__)) && !defined(__CYGWIN__) && !defined(__CYGWIN32__)
# define PLATFORM_WINDOWS 1
#endif
#include <ctype.h> // for isspace()
#include <stdlib.h> // for getenv()
#include <stdio.h> // for snprintf(), sscanf()
#include <string.h> // for memmove(), memchr(), etc.
#include <fcntl.h> // for open()
#include <errno.h> // for errno
#ifdef HAVE_UNISTD_H
#include <unistd.h> // for read()
#endif
#if defined __MACH__ // Mac OS X, almost certainly
#include <mach-o/dyld.h> // for iterating over dll's in ProcMapsIter
#include <mach-o/loader.h> // for iterating over dll's in ProcMapsIter
#include <sys/types.h>
#include <sys/sysctl.h> // how we figure out numcpu's on OS X
#elif defined __FreeBSD__
#include <sys/sysctl.h>
#elif defined __sun__ // Solaris
#include <procfs.h> // for, e.g., prmap_t
#elif defined(PLATFORM_WINDOWS)
#include <process.h> // for getpid() (actually, _getpid())
#include <shlwapi.h> // for SHGetValueA()
#include <tlhelp32.h> // for Module32First()
#endif
#include "base/sysinfo.h"
#include "base/commandlineflags.h"
#include "base/dynamic_annotations.h" // for RunningOnValgrind
#include "base/logging.h"
#ifdef PLATFORM_WINDOWS
#ifdef MODULEENTRY32
// In a change from the usual W-A pattern, there is no A variant of
// MODULEENTRY32. Tlhelp32.h #defines the W variant, but not the A.
// In unicode mode, tlhelp32.h #defines MODULEENTRY32 to be
// MODULEENTRY32W. These #undefs are the only way I see to get back
// access to the original, ascii struct (and related functions).
#undef MODULEENTRY32
#undef Module32First
#undef Module32Next
#undef PMODULEENTRY32
#undef LPMODULEENTRY32
#endif /* MODULEENTRY32 */
// MinGW doesn't seem to define this, perhaps some windowsen don't either.
#ifndef TH32CS_SNAPMODULE32
#define TH32CS_SNAPMODULE32 0
#endif /* TH32CS_SNAPMODULE32 */
#endif /* PLATFORM_WINDOWS */
// Re-run fn until it doesn't cause EINTR.
#define NO_INTR(fn) do {} while ((fn) < 0 && errno == EINTR)
// open/read/close can set errno, which may be illegal at this
// time, so prefer making the syscalls directly if we can.
#ifdef HAVE_SYS_SYSCALL_H
# include <sys/syscall.h>
#endif
#ifdef SYS_open // solaris 11, at least sometimes, only defines SYS_openat
# define safeopen(filename, mode) syscall(SYS_open, filename, mode)
#else
# define safeopen(filename, mode) open(filename, mode)
#endif
#ifdef SYS_read
# define saferead(fd, buffer, size) syscall(SYS_read, fd, buffer, size)
#else
# define saferead(fd, buffer, size) read(fd, buffer, size)
#endif
#ifdef SYS_close
# define safeclose(fd) syscall(SYS_close, fd)
#else
# define safeclose(fd) close(fd)
#endif
// ----------------------------------------------------------------------
// GetenvBeforeMain()
// GetUniquePathFromEnv()
// Some non-trivial getenv-related functions.
// ----------------------------------------------------------------------
// It's not safe to call getenv() in the malloc hooks, because they
// might be called extremely early, before libc is done setting up
// correctly. In particular, the thread library may not be done
// setting up errno. So instead, we use the built-in __environ array
// if it exists, and otherwise read /proc/self/environ directly, using
// system calls to read the file, and thus avoid setting errno.
// /proc/self/environ has a limit of how much data it exports (around
// 8K), so it's not an ideal solution.
const char* GetenvBeforeMain(const char* name) {
#if defined(HAVE___ENVIRON) // if we have it, it's declared in unistd.h
if (__environ) { // can exist but be NULL, if statically linked
const int namelen = strlen(name);
for (char** p = __environ; *p; p++) {
if (strlen(*p) < namelen) {
continue;
}
if (!memcmp(*p, name, namelen) && (*p)[namelen] == '=') // it's a match
return *p + namelen+1; // point after =
}
return NULL;
}
#endif
#if defined(PLATFORM_WINDOWS)
// TODO(mbelshe) - repeated calls to this function will overwrite the
// contents of the static buffer.
static char envvar_buf[1024]; // enough to hold any envvar we care about
if (!GetEnvironmentVariableA(name, envvar_buf, sizeof(envvar_buf)-1))
return NULL;
return envvar_buf;
#endif
// static is ok because this function should only be called before
// main(), when we're single-threaded.
static char envbuf[16<<10];
if (*envbuf == '\0') { // haven't read the environ yet
int fd = safeopen("/proc/self/environ", O_RDONLY);
// The -2 below guarantees the last two bytes of the buffer will be \0\0
if (fd == -1 || // unable to open the file, fall back onto libc
saferead(fd, envbuf, sizeof(envbuf) - 2) < 0) { // error reading file
RAW_VLOG(1, "Unable to open /proc/self/environ, falling back "
"on getenv(\"%s\"), which may not work", name);
if (fd != -1) safeclose(fd);
return getenv(name);
}
safeclose(fd);
}
const int namelen = strlen(name);
const char* p = envbuf;
while (*p != '\0') { // will happen at the \0\0 that terminates the buffer
// proc file has the format NAME=value\0NAME=value\0NAME=value\0...
const char* endp = (char*)memchr(p, '\0', sizeof(envbuf) - (p - envbuf));
if (endp == NULL) // this entry isn't NUL terminated
return NULL;
else if (!memcmp(p, name, namelen) && p[namelen] == '=') // it's a match
return p + namelen+1; // point after =
p = endp + 1;
}
return NULL; // env var never found
}
extern "C" {
const char* TCMallocGetenvSafe(const char* name) {
return GetenvBeforeMain(name);
}
}
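// Illustrative sketch: early-running code (e.g. a malloc hook) can consult
// the environment through GetenvBeforeMain() instead of getenv(). The
// variable name TCMALLOC_EXAMPLE_FLAG below is hypothetical.
static bool ExampleFlagIsSet() {
  const char* val = GetenvBeforeMain("TCMALLOC_EXAMPLE_FLAG");
  return val != NULL && val[0] != '\0' && val[0] != '0';
}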
// This takes as an argument an environment-variable name (like
// CPUPROFILE) whose value is supposed to be a file-path, and sets
// path to that path, and returns true. If the env var doesn't exist,
// or is the empty string, leave path unchanged and returns false.
// The reason this is non-trivial is that this function handles munged
// pathnames. Here's why:
//
// If we're a child process of the 'main' process, we can't just use
// getenv("CPUPROFILE") -- the parent process will be using that path.
// Instead we append our pid to the pathname. How do we tell if we're a
// child process? Ideally we'd set an environment variable that all
// our children would inherit. But -- and this is seemingly a bug in
// gcc -- if you do a setenv() in a shared library in a global
// constructor, the environment setting is lost by the time main() is
// called. The only safe thing we can do in such a situation is to
// modify the existing envvar. So we do a hack: in the parent, we set
// the high bit of the 1st char of CPUPROFILE. In the child, we
// notice the high bit is set and append the pid. This works
// assuming cpuprofile filenames don't normally have the high bit set
// in their first character! If that assumption is violated, we'll
// still get a profile, but one with an unexpected name.
// TODO(csilvers): set an envvar instead when we can do it reliably.
bool GetUniquePathFromEnv(const char* env_name, char* path) {
char* envval = getenv(env_name);
if (envval == NULL || *envval == '\0')
return false;
if (envval[0] & 128) { // high bit is set
snprintf(path, PATH_MAX, "%c%s_%u", // add pid and clear high bit
envval[0] & 127, envval+1, (unsigned int)(getpid()));
} else {
snprintf(path, PATH_MAX, "%s", envval);
envval[0] |= 128; // set high bit for kids to see
}
return true;
}
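// Illustrative sketch of the high-bit protocol above: the first caller (the
// parent) gets the path verbatim and marks the envvar; a process inheriting
// the marked envvar gets the path with its pid appended. CPUPROFILE is the
// canonical example variable; the paths in the comments are hypothetical.
static void ExampleUniquePath() {
  char path[PATH_MAX];
  if (GetUniquePathFromEnv("CPUPROFILE", path)) {
    // In the parent: path == "/tmp/prof", and CPUPROFILE's first byte now
    // has its high bit set. In a child that inherited the marked value:
    // path == "/tmp/prof_<pid>", with the high bit masked off in the output.
  }
}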
void SleepForMilliseconds(int milliseconds) {
#ifdef PLATFORM_WINDOWS
_sleep(milliseconds); // Windows's _sleep takes milliseconds argument
#else
// Sleep for a few milliseconds
struct timespec sleep_time;
sleep_time.tv_sec = milliseconds / 1000;
sleep_time.tv_nsec = (milliseconds % 1000) * 1000000;
while (nanosleep(&sleep_time, &sleep_time) != 0 && errno == EINTR)
; // Ignore signals and wait for the full interval to elapse.
#endif
}
int GetSystemCPUsCount()
{
#if defined(PLATFORM_WINDOWS)
// Get the number of processors.
SYSTEM_INFO info;
GetSystemInfo(&info);
return info.dwNumberOfProcessors;
#else
long rv = sysconf(_SC_NPROCESSORS_ONLN);
if (rv < 0) {
return 1;
}
return static_cast<int>(rv);
#endif
}
// ----------------------------------------------------------------------
#if defined __linux__ || defined __FreeBSD__ || defined __sun__ || defined __CYGWIN__ || defined __CYGWIN32__
static void ConstructFilename(const char* spec, pid_t pid,
char* buf, int buf_size) {
CHECK_LT(snprintf(buf, buf_size,
spec,
static_cast<int>(pid ? pid : getpid())), buf_size);
}
#endif
// A templatized helper function instantiated for Mach (OS X) only.
// It can handle finding info for both 32 bits and 64 bits.
// Returns true if it successfully handled the hdr, false otherwise.
#ifdef __MACH__ // Mac OS X, almost certainly
template<uint32_t kMagic, uint32_t kLCSegment,
typename MachHeader, typename SegmentCommand>
static bool NextExtMachHelper(const mach_header* hdr,
int current_image, int current_load_cmd,
uint64 *start, uint64 *end, char **flags,
uint64 *offset, int64 *inode, char **filename,
uint64 *file_mapping, uint64 *file_pages,
uint64 *anon_mapping, uint64 *anon_pages,
dev_t *dev) {
static char kDefaultPerms[5] = "r-xp";
if (hdr->magic != kMagic)
return false;
const char* lc = (const char *)hdr + sizeof(MachHeader);
// TODO(csilvers): make this not-quadratic (increment and hold state)
for (int j = 0; j < current_load_cmd; j++) // advance to *our* load_cmd
lc += ((const load_command *)lc)->cmdsize;
if (((const load_command *)lc)->cmd == kLCSegment) {
const intptr_t dlloff = _dyld_get_image_vmaddr_slide(current_image);
const SegmentCommand* sc = (const SegmentCommand *)lc;
if (start) *start = sc->vmaddr + dlloff;
if (end) *end = sc->vmaddr + sc->vmsize + dlloff;
if (flags) *flags = kDefaultPerms; // can we do better?
if (offset) *offset = sc->fileoff;
if (inode) *inode = 0;
if (filename)
*filename = const_cast<char*>(_dyld_get_image_name(current_image));
if (file_mapping) *file_mapping = 0;
if (file_pages) *file_pages = 0; // could we use sc->filesize?
if (anon_mapping) *anon_mapping = 0;
if (anon_pages) *anon_pages = 0;
if (dev) *dev = 0;
return true;
}
return false;
}
#endif
// Finds |c| in |text|, and assign '\0' at the found position.
// The original character at the modified position should be |c|.
// A pointer to the modified position is stored in |endptr|.
// |endptr| should not be NULL.
static bool ExtractUntilChar(char *text, int c, char **endptr) {
CHECK_NE(text, NULL);
CHECK_NE(endptr, NULL);
char *found;
found = strchr(text, c);
if (found == NULL) {
*endptr = NULL;
return false;
}
*endptr = found;
*found = '\0';
return true;
}
// Increments |*text_pointer| while it points to a whitespace character.
// This follows sscanf's whitespace handling.
static void SkipWhileWhitespace(char **text_pointer, int c) {
if (isspace(c)) {
while (isspace(**text_pointer) && isspace(*((*text_pointer) + 1))) {
++(*text_pointer);
}
}
}
template<class T>
static T StringToInteger(char *text, char **endptr, int base) {
assert(false);
return T();
}
template<>
int StringToInteger<int>(char *text, char **endptr, int base) {
return strtol(text, endptr, base);
}
template<>
int64 StringToInteger<int64>(char *text, char **endptr, int base) {
return strtoll(text, endptr, base);
}
template<>
uint64 StringToInteger<uint64>(char *text, char **endptr, int base) {
return strtoull(text, endptr, base);
}
template<typename T>
static T StringToIntegerUntilChar(
char *text, int base, int c, char **endptr_result) {
CHECK_NE(endptr_result, NULL);
*endptr_result = NULL;
char *endptr_extract;
if (!ExtractUntilChar(text, c, &endptr_extract))
return 0;
T result;
char *endptr_strto;
result = StringToInteger<T>(text, &endptr_strto, base);
*endptr_extract = c;
if (endptr_extract != endptr_strto)
return 0;
*endptr_result = endptr_extract;
SkipWhileWhitespace(endptr_result, c);
return result;
}
static char *CopyStringUntilChar(
char *text, unsigned out_len, int c, char *out) {
char *endptr;
if (!ExtractUntilChar(text, c, &endptr))
return NULL;
strncpy(out, text, out_len);
out[out_len-1] = '\0';
*endptr = c;
SkipWhileWhitespace(&endptr, c);
return endptr;
}
template<typename T>
static bool StringToIntegerUntilCharWithCheck(
T *outptr, char *text, int base, int c, char **endptr) {
*outptr = StringToIntegerUntilChar<T>(*endptr, base, c, endptr);
if (*endptr == NULL || **endptr == '\0') return false;
++(*endptr);
return true;
}
static bool ParseProcMapsLine(char *text, uint64 *start, uint64 *end,
char *flags, uint64 *offset,
int *major, int *minor, int64 *inode,
unsigned *filename_offset) {
#if defined(__linux__)
/*
* It's similar to:
* sscanf(text, "%"SCNx64"-%"SCNx64" %4s %"SCNx64" %x:%x %"SCNd64" %n",
* start, end, flags, offset, major, minor, inode, filename_offset)
*/
char *endptr = text;
if (endptr == NULL || *endptr == '\0') return false;
if (!StringToIntegerUntilCharWithCheck(start, endptr, 16, '-', &endptr))
return false;
if (!StringToIntegerUntilCharWithCheck(end, endptr, 16, ' ', &endptr))
return false;
endptr = CopyStringUntilChar(endptr, 5, ' ', flags);
if (endptr == NULL || *endptr == '\0') return false;
++endptr;
if (!StringToIntegerUntilCharWithCheck(offset, endptr, 16, ' ', &endptr))
return false;
if (!StringToIntegerUntilCharWithCheck(major, endptr, 16, ':', &endptr))
return false;
if (!StringToIntegerUntilCharWithCheck(minor, endptr, 16, ' ', &endptr))
return false;
if (!StringToIntegerUntilCharWithCheck(inode, endptr, 10, ' ', &endptr))
return false;
*filename_offset = (endptr - text);
return true;
#else
return false;
#endif
}
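// Illustrative sketch: feeding one typical Linux maps line through
// ParseProcMapsLine(). The input must be writable because parsing
// temporarily NUL-terminates fields in place; the sample line is made up.
static void ExampleParseMapsLine() {
  char line[] = "08048000-0804c000 r-xp 00000000 03:01 3793678 /bin/cat";
  uint64 start, end, offset;
  int64 inode;
  int major, minor;
  unsigned filename_offset;
  char flags[5];
  if (ParseProcMapsLine(line, &start, &end, flags, &offset,
                        &major, &minor, &inode, &filename_offset)) {
    // On success: start == 0x8048000, flags == "r-xp", and
    // line + filename_offset points at "/bin/cat".
  }
}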
ProcMapsIterator::ProcMapsIterator(pid_t pid) {
Init(pid, NULL, false);
}
ProcMapsIterator::ProcMapsIterator(pid_t pid, Buffer *buffer) {
Init(pid, buffer, false);
}
ProcMapsIterator::ProcMapsIterator(pid_t pid, Buffer *buffer,
bool use_maps_backing) {
Init(pid, buffer, use_maps_backing);
}
void ProcMapsIterator::Init(pid_t pid, Buffer *buffer,
bool use_maps_backing) {
pid_ = pid;
using_maps_backing_ = use_maps_backing;
dynamic_buffer_ = NULL;
if (!buffer) {
// If the user didn't pass in any buffer storage, allocate it
// now. This is the normal case; the signal handler passes in a
// static buffer.
buffer = dynamic_buffer_ = new Buffer;
} else {
dynamic_buffer_ = NULL;
}
ibuf_ = buffer->buf_;
stext_ = etext_ = nextline_ = ibuf_;
ebuf_ = ibuf_ + Buffer::kBufSize - 1;
nextline_ = ibuf_;
#if defined(__linux__) || defined(__CYGWIN__) || defined(__CYGWIN32__)
if (use_maps_backing) { // don't bother with clever "self" stuff in this case
ConstructFilename("/proc/%d/maps_backing", pid, ibuf_, Buffer::kBufSize);
} else if (pid == 0) {
// We have to kludge a bit to deal with the args ConstructFilename
// expects. The 1 is never used -- it's only important that it's not 0.
ConstructFilename("/proc/self/maps", 1, ibuf_, Buffer::kBufSize);
} else {
ConstructFilename("/proc/%d/maps", pid, ibuf_, Buffer::kBufSize);
}
// No error logging since this can be called from the crash dump
// handler at awkward moments. Users should call Valid() before
// using.
NO_INTR(fd_ = open(ibuf_, O_RDONLY));
#elif defined(__FreeBSD__)
// We don't support maps_backing on freebsd
if (pid == 0) {
ConstructFilename("/proc/curproc/map", 1, ibuf_, Buffer::kBufSize);
} else {
ConstructFilename("/proc/%d/map", pid, ibuf_, Buffer::kBufSize);
}
NO_INTR(fd_ = open(ibuf_, O_RDONLY));
#elif defined(__sun__)
if (pid == 0) {
ConstructFilename("/proc/self/map", 1, ibuf_, Buffer::kBufSize);
} else {
ConstructFilename("/proc/%d/map", pid, ibuf_, Buffer::kBufSize);
}
NO_INTR(fd_ = open(ibuf_, O_RDONLY));
#elif defined(__MACH__)
current_image_ = _dyld_image_count(); // count down from the top
current_load_cmd_ = -1;
#elif defined(PLATFORM_WINDOWS)
snapshot_ = CreateToolhelp32Snapshot(TH32CS_SNAPMODULE |
TH32CS_SNAPMODULE32,
GetCurrentProcessId());
memset(&module_, 0, sizeof(module_));
#else
fd_ = -1; // so Valid() is always false
#endif
}
ProcMapsIterator::~ProcMapsIterator() {
#if defined(PLATFORM_WINDOWS)
if (snapshot_ != INVALID_HANDLE_VALUE) CloseHandle(snapshot_);
#elif defined(__MACH__)
// no cleanup necessary!
#else
if (fd_ >= 0) NO_INTR(close(fd_));
#endif
delete dynamic_buffer_;
}
bool ProcMapsIterator::Valid() const {
#if defined(PLATFORM_WINDOWS)
return snapshot_ != INVALID_HANDLE_VALUE;
#elif defined(__MACH__)
return 1;
#else
return fd_ != -1;
#endif
}
bool ProcMapsIterator::Next(uint64 *start, uint64 *end, char **flags,
uint64 *offset, int64 *inode, char **filename) {
return NextExt(start, end, flags, offset, inode, filename, NULL, NULL,
NULL, NULL, NULL);
}
// This has too many arguments. It should really be building
// a map object and returning it. The problem is that this is called
// when the memory allocator state is undefined, hence the arguments.
bool ProcMapsIterator::NextExt(uint64 *start, uint64 *end, char **flags,
uint64 *offset, int64 *inode, char **filename,
uint64 *file_mapping, uint64 *file_pages,
uint64 *anon_mapping, uint64 *anon_pages,
dev_t *dev) {
#if defined(__linux__) || defined(__FreeBSD__) || defined(__CYGWIN__) || defined(__CYGWIN32__)
do {
// Advance to the start of the next line
stext_ = nextline_;
// See if we have a complete line in the buffer already
nextline_ = static_cast<char *>(memchr (stext_, '\n', etext_ - stext_));
if (!nextline_) {
// Shift/fill the buffer so we do have a line
int count = etext_ - stext_;
// Move the current text to the start of the buffer
memmove(ibuf_, stext_, count);
stext_ = ibuf_;
etext_ = ibuf_ + count;
int nread = 0; // fill up buffer with text
while (etext_ < ebuf_) {
NO_INTR(nread = read(fd_, etext_, ebuf_ - etext_));
if (nread > 0)
etext_ += nread;
else
break;
}
// Zero out remaining characters in buffer at EOF to avoid returning
// garbage from subsequent calls.
if (etext_ != ebuf_ && nread == 0) {
memset(etext_, 0, ebuf_ - etext_);
}
*etext_ = '\n'; // sentinel; safe because ibuf extends 1 char beyond ebuf
nextline_ = static_cast<char *>(memchr (stext_, '\n', etext_ + 1 - stext_));
}
*nextline_ = 0; // turn newline into nul
nextline_ += ((nextline_ < etext_)? 1 : 0); // skip nul if not end of text
// stext_ now points at a nul-terminated line
uint64 tmpstart, tmpend, tmpoffset;
int64 tmpinode;
int major, minor;
unsigned filename_offset = 0;
#if defined(__linux__)
// for now, assume all linuxes have the same format
if (!ParseProcMapsLine(
stext_,
start ? start : &tmpstart,
end ? end : &tmpend,
flags_,
offset ? offset : &tmpoffset,
&major, &minor,
inode ? inode : &tmpinode, &filename_offset)) continue;
#elif defined(__CYGWIN__) || defined(__CYGWIN32__)
// cygwin is like linux, except the third field is the "entry point"
// rather than the offset (see format_process_maps at
// http://cygwin.com/cgi-bin/cvsweb.cgi/src/winsup/cygwin/fhandler_process.cc?rev=1.89&content-type=text/x-cvsweb-markup&cvsroot=src
// Offset is always 0 on cygwin: cygwin implements an mmap
// by loading the whole file and then calling NtMapViewOfSection.
// Cygwin also seems to set its flags kinda randomly; use windows default.
char tmpflags[5];
if (offset)
*offset = 0;
strcpy(flags_, "r-xp");
if (sscanf(stext_, "%llx-%llx %4s %llx %x:%x %lld %n",
start ? start : &tmpstart,
end ? end : &tmpend,
tmpflags,
&tmpoffset,
&major, &minor,
inode ? inode : &tmpinode, &filename_offset) != 7) continue;
#elif defined(__FreeBSD__)
// For the format, see http://www.freebsd.org/cgi/cvsweb.cgi/src/sys/fs/procfs/procfs_map.c?rev=1.31&content-type=text/x-cvsweb-markup
tmpstart = tmpend = tmpoffset = 0;
tmpinode = 0;
major = minor = 0; // can't get this info in freebsd
if (inode)
*inode = 0; // nor this
if (offset)
*offset = 0; // seems like this should be in there, but maybe not
// start end resident privateresident obj(?) prot refcnt shadowcnt
// flags copy_on_write needs_copy type filename:
// 0x8048000 0x804a000 2 0 0xc104ce70 r-x 1 0 0x0 COW NC vnode /bin/cat
if (sscanf(stext_, "0x%" SCNx64 " 0x%" SCNx64 " %*d %*d %*p %3s %*d %*d 0x%*x %*s %*s %*s %n",
start ? start : &tmpstart,
end ? end : &tmpend,
flags_,
&filename_offset) != 3) continue;
#endif
// Depending on the Linux kernel being used, there may or may not be a space
// after the inode if there is no filename. sscanf will in such situations
// nondeterministically either fill in filename_offset or not (the results
// differ on multiple calls in the same run even with identical arguments).
// We don't want to wander off somewhere beyond the end of the string.
size_t stext_length = strlen(stext_);
if (filename_offset == 0 || filename_offset > stext_length)
filename_offset = stext_length;
// We found an entry
if (flags) *flags = flags_;
if (filename) *filename = stext_ + filename_offset;
if (dev) *dev = minor | (major << 8);
if (using_maps_backing_) {
// Extract and parse physical page backing info.
char *backing_ptr = stext_ + filename_offset +
strlen(stext_+filename_offset);
// find the second '('
int paren_count = 0;
while (--backing_ptr > stext_) {
if (*backing_ptr == '(') {
++paren_count;
if (paren_count >= 2) {
uint64 tmp_file_mapping;
uint64 tmp_file_pages;
uint64 tmp_anon_mapping;
uint64 tmp_anon_pages;
sscanf(backing_ptr+1, "F %" SCNx64 " %" SCNd64 ") (A %" SCNx64 " %" SCNd64 ")",
file_mapping ? file_mapping : &tmp_file_mapping,
file_pages ? file_pages : &tmp_file_pages,
anon_mapping ? anon_mapping : &tmp_anon_mapping,
anon_pages ? anon_pages : &tmp_anon_pages);
// null terminate the file name (there is a space
// before the first (.
backing_ptr[-1] = 0;
break;
}
}
}
}
return true;
} while (etext_ > ibuf_);
#elif defined(__sun__)
// This is based on MA_READ == 4, MA_WRITE == 2, MA_EXEC == 1
static char kPerms[8][4] = { "---", "--x", "-w-", "-wx",
"r--", "r-x", "rw-", "rwx" };
COMPILE_ASSERT(MA_READ == 4, solaris_ma_read_must_equal_4);
COMPILE_ASSERT(MA_WRITE == 2, solaris_ma_write_must_equal_2);
COMPILE_ASSERT(MA_EXEC == 1, solaris_ma_exec_must_equal_1);
Buffer object_path;
int nread = 0; // fill up buffer with text
NO_INTR(nread = read(fd_, ibuf_, sizeof(prmap_t)));
if (nread == sizeof(prmap_t)) {
long inode_from_mapname = 0;
prmap_t* mapinfo = reinterpret_cast<prmap_t*>(ibuf_);
// Best-effort attempt to get the inode from the filename. I think the
// two middle ints are major and minor device numbers, but I'm not sure.
sscanf(mapinfo->pr_mapname, "ufs.%*d.%*d.%ld", &inode_from_mapname);
if (pid_ == 0) {
CHECK_LT(snprintf(object_path.buf_, Buffer::kBufSize,
"/proc/self/path/%s", mapinfo->pr_mapname),
Buffer::kBufSize);
} else {
CHECK_LT(snprintf(object_path.buf_, Buffer::kBufSize,
"/proc/%d/path/%s",
static_cast<int>(pid_), mapinfo->pr_mapname),
Buffer::kBufSize);
}
ssize_t len = readlink(object_path.buf_, current_filename_, PATH_MAX);
CHECK_LT(len, PATH_MAX);
if (len < 0)
len = 0;
current_filename_[len] = '\0';
if (start) *start = mapinfo->pr_vaddr;
if (end) *end = mapinfo->pr_vaddr + mapinfo->pr_size;
if (flags) *flags = kPerms[mapinfo->pr_mflags & 7];
if (offset) *offset = mapinfo->pr_offset;
if (inode) *inode = inode_from_mapname;
if (filename) *filename = current_filename_;
if (file_mapping) *file_mapping = 0;
if (file_pages) *file_pages = 0;
if (anon_mapping) *anon_mapping = 0;
if (anon_pages) *anon_pages = 0;
if (dev) *dev = 0;
return true;
}
#elif defined(__MACH__)
// We return a separate entry for each segment in the DLL. (TODO(csilvers):
// can we do better?) A DLL ("image") has load-commands, some of which
// talk about segment boundaries.
// cf image_for_address from http://svn.digium.com/view/asterisk/team/oej/minivoicemail/dlfcn.c?revision=53912
for (; current_image_ >= 0; current_image_--) {
const mach_header* hdr = _dyld_get_image_header(current_image_);
if (!hdr) continue;
if (current_load_cmd_ < 0) // set up for this image
current_load_cmd_ = hdr->ncmds; // again, go from the top down
// We start with the next load command (we've already looked at this one).
for (current_load_cmd_--; current_load_cmd_ >= 0; current_load_cmd_--) {
#ifdef MH_MAGIC_64
if (NextExtMachHelper<MH_MAGIC_64, LC_SEGMENT_64,
struct mach_header_64, struct segment_command_64>(
hdr, current_image_, current_load_cmd_,
start, end, flags, offset, inode, filename,
file_mapping, file_pages, anon_mapping,
anon_pages, dev)) {
return true;
}
#endif
if (NextExtMachHelper<MH_MAGIC, LC_SEGMENT,
struct mach_header, struct segment_command>(
hdr, current_image_, current_load_cmd_,
start, end, flags, offset, inode, filename,
file_mapping, file_pages, anon_mapping,
anon_pages, dev)) {
return true;
}
}
// If we get here, no more load_cmd's in this image talk about
// segments. Go on to the next image.
}
#elif defined(PLATFORM_WINDOWS)
static char kDefaultPerms[5] = "r-xp";
BOOL ok;
if (module_.dwSize == 0) { // only possible before first call
module_.dwSize = sizeof(module_);
ok = Module32First(snapshot_, &module_);
} else {
ok = Module32Next(snapshot_, &module_);
}
if (ok) {
uint64 base_addr = reinterpret_cast<DWORD_PTR>(module_.modBaseAddr);
if (start) *start = base_addr;
if (end) *end = base_addr + module_.modBaseSize;
if (flags) *flags = kDefaultPerms;
if (offset) *offset = 0;
if (inode) *inode = 0;
if (filename) *filename = module_.szExePath;
if (file_mapping) *file_mapping = 0;
if (file_pages) *file_pages = 0;
if (anon_mapping) *anon_mapping = 0;
if (anon_pages) *anon_pages = 0;
if (dev) *dev = 0;
return true;
}
#endif
// We didn't find anything
return false;
}
int ProcMapsIterator::FormatLine(char* buffer, int bufsize,
uint64 start, uint64 end, const char *flags,
uint64 offset, int64 inode,
const char *filename, dev_t dev) {
// We assume 'flags' looks like 'rwxp' or 'rwx'.
char r = (flags && flags[0] == 'r') ? 'r' : '-';
char w = (flags && flags[0] && flags[1] == 'w') ? 'w' : '-';
char x = (flags && flags[0] && flags[1] && flags[2] == 'x') ? 'x' : '-';
// p always seems set on linux, so we set the default to 'p', not '-'
char p = (flags && flags[0] && flags[1] && flags[2] && flags[3] != 'p')
? '-' : 'p';
const int rc = snprintf(buffer, bufsize,
"%08" PRIx64 "-%08" PRIx64 " %c%c%c%c %08" PRIx64 " %02x:%02x %-11" PRId64 " %s\n",
start, end, r,w,x,p, offset,
static_cast<int>(dev/256), static_cast<int>(dev%256),
inode, filename);
return (rc < 0 || rc >= bufsize) ? 0 : rc;
}
namespace tcmalloc {
// Helper to add the list of mapped shared libraries to a profile.
// Fill formatted "/proc/self/maps" contents into buffer 'buf' of size 'size'
// and return the actual size occupied in 'buf'. We set *wrote_all to true
// if we successfully wrote all proc lines to buf, and to false otherwise.
// We do not provision for 0-terminating 'buf'.
int FillProcSelfMaps(char buf[], int size, bool* wrote_all) {
ProcMapsIterator::Buffer iterbuf;
ProcMapsIterator it(0, &iterbuf); // 0 means "current pid"
uint64 start, end, offset;
int64 inode;
char *flags, *filename;
int bytes_written = 0;
*wrote_all = true;
while (it.Next(&start, &end, &flags, &offset, &inode, &filename)) {
const int line_length = it.FormatLine(buf + bytes_written,
size - bytes_written,
start, end, flags, offset,
inode, filename, 0);
if (line_length == 0)
*wrote_all = false; // failed to write this line out
else
bytes_written += line_length;
}
return bytes_written;
}
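// Illustrative sketch: capturing the formatted maps into a caller-supplied
// buffer. The names and the -1 error convention are choices for the example.
static int ExampleCaptureMaps(char* out, int out_size) {
  bool wrote_all = false;
  int n = FillProcSelfMaps(out, out_size, &wrote_all);
  return wrote_all ? n : -1;  // -1 signals "buffer too small for all lines"
}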
// Dump the same data as FillProcSelfMaps reads to fd.
// It seems easier to repeat parts of FillProcSelfMaps here than to
// reuse it via a call.
void DumpProcSelfMaps(RawFD fd) {
ProcMapsIterator::Buffer iterbuf;
ProcMapsIterator it(0, &iterbuf); // 0 means "current pid"
uint64 start, end, offset;
int64 inode;
char *flags, *filename;
ProcMapsIterator::Buffer linebuf;
while (it.Next(&start, &end, &flags, &offset, &inode, &filename)) {
int written = it.FormatLine(linebuf.buf_, sizeof(linebuf.buf_),
start, end, flags, offset, inode, filename,
0);
RawWrite(fd, linebuf.buf_, written);
}
}
} // namespace tcmalloc

@@ -1,232 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2006, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// All functions here are thread-hostile due to file caching unless
// commented otherwise.
#ifndef _SYSINFO_H_
#define _SYSINFO_H_
#include "../config.h"
#include <time.h>
#if (defined(_WIN32) || defined(__MINGW32__)) && (!defined(__CYGWIN__) && !defined(__CYGWIN32__))
#include <windows.h> // for DWORD
#include <tlhelp32.h> // for CreateToolhelp32Snapshot
#endif
#ifdef HAVE_UNISTD_H
#include <unistd.h> // for pid_t
#endif
#include <stddef.h> // for size_t
#include <limits.h> // for PATH_MAX
#include "base/basictypes.h"
#include "base/logging.h" // for RawFD
// This getenv function is safe to call before the C runtime is initialized.
// On Windows, it utilizes GetEnvironmentVariable() and on unix it uses
// /proc/self/environ instead of calling getenv(). It's intended to be used in
// routines that run before main(), when the state required for getenv() may
// not be set up yet. In particular, errno isn't set up until relatively late
// (after the pthreads library has a chance to make it threadsafe), and
// getenv() doesn't work until then.
// On some platforms, this call will utilize the same, static buffer for
// repeated GetenvBeforeMain() calls. Callers should not expect pointers from
// this routine to be long lived.
// Note that on unix, /proc only has the environment at the time the
// application was started, so this routine ignores setenv() calls/etc. Also
// note it only reads the first 16K of the environment.
extern const char* GetenvBeforeMain(const char* name);
// This takes as an argument an environment-variable name (like
// CPUPROFILE) whose value is supposed to be a file-path, and sets
// path to that path, and returns true. Non-trivial for surprising
// reasons, as documented in sysinfo.cc. path must have space for PATH_MAX bytes.
extern bool GetUniquePathFromEnv(const char* env_name, char* path);
extern int GetSystemCPUsCount();
void SleepForMilliseconds(int milliseconds);
// Return true if we're running POSIX (e.g., NPTL on Linux) threads,
// as opposed to a non-POSIX thread library. The thing that we care
// about is whether a thread's pid is the same as the thread that
// spawned it. If so, this function returns true.
// Thread-safe.
// Note: We consider false negatives to be OK.
bool HasPosixThreads();
#ifndef SWIG // SWIG doesn't like struct Buffer and variable arguments.
// A ProcMapsIterator abstracts access to /proc/maps for a given
// process. Needs to be stack-allocatable and avoid using stdio/malloc
// so it can be used in the google stack dumper, heap-profiler, etc.
//
// On Windows and Mac OS X, this iterator iterates *only* over DLLs
// mapped into this process space. For Linux, FreeBSD, and Solaris,
// it iterates over *all* mapped memory regions, including anonymous
// mmaps. For other O/Ss, it is unlikely to work at all, and Valid()
// will always return false. Also note: this routine only works on
// FreeBSD if procfs is mounted: make sure this is in your /etc/fstab:
// proc /proc procfs rw 0 0
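//
// Example usage (an illustrative sketch):
//   ProcMapsIterator::Buffer buf;
//   ProcMapsIterator it(0, &buf);   // pid 0 means "the current process"
//   uint64 start, end, offset;
//   int64 inode;
//   char *flags, *filename;
//   if (it.Valid()) {
//     while (it.Next(&start, &end, &flags, &offset, &inode, &filename)) {
//       // one mapped region per iteration
//     }
//   }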
class ProcMapsIterator {
public:
struct Buffer {
#ifdef __FreeBSD__
// FreeBSD requires us to read all of the maps file at once, so
// we have to make a buffer that's "always" big enough
static const size_t kBufSize = 102400;
#else // a one-line buffer is good enough
static const size_t kBufSize = PATH_MAX + 1024;
#endif
char buf_[kBufSize];
};
// Create a new iterator for the specified pid. pid can be 0 for "self".
explicit ProcMapsIterator(pid_t pid);
// Create an iterator with specified storage (for use in signal
// handler). "buffer" should point to a ProcMapsIterator::Buffer.
// buffer can be NULL, in which case a buffer will be allocated.
ProcMapsIterator(pid_t pid, Buffer *buffer);
// Iterate through maps_backing instead of maps if use_maps_backing
// is true. Otherwise the same as above. buffer can be NULL and
// it will allocate a buffer itself.
ProcMapsIterator(pid_t pid, Buffer *buffer,
bool use_maps_backing);
// Returns true if the iterator was successfully initialized.
bool Valid() const;
// Returns a pointer to the most recently parsed line. Only valid
// after Next() returns true, and until the iterator is destroyed or
// Next() is called again. This may give strange results on non-Linux
// systems. Prefer FormatLine() if that may be a concern.
const char *CurrentLine() const { return stext_; }
// Writes the "canonical" form of the /proc/xxx/maps info for a single
// line to the passed-in buffer. Returns the number of bytes written,
// or 0 if it was not able to write the complete line. (To guarantee
// success, buffer should have size at least Buffer::kBufSize.)
// Takes as arguments values set via a call to Next(). The
// "canonical" form of the line (taken from linux's /proc/xxx/maps):
//    <start_addr(hex)>-<end_addr(hex)> <perms(rwxp)> <offset(hex)>
//    <major_dev(hex)>:<minor_dev(hex)> <inode> <filename>
// e.g.
//    08048000-0804c000 r-xp 00000000 03:01 3793678    /bin/cat
// If you don't have the dev_t (dev), feel free to pass in 0.
// (Next() doesn't return a dev_t, though NextExt does.)
//
// Note: if filename and flags were obtained via a call to Next(),
// then the output of this function is only valid if Next() returned
// true, and only until the iterator is destroyed or Next() is
// called again. (Since filename, at least, points into CurrentLine.)
static int FormatLine(char* buffer, int bufsize,
uint64 start, uint64 end, const char *flags,
uint64 offset, int64 inode, const char *filename,
dev_t dev);
// Find the next entry in /proc/maps; return true if found or false
// if at the end of the file.
//
// Any of the result pointers can be NULL if you're not interested
// in those values.
//
// If "flags" and "filename" are passed, they end up pointing to
// storage within the ProcMapsIterator that is valid only until the
// iterator is destroyed or Next() is called again. The caller may
// modify the contents of these strings (up as far as the first NUL,
// and only until the subsequent call to Next()) if desired.
// The offsets are all uint64 in order to handle the case of a
// 32-bit process running on a 64-bit kernel
//
// IMPORTANT NOTE: see top-of-class notes for details about what
// mapped regions Next() iterates over, depending on O/S.
// TODO(csilvers): make flags and filename const.
bool Next(uint64 *start, uint64 *end, char **flags,
uint64 *offset, int64 *inode, char **filename);
bool NextExt(uint64 *start, uint64 *end, char **flags,
uint64 *offset, int64 *inode, char **filename,
uint64 *file_mapping, uint64 *file_pages,
uint64 *anon_mapping, uint64 *anon_pages,
dev_t *dev);
~ProcMapsIterator();
private:
void Init(pid_t pid, Buffer *buffer, bool use_maps_backing);
char *ibuf_; // input buffer
char *stext_; // start of text
char *etext_; // end of text
char *nextline_; // start of next line
char *ebuf_; // end of buffer (1 char for a nul)
#if (defined(_WIN32) || defined(__MINGW32__)) && (!defined(__CYGWIN__) && !defined(__CYGWIN32__))
HANDLE snapshot_; // filehandle on dll info
// In a change from the usual W-A pattern, there is no A variant of
// MODULEENTRY32. Tlhelp32.h #defines the W variant, but not the A.
// We want the original A variants, and this #undef is the only
// way I see to get them. Redefining it when we're done prevents us
// from affecting other .cc files.
# ifdef MODULEENTRY32 // Alias of W
# undef MODULEENTRY32
MODULEENTRY32 module_; // info about current dll (and dll iterator)
# define MODULEENTRY32 MODULEENTRY32W
# else // It's the ascii, the one we want.
MODULEENTRY32 module_; // info about current dll (and dll iterator)
# endif
#elif defined(__MACH__)
int current_image_; // dll's are called "images" in macos parlance
int current_load_cmd_; // the segment of this dll we're examining
#elif defined(__sun__) // Solaris
int fd_;
char current_filename_[PATH_MAX];
#else
int fd_; // filehandle on /proc/*/maps
#endif
pid_t pid_;
char flags_[10];
Buffer* dynamic_buffer_; // dynamically-allocated Buffer
bool using_maps_backing_; // true if we are looking at maps_backing instead of maps.
};
#endif /* #ifndef SWIG */
// Helper routines
namespace tcmalloc {
int FillProcSelfMaps(char buf[], int size, bool* wrote_all);
void DumpProcSelfMaps(RawFD fd);
}
#endif /* #ifndef _SYSINFO_H_ */

@@ -1,134 +0,0 @@
// Copyright (c) 2008, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
// Author: Le-Chun Wu
//
// This header file contains the macro definitions for thread safety
// annotations that allow the developers to document the locking policies
// of their multi-threaded code. The annotations can also help program
// analysis tools to identify potential thread safety issues.
//
// The annotations are implemented using GCC's "attributes" extension.
// Using the macros defined here instead of the raw GCC attributes allows
// for portability and future compatibility.
//
// This functionality is not yet fully implemented in perftools,
// but may be one day.
#ifndef BASE_THREAD_ANNOTATIONS_H_
#define BASE_THREAD_ANNOTATIONS_H_
#if defined(__GNUC__) \
&& (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 4)) \
&& defined(__SUPPORT_TS_ANNOTATION__) && (!defined(SWIG))
#define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x))
#else
#define THREAD_ANNOTATION_ATTRIBUTE__(x) // no-op
#endif
// Document if a shared variable/field needs to be protected by a lock.
// GUARDED_BY allows the user to specify a particular lock that should be
// held when accessing the annotated variable, while GUARDED_VAR only
// indicates a shared variable should be guarded (by any lock). GUARDED_VAR
// is primarily used when the client cannot express the name of the lock.
#define GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(guarded_by(x))
#define GUARDED_VAR THREAD_ANNOTATION_ATTRIBUTE__(guarded)
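// For example (an illustrative sketch), a counter that must only be
// accessed while holding mu:
//   Mutex mu;
//   int accesses GUARDED_BY(mu);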
// Document if the memory location pointed to by a pointer should be guarded
// by a lock when dereferencing the pointer. Similar to GUARDED_VAR,
// PT_GUARDED_VAR is primarily used when the client cannot express the name
// of the lock. Note that a pointer variable to a shared memory location
// could itself be a shared variable. For example, if a shared global pointer
// q, which is guarded by mu1, points to a shared memory location that is
// guarded by mu2, q should be annotated as follows:
// int *q GUARDED_BY(mu1) PT_GUARDED_BY(mu2);
#define PT_GUARDED_BY(x) \
THREAD_ANNOTATION_ATTRIBUTE__(point_to_guarded_by(x))
#define PT_GUARDED_VAR \
THREAD_ANNOTATION_ATTRIBUTE__(point_to_guarded)
// Document the acquisition order between locks that can be held
// simultaneously by a thread. For any two locks that need to be annotated
// to establish an acquisition order, only one of them needs the annotation.
// (i.e. You don't have to annotate both locks with both ACQUIRED_AFTER
// and ACQUIRED_BEFORE.)
#define ACQUIRED_AFTER(x) \
THREAD_ANNOTATION_ATTRIBUTE__(acquired_after(x))
#define ACQUIRED_BEFORE(x) \
THREAD_ANNOTATION_ATTRIBUTE__(acquired_before(x))
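// For example (an illustrative sketch), if mu1 must always be acquired
// before mu2:
//   Mutex mu1;
//   Mutex mu2 ACQUIRED_AFTER(mu1);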
// The following three annotations document the lock requirements for
// functions/methods.
// Document if a function expects certain locks to be held before it is called
#define EXCLUSIVE_LOCKS_REQUIRED(x) \
THREAD_ANNOTATION_ATTRIBUTE__(exclusive_locks_required(x))
#define SHARED_LOCKS_REQUIRED(x) \
THREAD_ANNOTATION_ATTRIBUTE__(shared_locks_required(x))
// Document the locks acquired in the body of the function. These locks
// cannot be held when calling this function (as google3's Mutex locks are
// non-reentrant).
#define LOCKS_EXCLUDED(x) \
THREAD_ANNOTATION_ATTRIBUTE__(locks_excluded(x))
// Document the lock the annotated function returns without acquiring it.
#define LOCK_RETURNED(x) THREAD_ANNOTATION_ATTRIBUTE__(lock_returned(x))
// Document if a class/type is a lockable type (such as the Mutex class).
#define LOCKABLE THREAD_ANNOTATION_ATTRIBUTE__(lockable)
// Document if a class is a scoped lockable type (such as the MutexLock class).
#define SCOPED_LOCKABLE THREAD_ANNOTATION_ATTRIBUTE__(scoped_lockable)
// The following annotations specify lock and unlock primitives.
#define EXCLUSIVE_LOCK_FUNCTION(x) \
THREAD_ANNOTATION_ATTRIBUTE__(exclusive_lock(x))
#define SHARED_LOCK_FUNCTION(x) \
THREAD_ANNOTATION_ATTRIBUTE__(shared_lock(x))
#define EXCLUSIVE_TRYLOCK_FUNCTION(x) \
THREAD_ANNOTATION_ATTRIBUTE__(exclusive_trylock(x))
#define SHARED_TRYLOCK_FUNCTION(x) \
THREAD_ANNOTATION_ATTRIBUTE__(shared_trylock(x))
#define UNLOCK_FUNCTION(x) \
THREAD_ANNOTATION_ATTRIBUTE__(unlock(x))
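// For example (an illustrative sketch), annotating a minimal lockable
// wrapper class:
//   class LOCKABLE SimpleMutex {
//    public:
//     void Lock() EXCLUSIVE_LOCK_FUNCTION();
//     void Unlock() UNLOCK_FUNCTION();
//   };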
// An escape hatch for thread safety analysis to ignore the annotated function.
#define NO_THREAD_SAFETY_ANALYSIS \
THREAD_ANNOTATION_ATTRIBUTE__(no_thread_safety_analysis)
#endif // BASE_THREAD_ANNOTATIONS_H_

@@ -1,83 +0,0 @@
/* Copyright (c) 2005-2007, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ---
* Author: Markus Gutschke
*/
#include "config.h"
#include "base/thread_lister.h"
#include <stdio.h> /* needed for NULL on some powerpc platforms (?!) */
#include <sys/types.h>
#include <unistd.h> /* for getpid */
#ifdef HAVE_SYS_PRCTL
# include <sys/prctl.h>
#endif
#include "base/linuxthreads.h"
/* Include other thread listers here that define THREADS macro
* only when they can provide a good implementation.
*/
#ifndef THREADS
/* Default trivial thread lister for single-threaded applications,
* or if the multi-threading code has not been ported, yet.
*/
int TCMalloc_ListAllProcessThreads(void *parameter,
ListAllProcessThreadsCallBack callback, ...) {
int rc;
va_list ap;
pid_t pid;
#ifdef HAVE_SYS_PRCTL
int dumpable = prctl(PR_GET_DUMPABLE, 0);
if (!dumpable)
prctl(PR_SET_DUMPABLE, 1);
#endif
va_start(ap, callback);
pid = getpid();
rc = callback(parameter, 1, &pid, ap);
va_end(ap);
#ifdef HAVE_SYS_PRCTL
if (!dumpable)
prctl(PR_SET_DUMPABLE, 0);
#endif
return rc;
}
int TCMalloc_ResumeAllProcessThreads(int num_threads, pid_t *thread_pids) {
return 1;
}
#endif /* ifndef THREADS */

@@ -1,83 +0,0 @@
/* -*- Mode: c; c-basic-offset: 2; indent-tabs-mode: nil -*- */
/* Copyright (c) 2005-2007, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ---
* Author: Markus Gutschke
*/
#ifndef _THREAD_LISTER_H
#define _THREAD_LISTER_H
#include <stdarg.h>
#include <sys/types.h>
#ifdef __cplusplus
extern "C" {
#endif
typedef int (*ListAllProcessThreadsCallBack)(void *parameter,
int num_threads,
pid_t *thread_pids,
va_list ap);
/* This function gets the list of all linux threads of the current process
 * and passes them to the 'callback' along with the 'parameter' pointer; at
 * the time the callback is invoked, all the threads are paused via
 * PTRACE_ATTACH.
* The callback is executed from a separate thread which shares only the
* address space, the filesystem, and the filehandles with the caller. Most
* notably, it does not share the same pid and ppid; and if it terminates,
* the rest of the application is still there. 'callback' is supposed to do
 * or arrange for TCMalloc_ResumeAllProcessThreads. This happens automatically if
* the thread raises a synchronous signal (e.g. SIGSEGV); asynchronous
* signals are blocked. If the 'callback' decides to unblock them, it must
* ensure that they cannot terminate the application, or that
* TCMalloc_ResumeAllProcessThreads will get called.
* It is an error for the 'callback' to make any library calls that could
* acquire locks. Most notably, this means that most system calls have to
* avoid going through libc. Also, this means that it is not legal to call
* exit() or abort().
* We return -1 on error and the return value of 'callback' on success.
*/
int TCMalloc_ListAllProcessThreads(void *parameter,
ListAllProcessThreadsCallBack callback, ...);
/* This function resumes the list of all linux threads that
* TCMalloc_ListAllProcessThreads pauses before giving to its
* callback. The function returns non-zero if at least one thread was
* suspended and has now been resumed.
*/
int TCMalloc_ResumeAllProcessThreads(int num_threads, pid_t *thread_pids);
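/* Example usage (an illustrative sketch): count the process's threads,
 * resuming them from within the callback as the contract above requires.
 *   static int CountThreads(void *param, int num_threads,
 *                           pid_t *thread_pids, va_list ap) {
 *     *(int *)param = num_threads;
 *     return TCMalloc_ResumeAllProcessThreads(num_threads, thread_pids);
 *   }
 *   ...
 *   int n = 0;
 *   TCMalloc_ListAllProcessThreads(&n, CountThreads);
 */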
#ifdef __cplusplus
}
#endif
#endif /* _THREAD_LISTER_H */

@@ -1,143 +0,0 @@
// Copyright (c) 2008, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
// Author: Paul Pluzhnikov
//
// Allow dynamic symbol lookup in the kernel VDSO page.
//
// VDSOSupport -- a class representing kernel VDSO (if present).
//
#include "base/vdso_support.h"
#ifdef HAVE_VDSO_SUPPORT // defined in vdso_support.h
#include <fcntl.h>
#include <stddef.h> // for ptrdiff_t
#include "base/atomicops.h" // for MemoryBarrier
#include "base/linux_syscall_support.h"
#include "base/logging.h"
#include "base/dynamic_annotations.h"
#include "base/basictypes.h" // for COMPILE_ASSERT
using base::subtle::MemoryBarrier;
#ifndef AT_SYSINFO_EHDR
#define AT_SYSINFO_EHDR 33
#endif
namespace base {
const void *VDSOSupport::vdso_base_ = ElfMemImage::kInvalidBase;
VDSOSupport::VDSOSupport()
// If vdso_base_ is still set to kInvalidBase, we got here
// before VDSOSupport::Init has been called. Call it now.
: image_(vdso_base_ == ElfMemImage::kInvalidBase ? Init() : vdso_base_) {
}
// NOTE: we can't use GoogleOnceInit() below, because we can be
// called by tcmalloc, and none of the *once* stuff may be functional yet.
//
// In addition, we hope that the VDSOSupportHelper constructor
// causes this code to run before there are any threads, and before
// InitGoogle() has executed any chroot or setuid calls.
//
// Finally, even if there is a race here, it is harmless, because
// the operation should be idempotent.
const void *VDSOSupport::Init() {
if (vdso_base_ == ElfMemImage::kInvalidBase) {
// Valgrind zaps AT_SYSINFO_EHDR and friends from the auxv[]
// on stack, and so glibc works as if VDSO was not present.
// But going directly to kernel via /proc/self/auxv below bypasses
// Valgrind zapping. So we check for Valgrind separately.
if (RunningOnValgrind()) {
vdso_base_ = NULL;
return NULL;
}
int fd = open("/proc/self/auxv", O_RDONLY);
if (fd == -1) {
// Kernel too old to have a VDSO.
vdso_base_ = NULL;
return NULL;
}
ElfW(auxv_t) aux;
while (read(fd, &aux, sizeof(aux)) == sizeof(aux)) {
if (aux.a_type == AT_SYSINFO_EHDR) {
COMPILE_ASSERT(sizeof(vdso_base_) == sizeof(aux.a_un.a_val),
unexpected_sizeof_pointer_NE_sizeof_a_val);
vdso_base_ = reinterpret_cast<void *>(aux.a_un.a_val);
break;
}
}
close(fd);
if (vdso_base_ == ElfMemImage::kInvalidBase) {
// Didn't find AT_SYSINFO_EHDR in auxv[].
vdso_base_ = NULL;
}
}
return vdso_base_;
}
const void *VDSOSupport::SetBase(const void *base) {
CHECK(base != ElfMemImage::kInvalidBase);
const void *old_base = vdso_base_;
vdso_base_ = base;
image_.Init(base);
return old_base;
}
bool VDSOSupport::LookupSymbol(const char *name,
const char *version,
int type,
SymbolInfo *info) const {
return image_.LookupSymbol(name, version, type, info);
}
bool VDSOSupport::LookupSymbolByAddress(const void *address,
SymbolInfo *info_out) const {
return image_.LookupSymbolByAddress(address, info_out);
}
// We need to make sure VDSOSupport::Init() is called before
// the main() runs, since it might do something like setuid or
// chroot. If VDSOSupport
// is used in any global constructor, this will happen, since
// VDSOSupport's constructor calls Init. But if not, we need to
// ensure it here, with a global constructor of our own. This
// is an allowed exception to the normal rule against non-trivial
// global constructors.
static class VDSOInitHelper {
public:
VDSOInitHelper() { VDSOSupport::Init(); }
} vdso_init_helper;
}
#endif // HAVE_VDSO_SUPPORT

@@ -1,132 +0,0 @@
// Copyright (c) 2008, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
// Author: Paul Pluzhnikov
//
// Allow dynamic symbol lookup in the kernel VDSO page.
//
// VDSO stands for "Virtual Dynamic Shared Object" -- a page of
// executable code, which looks like a shared library, but doesn't
// necessarily exist anywhere on disk, and which gets mmap()ed into
// every process by kernels which support VDSO, such as 2.6.x for 32-bit
// executables, and 2.6.24 and above for 64-bit executables.
//
// More details could be found here:
// http://www.trilithium.com/johan/2005/08/linux-gate/
//
// VDSOSupport -- a class representing kernel VDSO (if present).
//
// Example usage:
// VDSOSupport vdso;
// VDSOSupport::SymbolInfo info;
//  typedef long (*FN)(unsigned *, void *, void *);
// FN fn = NULL;
// if (vdso.LookupSymbol("__vdso_getcpu", "LINUX_2.6", STT_FUNC, &info)) {
// fn = reinterpret_cast<FN>(info.address);
// }
#ifndef BASE_VDSO_SUPPORT_H_
#define BASE_VDSO_SUPPORT_H_
#include "../config.h"
#include "base/basictypes.h"
#include "base/elf_mem_image.h"
#ifdef HAVE_ELF_MEM_IMAGE
#define HAVE_VDSO_SUPPORT 1
#include <stdlib.h> // for NULL
namespace base {
// NOTE: this class may be used from within tcmalloc, and can not
// use any memory allocation routines.
class VDSOSupport {
public:
VDSOSupport();
typedef ElfMemImage::SymbolInfo SymbolInfo;
typedef ElfMemImage::SymbolIterator SymbolIterator;
// Answers whether we have a vdso at all.
bool IsPresent() const { return image_.IsPresent(); }
// Allow to iterate over all VDSO symbols.
SymbolIterator begin() const { return image_.begin(); }
SymbolIterator end() const { return image_.end(); }
// Look up versioned dynamic symbol in the kernel VDSO.
// Returns false if VDSO is not present, or doesn't contain given
// symbol/version/type combination.
// If info_out != NULL, additional details are filled in.
bool LookupSymbol(const char *name, const char *version,
int symbol_type, SymbolInfo *info_out) const;
// Find info about symbol (if any) which overlaps given address.
// Returns true if symbol was found; false if VDSO isn't present
// or doesn't have a symbol overlapping given address.
// If info_out != NULL, additional details are filled in.
bool LookupSymbolByAddress(const void *address, SymbolInfo *info_out) const;
// Used only for testing. Replace real VDSO base with a mock.
// Returns previous value of vdso_base_. After you are done testing,
// you are expected to call SetBase() with previous value, in order to
// reset state to the way it was.
const void *SetBase(const void *s);
// Computes vdso_base_ and returns it. Should be called as early as
// possible; before any thread creation, chroot or setuid.
static const void *Init();
private:
// image_ represents VDSO ELF image in memory.
// image_.ehdr_ == NULL implies there is no VDSO.
ElfMemImage image_;
// Cached value of auxv AT_SYSINFO_EHDR, computed once.
// This is a tri-state:
// kInvalidBase => value hasn't been determined yet.
// 0 => there is no VDSO.
// else => vma of VDSO Elf{32,64}_Ehdr.
//
// When testing with mock VDSO, low bit is set.
// The low bit is always available because vdso_base_ is
// page-aligned.
static const void *vdso_base_;
DISALLOW_COPY_AND_ASSIGN(VDSOSupport);
};
} // namespace base
#endif // HAVE_ELF_MEM_IMAGE
#endif // BASE_VDSO_SUPPORT_H_

@@ -1,387 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2008, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
// Author: Sanjay Ghemawat <opensource@google.com>
#include "config.h"
#include <algorithm>
#include "central_freelist.h"
#include "internal_logging.h" // for ASSERT, MESSAGE
#include "linked_list.h" // for SLL_Next, SLL_Push, etc
#include "page_heap.h" // for PageHeap
#include "static_vars.h" // for Static
using std::min;
using std::max;
namespace tcmalloc {
void CentralFreeList::Init(size_t cl) {
size_class_ = cl;
tcmalloc::DLL_Init(&empty_);
tcmalloc::DLL_Init(&nonempty_);
num_spans_ = 0;
counter_ = 0;
max_cache_size_ = kMaxNumTransferEntries;
#ifdef TCMALLOC_SMALL_BUT_SLOW
// Disable the transfer cache for the small footprint case.
cache_size_ = 0;
#else
cache_size_ = 16;
#endif
if (cl > 0) {
// Limit the maximum size of the cache based on the size class. If this
// is not done, large size class objects will consume a lot of memory if
// they just sit in the transfer cache.
int32_t bytes = Static::sizemap()->ByteSizeForClass(cl);
int32_t objs_to_move = Static::sizemap()->num_objects_to_move(cl);
ASSERT(objs_to_move > 0 && bytes > 0);
// Limit each size class cache to at most 1MB of objects or one entry,
// whichever is greater. Total transfer cache memory used across all
// size classes then can't be greater than approximately
// 1MB * kMaxNumTransferEntries.
// min and max are in parens to avoid macro-expansion on windows.
max_cache_size_ = (min)(max_cache_size_,
(max)(1, (1024 * 1024) / (bytes * objs_to_move)));
cache_size_ = (min)(cache_size_, max_cache_size_);
}
used_slots_ = 0;
ASSERT(cache_size_ <= max_cache_size_);
}
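// Worked example for the cap above (illustrative numbers): for a size class
// with 4096-byte objects and num_objects_to_move == 32, one transfer-cache
// entry holds 4096 * 32 = 128 KiB, so max_cache_size_ becomes
// max(1, 1 MiB / 128 KiB) = 8 entries for that class.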
void CentralFreeList::ReleaseListToSpans(void* start) {
while (start) {
void *next = SLL_Next(start);
ReleaseToSpans(start);
start = next;
}
}
// MapObjectToSpan should logically be part of ReleaseToSpans. But
// this triggers an optimization bug in gcc 4.5.0. Moving it to a
// separate function, and making sure that function isn't inlined,
// seems to work around the problem. The underlying bug should be
// fixed in gcc 4.5.1.
static
#if __GNUC__ == 4 && __GNUC_MINOR__ == 5 && __GNUC_PATCHLEVEL__ == 0
__attribute__ ((noinline))
#endif
Span* MapObjectToSpan(void* object) {
const PageID p = reinterpret_cast<uintptr_t>(object) >> kPageShift;
Span* span = Static::pageheap()->GetDescriptor(p);
return span;
}
void CentralFreeList::ReleaseToSpans(void* object) {
Span* span = MapObjectToSpan(object);
ASSERT(span != NULL);
ASSERT(span->refcount > 0);
// If span is empty, move it to non-empty list
if (span->objects == NULL) {
tcmalloc::DLL_Remove(span);
tcmalloc::DLL_Prepend(&nonempty_, span);
Event(span, 'N', 0);
}
// The following check is expensive, so it is disabled by default
if (false) {
// Check that object does not occur in list
int got = 0;
for (void* p = span->objects; p != NULL; p = *((void**) p)) {
ASSERT(p != object);
got++;
}
ASSERT(got + span->refcount ==
(span->length<<kPageShift) /
Static::sizemap()->ByteSizeForClass(span->sizeclass));
}
counter_++;
span->refcount--;
if (span->refcount == 0) {
Event(span, '#', 0);
counter_ -= ((span->length<<kPageShift) /
Static::sizemap()->ByteSizeForClass(span->sizeclass));
tcmalloc::DLL_Remove(span);
--num_spans_;
// Release central list lock while operating on pageheap
lock_.Unlock();
{
SpinLockHolder h(Static::pageheap_lock());
Static::pageheap()->Delete(span);
}
lock_.Lock();
} else {
*(reinterpret_cast<void**>(object)) = span->objects;
span->objects = object;
}
}
bool CentralFreeList::EvictRandomSizeClass(
int locked_size_class, bool force) {
static int race_counter = 0;
int t = race_counter++; // Updated without a lock, but who cares.
if (t >= kNumClasses) {
while (t >= kNumClasses) {
t -= kNumClasses;
}
race_counter = t;
}
ASSERT(t >= 0);
ASSERT(t < kNumClasses);
if (t == locked_size_class) return false;
return Static::central_cache()[t].ShrinkCache(locked_size_class, force);
}
bool CentralFreeList::MakeCacheSpace() {
// Is there room in the cache?
if (used_slots_ < cache_size_) return true;
// Check whether we can expand this cache.
if (cache_size_ == max_cache_size_) return false;
// Ok, we'll try to grab an entry from some other size class.
if (EvictRandomSizeClass(size_class_, false) ||
EvictRandomSizeClass(size_class_, true)) {
// Succeeded in evicting, we're going to make our cache larger.
// However, we may have dropped and re-acquired the lock in
// EvictRandomSizeClass (via ShrinkCache and the LockInverter), so the
// cache_size may have changed. Therefore, check and verify that it is
// still OK to increase the cache_size.
if (cache_size_ < max_cache_size_) {
cache_size_++;
return true;
}
}
return false;
}
namespace {
class LockInverter {
private:
SpinLock *held_, *temp_;
public:
inline explicit LockInverter(SpinLock* held, SpinLock *temp)
: held_(held), temp_(temp) { held_->Unlock(); temp_->Lock(); }
inline ~LockInverter() { temp_->Unlock(); held_->Lock(); }
};
}
// This function is marked as NO_THREAD_SAFETY_ANALYSIS because it uses
// LockInverter to release one lock and acquire another in scoped-lock
// style, which our current annotation/analysis does not support.
bool CentralFreeList::ShrinkCache(int locked_size_class, bool force)
NO_THREAD_SAFETY_ANALYSIS {
// Start with a quick check without taking a lock.
if (cache_size_ == 0) return false;
// We don't evict from a full cache unless we are 'forcing'.
if (force == false && used_slots_ == cache_size_) return false;
// Grab lock, but first release the other lock held by this thread. We use
// the lock inverter to ensure that we never hold two size class locks
// concurrently. That can create a deadlock because there is no well
// defined nesting order.
LockInverter li(&Static::central_cache()[locked_size_class].lock_, &lock_);
ASSERT(used_slots_ <= cache_size_);
ASSERT(0 <= cache_size_);
if (cache_size_ == 0) return false;
if (used_slots_ == cache_size_) {
if (force == false) return false;
// ReleaseListToSpans releases the lock, so we have to make all the
// updates to the central list before calling it.
cache_size_--;
used_slots_--;
ReleaseListToSpans(tc_slots_[used_slots_].head);
return true;
}
cache_size_--;
return true;
}
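
The LockInverter idiom above is worth seeing in isolation. A minimal sketch with std::mutex standing in for SpinLock (a substitution for illustration only): the constructor drops the lock the caller already holds before taking the other one, and the destructor restores the original state, so a thread never holds two size-class locks at once and no lock-ordering cycle can form.

// --- illustrative sketch, not part of the original file ---
#include <mutex>

class ScopedLockSwap {  // hypothetical name; plays the role of LockInverter
 public:
  ScopedLockSwap(std::mutex* held, std::mutex* temp)
      : held_(held), temp_(temp) {
    held_->unlock();  // release the lock the caller entered with
    temp_->lock();    // then take the other one; never both at once
  }
  ~ScopedLockSwap() {
    temp_->unlock();  // leave locks in the state the caller expects
    held_->lock();
  }
 private:
  std::mutex *held_, *temp_;
};
// --- end sketch ---
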
void CentralFreeList::InsertRange(void *start, void *end, int N) {
SpinLockHolder h(&lock_);
if (N == Static::sizemap()->num_objects_to_move(size_class_) &&
MakeCacheSpace()) {
int slot = used_slots_++;
ASSERT(slot >= 0);
ASSERT(slot < max_cache_size_);
TCEntry *entry = &tc_slots_[slot];
entry->head = start;
entry->tail = end;
return;
}
ReleaseListToSpans(start);
}
int CentralFreeList::RemoveRange(void **start, void **end, int N) {
ASSERT(N > 0);
lock_.Lock();
if (N == Static::sizemap()->num_objects_to_move(size_class_) &&
used_slots_ > 0) {
int slot = --used_slots_;
ASSERT(slot >= 0);
TCEntry *entry = &tc_slots_[slot];
*start = entry->head;
*end = entry->tail;
lock_.Unlock();
return N;
}
int result = 0;
*start = NULL;
*end = NULL;
// TODO: Prefetch multiple TCEntries?
result = FetchFromOneSpansSafe(N, start, end);
if (result != 0) {
while (result < N) {
int n;
void* head = NULL;
void* tail = NULL;
n = FetchFromOneSpans(N - result, &head, &tail);
if (!n) break;
result += n;
SLL_PushRange(start, head, tail);
}
}
lock_.Unlock();
return result;
}
int CentralFreeList::FetchFromOneSpansSafe(int N, void **start, void **end) {
int result = FetchFromOneSpans(N, start, end);
if (!result) {
Populate();
result = FetchFromOneSpans(N, start, end);
}
return result;
}
int CentralFreeList::FetchFromOneSpans(int N, void **start, void **end) {
if (tcmalloc::DLL_IsEmpty(&nonempty_)) return 0;
Span* span = nonempty_.next;
ASSERT(span->objects != NULL);
int result = 0;
void *prev, *curr;
curr = span->objects;
do {
prev = curr;
curr = *(reinterpret_cast<void**>(curr));
} while (++result < N && curr != NULL);
if (curr == NULL) {
// Move to empty list
tcmalloc::DLL_Remove(span);
tcmalloc::DLL_Prepend(&empty_, span);
Event(span, 'E', 0);
}
*start = span->objects;
*end = prev;
span->objects = curr;
SLL_SetNext(*end, NULL);
span->refcount += result;
counter_ -= result;
return result;
}
// Fetch memory from the system and add to the central cache freelist.
void CentralFreeList::Populate() {
// Release central list lock while operating on pageheap
lock_.Unlock();
const size_t npages = Static::sizemap()->class_to_pages(size_class_);
Span* span;
{
SpinLockHolder h(Static::pageheap_lock());
span = Static::pageheap()->New(npages);
if (span) Static::pageheap()->RegisterSizeClass(span, size_class_);
}
if (span == NULL) {
Log(kLog, __FILE__, __LINE__,
"tcmalloc: allocation failed", npages << kPageShift);
lock_.Lock();
return;
}
ASSERT(span->length == npages);
// Cache sizeclass info eagerly. Locking is not necessary.
// (Instead of being eager, we could just replace any stale info
// about this span, but that seems to be no better in practice.)
for (int i = 0; i < npages; i++) {
Static::pageheap()->CacheSizeClass(span->start + i, size_class_);
}
// Split the block into pieces and add to the free-list
// TODO: coloring of objects to avoid cache conflicts?
void** tail = &span->objects;
char* ptr = reinterpret_cast<char*>(span->start << kPageShift);
char* limit = ptr + (npages << kPageShift);
const size_t size = Static::sizemap()->ByteSizeForClass(size_class_);
int num = 0;
while (ptr + size <= limit) {
*tail = ptr;
tail = reinterpret_cast<void**>(ptr);
ptr += size;
num++;
}
ASSERT(ptr <= limit);
*tail = NULL;
span->refcount = 0; // No sub-object in use yet
// Add span to list of non-empty spans
lock_.Lock();
tcmalloc::DLL_Prepend(&nonempty_, span);
++num_spans_;
counter_ += num;
}
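
Populate's splitting loop threads the freelist through the span's own pages: each object's first word stores the pointer to the next object. The same technique in a self-contained form, carving a plain heap buffer (sizes chosen arbitrarily for the example):

// --- illustrative sketch, not part of the original file ---
#include <cstddef>
#include <cstdio>
#include <vector>

int main() {
  const size_t kObjectSize = 64;
  const size_t kBufferSize = 4096;
  std::vector<char> buffer(kBufferSize);

  void* head = nullptr;
  void** tail = &head;
  char* ptr = buffer.data();
  char* limit = ptr + kBufferSize;
  int num = 0;
  // Link each 64-byte chunk to the next through its first word.
  while (ptr + kObjectSize <= limit) {
    *tail = ptr;
    tail = reinterpret_cast<void**>(ptr);
    ptr += kObjectSize;
    num++;
  }
  *tail = nullptr;
  std::printf("carved %d objects\n", num);  // 4096 / 64 = 64
  return 0;
}
// --- end sketch ---
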
int CentralFreeList::tc_length() {
SpinLockHolder h(&lock_);
return used_slots_ * Static::sizemap()->num_objects_to_move(size_class_);
}
size_t CentralFreeList::OverheadBytes() {
SpinLockHolder h(&lock_);
if (size_class_ == 0) { // 0 holds the 0-sized allocations
return 0;
}
const size_t pages_per_span = Static::sizemap()->class_to_pages(size_class_);
const size_t object_size = Static::sizemap()->class_to_size(size_class_);
ASSERT(object_size > 0);
const size_t overhead_per_span = (pages_per_span * kPageSize) % object_size;
return num_spans_ * overhead_per_span;
}
} // namespace tcmalloc


@@ -1,211 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2008, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
// Author: Sanjay Ghemawat <opensource@google.com>
#ifndef TCMALLOC_CENTRAL_FREELIST_H_
#define TCMALLOC_CENTRAL_FREELIST_H_
#include "config.h"
#include <stddef.h> // for size_t
#ifdef HAVE_STDINT_H
#include <stdint.h> // for int32_t
#endif
#include "base/spinlock.h"
#include "base/thread_annotations.h"
#include "common.h"
#include "span.h"
namespace tcmalloc {
// Data kept per size-class in central cache.
class CentralFreeList {
public:
// A CentralFreeList may be used before its constructor runs.
// So we prevent lock_'s constructor from doing anything to the
// lock_ state.
CentralFreeList() : lock_(base::LINKER_INITIALIZED) { }
void Init(size_t cl);
// These methods all do internal locking.
// Insert the specified range into the central freelist. N is the number of
// elements in the range. RemoveRange() is the opposite operation.
void InsertRange(void *start, void *end, int N);
// Returns the actual number of fetched elements and sets *start and *end.
int RemoveRange(void **start, void **end, int N);
// Returns the number of free objects in cache.
int length() {
SpinLockHolder h(&lock_);
return counter_;
}
// Returns the number of free objects in the transfer cache.
int tc_length();
// Returns the memory overhead (internal fragmentation) attributable
// to the freelist. This is memory lost when the size of elements
// in a freelist doesn't exactly divide the page-size (an 8192-byte
// page full of 5-byte objects would have 2 bytes memory overhead).
size_t OverheadBytes();
// Lock/Unlock the internal SpinLock. Used on the pthread_atfork call
// to set the lock in a consistent state before the fork.
void Lock() {
lock_.Lock();
}
void Unlock() {
lock_.Unlock();
}
private:
// TransferCache is used to cache transfers of
// sizemap.num_objects_to_move(size_class) back and forth between
// thread caches and the central cache for a given size class.
struct TCEntry {
void *head; // Head of chain of objects.
void *tail; // Tail of chain of objects.
};
// A central cache freelist can have anywhere from 0 to kMaxNumTransferEntries
// slots to put linked list chains into.
#ifdef TCMALLOC_SMALL_BUT_SLOW
// For the small memory model, the transfer cache is not used.
static const int kMaxNumTransferEntries = 0;
#else
// Starting point for the maximum number of entries in the transfer cache.
// The actual maximum for a given size class may be lower than this
// maximum value.
static const int kMaxNumTransferEntries = 64;
#endif
// REQUIRES: lock_ is held
// Removes up to N objects into *start/*end and returns the count.
// Returns 0 if there are no free entries in the cache.
int FetchFromOneSpans(int N, void **start, void **end) EXCLUSIVE_LOCKS_REQUIRED(lock_);
// REQUIRES: lock_ is held
// Removes up to N objects into *start/*end and returns the count,
// populating from the pageheap if the cache is empty. Returns 0 only
// on allocation failure.
int FetchFromOneSpansSafe(int N, void **start, void **end) EXCLUSIVE_LOCKS_REQUIRED(lock_);
// REQUIRES: lock_ is held
// Release a linked list of objects to spans.
// May temporarily release lock_.
void ReleaseListToSpans(void *start) EXCLUSIVE_LOCKS_REQUIRED(lock_);
// REQUIRES: lock_ is held
// Release an object to spans.
// May temporarily release lock_.
void ReleaseToSpans(void* object) EXCLUSIVE_LOCKS_REQUIRED(lock_);
// REQUIRES: lock_ is held
// Populate cache by fetching from the page heap.
// May temporarily release lock_.
void Populate() EXCLUSIVE_LOCKS_REQUIRED(lock_);
// REQUIRES: lock_ is held.
// Tries to make room for a TCEntry. If the cache is full it will try to
// expand it at the cost of some other cache size. Return false if there is
// no space.
bool MakeCacheSpace() EXCLUSIVE_LOCKS_REQUIRED(lock_);
// REQUIRES: lock_ for locked_size_class is held.
// Picks a "random" size class to steal TCEntry slot from. In reality it
// just iterates over the sizeclasses but does so without taking a lock.
// Returns true on success.
// May temporarily lock a "random" size class.
static bool EvictRandomSizeClass(int locked_size_class, bool force);
// REQUIRES: lock_ is *not* held.
// Tries to shrink the cache. If force is true it will release objects to
// spans if that allows it to shrink the cache. Returns false if it failed
// to shrink the cache. Decrements cache_size_ on success.
// May temporarily take lock_. If it takes lock_, the locked_size_class
// lock is released to keep the thread from holding two size class locks
// concurrently which could lead to a deadlock.
bool ShrinkCache(int locked_size_class, bool force) LOCKS_EXCLUDED(lock_);
// This lock protects all the data members. used_slots_ and cache_size_
// may be read without holding the lock.
SpinLock lock_;
// We keep linked lists of empty and non-empty spans.
size_t size_class_; // My size class
Span empty_; // Dummy header for list of empty spans
Span nonempty_; // Dummy header for list of non-empty spans
size_t num_spans_; // Number of spans in empty_ plus nonempty_
size_t counter_; // Number of free objects in cache entry
// Here we reserve space for TCEntry cache slots. Space is preallocated
// for the largest possible number of entries that any one size class may
// accumulate. Not all size classes are allowed to accumulate
// kMaxNumTransferEntries, so there is some wasted space for those size
// classes.
TCEntry tc_slots_[kMaxNumTransferEntries];
// Number of currently used cached entries in tc_slots_. This variable is
// updated under a lock but can be read without one.
int32_t used_slots_;
// The current number of slots for this size class. This is an
// adaptive value that is increased if there is lots of traffic
// on a given size class.
int32_t cache_size_;
// Maximum size of the cache for a given size class.
int32_t max_cache_size_;
};
// Pads each CentralCache object to multiple of 64 bytes. Since some
// compilers (such as MSVC) don't like it when the padding is 0, I use
// template specialization to remove the padding entirely when
// sizeof(CentralFreeList) is a multiple of 64.
template<int kFreeListSizeMod64>
class CentralFreeListPaddedTo : public CentralFreeList {
private:
char pad_[64 - kFreeListSizeMod64];
};
template<>
class CentralFreeListPaddedTo<0> : public CentralFreeList {
};
class CentralFreeListPadded : public CentralFreeListPaddedTo<
sizeof(CentralFreeList) % 64> {
};
} // namespace tcmalloc
#endif // TCMALLOC_CENTRAL_FREELIST_H_
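
The CentralFreeListPaddedTo trick at the end of this header avoids a zero-length pad array when sizeof(CentralFreeList) already divides 64. A reduced sketch of the same specialization pattern, with a toy payload standing in for CentralFreeList:

// --- illustrative sketch, not part of the original file ---
#include <cstddef>

struct Payload { char data[40]; };  // stand-in for CentralFreeList

template <size_t kSizeMod64>
struct PaddedTo : Payload {
  char pad[64 - kSizeMod64];  // rounds the object up to 64 bytes
};

template <>
struct PaddedTo<0> : Payload {};  // already a multiple of 64: no pad member

using Padded = PaddedTo<sizeof(Payload) % 64>;
static_assert(sizeof(Padded) % 64 == 0, "padding failed");
// --- end sketch ---
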


@@ -1,275 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2008, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
// Author: Sanjay Ghemawat <opensource@google.com>
#include <stdlib.h> // for getenv and strtol
#include "config.h"
#include "common.h"
#include "system-alloc.h"
#include "base/spinlock.h"
#include "getenv_safe.h" // TCMallocGetenvSafe
namespace tcmalloc {
// Define the maximum number of objects per size class to transfer between
// thread and central caches.
static int32 FLAGS_tcmalloc_transfer_num_objects;
static const int32 kDefaultTransferNumObjecs = 512;
// The init function is provided to explicitly initialize the variable from
// the environment variable, avoiding C++ global construction that might
// defer its initialization until after a malloc/new call.
static inline void InitTCMallocTransferNumObjects()
{
if (UNLIKELY(FLAGS_tcmalloc_transfer_num_objects == 0)) {
const char *envval = TCMallocGetenvSafe("TCMALLOC_TRANSFER_NUM_OBJ");
FLAGS_tcmalloc_transfer_num_objects = !envval ? kDefaultTransferNumObjecs :
strtol(envval, NULL, 10);
}
}
// Note: the following only works for "n"s that fit in 32-bits, but
// that is fine since we only use it for small sizes.
static inline int LgFloor(size_t n) {
int log = 0;
for (int i = 4; i >= 0; --i) {
int shift = (1 << i);
size_t x = n >> shift;
if (x != 0) {
n = x;
log += shift;
}
}
ASSERT(n == 1);
return log;
}
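
LgFloor computes floor(log2(n)) by peeling off shifts of 16, 8, 4, 2 and 1. A quick standalone check (the function reproduced verbatim, with the ASSERT dropped):

// --- illustrative sketch, not part of the original file ---
#include <cstddef>
#include <cstdio>

static int LgFloor(size_t n) {
  int log = 0;
  for (int i = 4; i >= 0; --i) {
    int shift = (1 << i);
    size_t x = n >> shift;
    if (x != 0) {
      n = x;
      log += shift;
    }
  }
  return log;
}

int main() {
  // floor(log2(1)) = 0, floor(log2(1023)) = 9, floor(log2(1024)) = 10
  std::printf("%d %d %d\n", LgFloor(1), LgFloor(1023), LgFloor(1024));
  return 0;
}
// --- end sketch ---
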
int AlignmentForSize(size_t size) {
int alignment = kAlignment;
if (size > kMaxSize) {
// Cap alignment at kPageSize for large sizes.
alignment = kPageSize;
} else if (size >= 128) {
// Space wasted due to alignment is at most 1/8, i.e., 12.5%.
alignment = (1 << LgFloor(size)) / 8;
} else if (size >= kMinAlign) {
// We need an alignment of at least 16 bytes to satisfy
// requirements for some SSE types.
alignment = kMinAlign;
}
// Maximum alignment allowed is page size alignment.
if (alignment > kPageSize) {
alignment = kPageSize;
}
CHECK_CONDITION(size < kMinAlign || alignment >= kMinAlign);
CHECK_CONDITION((alignment & (alignment - 1)) == 0);
return alignment;
}
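
Worked examples, assuming the default constants defined in common.h (kAlignment = 8, kMinAlign = 16, kPageSize = 8192, kMaxSize = 256 KiB): a 24-byte request gets alignment 16; a 300-byte request has LgFloor(300) = 8, so alignment = 256 / 8 = 32; anything above 256 KiB is page-aligned at 8192 bytes.
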
int SizeMap::NumMoveSize(size_t size) {
if (size == 0) return 0;
// Use approx 64k transfers between thread and central caches.
int num = static_cast<int>(64.0 * 1024.0 / size);
if (num < 2) num = 2;
// Avoid bringing too many objects into small object free lists.
// If this value is too large:
// - We waste memory with extra objects sitting in the thread caches.
// - The central freelist holds its lock for too long while
// building a linked list of objects, slowing down the allocations
// of other threads.
// If this value is too small:
// - We go to the central freelist too often and we have to acquire
// its lock each time.
// This value strikes a balance between the constraints above.
if (num > FLAGS_tcmalloc_transfer_num_objects)
num = FLAGS_tcmalloc_transfer_num_objects;
return num;
}
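
For example, 64-byte objects give 65536 / 64 = 1024, which the default transfer cap of 512 (kDefaultTransferNumObjecs, unless overridden via TCMALLOC_TRANSFER_NUM_OBJ) reduces to 512; at the other extreme, 256 KiB objects give 0, which the lower bound raises to 2.
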
// Initialize the mapping arrays
void SizeMap::Init() {
InitTCMallocTransferNumObjects();
// Do some sanity checking on add_amount[]/shift_amount[]/class_array[]
if (ClassIndex(0) != 0) {
Log(kCrash, __FILE__, __LINE__,
"Invalid class index for size 0", ClassIndex(0));
}
if (ClassIndex(kMaxSize) >= sizeof(class_array_)) {
Log(kCrash, __FILE__, __LINE__,
"Invalid class index for kMaxSize", ClassIndex(kMaxSize));
}
// Compute the size classes we want to use
int sc = 1; // Next size class to assign
int alignment = kAlignment;
CHECK_CONDITION(kAlignment <= kMinAlign);
for (size_t size = kAlignment; size <= kMaxSize; size += alignment) {
alignment = AlignmentForSize(size);
CHECK_CONDITION((size % alignment) == 0);
int blocks_to_move = NumMoveSize(size) / 4;
size_t psize = 0;
do {
psize += kPageSize;
// Allocate enough pages so leftover is less than 1/8 of total.
// This bounds wasted space to at most 12.5%.
while ((psize % size) > (psize >> 3)) {
psize += kPageSize;
}
// Continue to add pages until there are at least as many objects in
// the span as are needed when moving objects from the central
// freelists and spans to the thread caches.
} while ((psize / size) < (blocks_to_move));
const size_t my_pages = psize >> kPageShift;
if (sc > 1 && my_pages == class_to_pages_[sc-1]) {
// See if we can merge this into the previous class without
// increasing the fragmentation of the previous class.
const size_t my_objects = (my_pages << kPageShift) / size;
const size_t prev_objects = (class_to_pages_[sc-1] << kPageShift)
/ class_to_size_[sc-1];
if (my_objects == prev_objects) {
// Adjust last class to include this size
class_to_size_[sc-1] = size;
continue;
}
}
// Add new class
class_to_pages_[sc] = my_pages;
class_to_size_[sc] = size;
sc++;
}
if (sc != kNumClasses) {
Log(kCrash, __FILE__, __LINE__,
"wrong number of size classes: (found vs. expected )", sc, kNumClasses);
}
// Initialize the mapping arrays
int next_size = 0;
for (int c = 1; c < kNumClasses; c++) {
const int max_size_in_class = class_to_size_[c];
for (int s = next_size; s <= max_size_in_class; s += kAlignment) {
class_array_[ClassIndex(s)] = c;
}
next_size = max_size_in_class + kAlignment;
}
// Double-check sizes just to be safe
for (size_t size = 0; size <= kMaxSize;) {
const int sc = SizeClass(size);
if (sc <= 0 || sc >= kNumClasses) {
Log(kCrash, __FILE__, __LINE__,
"Bad size class (class, size)", sc, size);
}
if (sc > 1 && size <= class_to_size_[sc-1]) {
Log(kCrash, __FILE__, __LINE__,
"Allocating unnecessarily large class (class, size)", sc, size);
}
const size_t s = class_to_size_[sc];
if (size > s || s == 0) {
Log(kCrash, __FILE__, __LINE__,
"Bad (class, size, requested)", sc, s, size);
}
if (size <= kMaxSmallSize) {
size += 8;
} else {
size += 128;
}
}
// Initialize the num_objects_to_move array.
for (size_t cl = 1; cl < kNumClasses; ++cl) {
num_objects_to_move_[cl] = NumMoveSize(ByteSizeForClass(cl));
}
}
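
To make the span-sizing loop above concrete, consider a hypothetical 1536-byte class with the default 8 KiB pages: NumMoveSize(1536) = 42, so blocks_to_move = 10. One page holds 8192 / 1536 = 5 objects with 512 bytes left over (under the 1/8 = 1024-byte waste bound), but 5 < 10, so a second page is added; 16384 / 1536 = 10 objects satisfies the loop, and the class gets 2-page spans.
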
// Metadata allocator -- keeps stats about how many bytes allocated.
static uint64_t metadata_system_bytes_ = 0;
static const size_t kMetadataAllocChunkSize = 8*1024*1024;
// As ThreadCache objects are allocated with MetaDataAlloc, and also
// CACHELINE_ALIGNED, we must use the same alignment as TCMalloc_SystemAlloc.
static const size_t kMetadataAllignment = sizeof(MemoryAligner);
static char *metadata_chunk_alloc_;
static size_t metadata_chunk_avail_;
static SpinLock metadata_alloc_lock(SpinLock::LINKER_INITIALIZED);
void* MetaDataAlloc(size_t bytes) {
if (bytes >= kMetadataAllocChunkSize) {
void *rv = TCMalloc_SystemAlloc(bytes,
NULL, kMetadataAllignment);
if (rv != NULL) {
metadata_system_bytes_ += bytes;
}
return rv;
}
SpinLockHolder h(&metadata_alloc_lock);
// The following works by taking the address modulo kMetadataAllignment
// (i.e. its low log2(kMetadataAllignment) bits) and negating it: negated
// value + original value is 0 modulo kMetadataAllignment, which is
// exactly the padding we need. Note that we negate before masking off
// the higher bits; otherwise we would have to mask them off again after
// negation.
intptr_t alignment = -reinterpret_cast<intptr_t>(metadata_chunk_alloc_) & (kMetadataAllignment-1);
if (metadata_chunk_avail_ < bytes + alignment) {
size_t real_size;
void *ptr = TCMalloc_SystemAlloc(kMetadataAllocChunkSize,
&real_size, kMetadataAllignment);
if (ptr == NULL) {
return NULL;
}
metadata_chunk_alloc_ = static_cast<char *>(ptr);
metadata_chunk_avail_ = real_size;
alignment = 0;
}
void *rv = static_cast<void *>(metadata_chunk_alloc_ + alignment);
bytes += alignment;
metadata_chunk_alloc_ += bytes;
metadata_chunk_avail_ -= bytes;
metadata_system_bytes_ += bytes;
return rv;
}
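
The negation trick in the comment above can be checked with a worked example; a minimal sketch assuming a 16-byte alignment target (the real code uses kMetadataAllignment):

// --- illustrative sketch, not part of the original file ---
#include <cstdint>
#include <cstdio>

int main() {
  const intptr_t kAlign = 16;
  intptr_t addr = 0x1003;  // pretend bump pointer, 3 bytes past alignment
  // -addr & (kAlign - 1) is the smallest pad making addr a multiple of
  // kAlign: -0x1003 & 0xF == 0xD, and 0x1003 + 0xD == 0x1010.
  intptr_t pad = -addr & (kAlign - 1);
  std::printf("pad = %d, aligned = %#lx\n",
              static_cast<int>(pad),
              static_cast<unsigned long>(addr + pad));
  return 0;
}
// --- end sketch ---
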
uint64_t metadata_system_bytes() { return metadata_system_bytes_; }
} // namespace tcmalloc


@@ -1,295 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2008, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
// Author: Sanjay Ghemawat <opensource@google.com>
//
// Common definitions for tcmalloc code.
#ifndef TCMALLOC_COMMON_H_
#define TCMALLOC_COMMON_H_
#include "config.h"
#include <stddef.h> // for size_t
#ifdef HAVE_STDINT_H
#include <stdint.h> // for uintptr_t, uint64_t
#endif
#include "internal_logging.h" // for ASSERT, etc
#include "base/basictypes.h" // for LIKELY, etc
#ifdef HAVE_BUILTIN_EXPECT
#define LIKELY(x) __builtin_expect(!!(x), 1)
#define UNLIKELY(x) __builtin_expect(!!(x), 0)
#else
#define LIKELY(x) (x)
#define UNLIKELY(x) (x)
#endif
// Type that can hold a page number
typedef uintptr_t PageID;
// Type that can hold the length of a run of pages
typedef uintptr_t Length;
//-------------------------------------------------------------------
// Configuration
//-------------------------------------------------------------------
#if defined(TCMALLOC_ALIGN_8BYTES)
// Unless we are forced to use 8-byte alignment, we use an alignment of
// at least 16 bytes to satisfy requirements for some SSE types.
// Keep in mind that with 16-byte alignment the space wasted due to
// alignment can reach 25% (e.g. a malloc of 24 bytes gets 32 bytes).
static const size_t kMinAlign = 8;
// Number of size classes created until we reach a size of 128 bytes.
static const size_t kBaseClasses = 16;
#else
static const size_t kMinAlign = 16;
static const size_t kBaseClasses = 9;
#endif
// Using large pages speeds up the execution at a cost of larger memory use.
// Deallocation may speed up by a factor as the page map gets 8x smaller, so
// lookups in the page map result in fewer L2 cache misses, which translates to
// speedup for application/platform combinations with high L2 cache pressure.
// As the number of size classes increases with large pages, we increase
// the thread cache allowance to avoid passing more free ranges to and from
// central lists. Also, larger pages are less likely to get freed.
// These two factors cause a bounded increase in memory use.
#if defined(TCMALLOC_32K_PAGES)
static const size_t kPageShift = 15;
static const size_t kNumClasses = kBaseClasses + 69;
#elif defined(TCMALLOC_64K_PAGES)
static const size_t kPageShift = 16;
static const size_t kNumClasses = kBaseClasses + 73;
#else
static const size_t kPageShift = 13;
static const size_t kNumClasses = kBaseClasses + 79;
#endif
static const size_t kMaxThreadCacheSize = 4 << 20;
static const size_t kPageSize = 1 << kPageShift;
static const size_t kMaxSize = 256 * 1024;
static const size_t kAlignment = 8;
static const size_t kLargeSizeClass = 0;
// For all span-lengths < kMaxPages we keep an exact-size list.
static const size_t kMaxPages = 1 << (20 - kPageShift);
// Default bound on the total amount of thread caches.
#ifdef TCMALLOC_SMALL_BUT_SLOW
// Make the overall thread cache no bigger than that of a single thread
// for the small memory footprint case.
static const size_t kDefaultOverallThreadCacheSize = kMaxThreadCacheSize;
#else
static const size_t kDefaultOverallThreadCacheSize = 8u * kMaxThreadCacheSize;
#endif
// Lower bound on the per-thread cache sizes
static const size_t kMinThreadCacheSize = kMaxSize * 2;
// The number of bytes one ThreadCache will steal from another when
// the first ThreadCache is forced to Scavenge(), delaying the
// next call to Scavenge for this thread.
static const size_t kStealAmount = 1 << 16;
// The number of times that a deallocation can cause a freelist to
// go over its max_length() before shrinking max_length().
static const int kMaxOverages = 3;
// Maximum length we allow a per-thread free-list to have before we
// move objects from it into the corresponding central free-list. We
// want this big to avoid locking the central free-list too often. It
// should not hurt to make this list somewhat big because the
// scavenging code will shrink it down when its contents are not in use.
static const int kMaxDynamicFreeListLength = 8192;
static const Length kMaxValidPages = (~static_cast<Length>(0)) >> kPageShift;
#if defined __x86_64__
// All current and planned x86_64 processors only look at the lower 48 bits
// in virtual to physical address translation. The top 16 are thus unused.
// TODO(rus): Under what operating systems can we increase it safely to 17?
// This lets us use smaller page maps. On first allocation, a 36-bit page map
// uses only 96 KB instead of the 4.5 MB used by a 52-bit page map.
static const int kAddressBits = (sizeof(void*) < 8 ? (8 * sizeof(void*)) : 48);
#else
static const int kAddressBits = 8 * sizeof(void*);
#endif
namespace tcmalloc {
// Convert byte size into pages. This won't overflow, but may return
// an unreasonably large value if bytes is huge enough.
inline Length pages(size_t bytes) {
return (bytes >> kPageShift) +
((bytes & (kPageSize - 1)) > 0 ? 1 : 0);
}
// For larger allocation sizes, we use larger memory alignments to
// reduce the number of size classes.
int AlignmentForSize(size_t size);
// Size-class information + mapping
class SizeMap {
private:
// Number of objects to move between a per-thread list and a central
// list in one shot. We want this to be not too small so we can
// amortize the lock overhead for accessing the central list. Making
// it too big may temporarily cause unnecessary memory wastage in the
// per-thread free list until the scavenger cleans up the list.
int num_objects_to_move_[kNumClasses];
//-------------------------------------------------------------------
// Mapping from size to size_class and vice versa
//-------------------------------------------------------------------
// Sizes <= 1024 have an alignment >= 8. So for such sizes we have an
// array indexed by ceil(size/8). Sizes > 1024 have an alignment >= 128.
// So for these larger sizes we have an array indexed by ceil(size/128).
//
// We flatten both logical arrays into one physical array and use
// arithmetic to compute an appropriate index. The constants used by
// ClassIndex() were selected to make the flattening work.
//
// Examples:
// Size Expression Index
// -------------------------------------------------------
// 0 (0 + 7) / 8 0
// 1 (1 + 7) / 8 1
// ...
// 1024 (1024 + 7) / 8 128
// 1025 (1025 + 127 + (120<<7)) / 128 129
// ...
// 32768 (32768 + 127 + (120<<7)) / 128 376
static const int kMaxSmallSize = 1024;
static const size_t kClassArraySize =
((kMaxSize + 127 + (120 << 7)) >> 7) + 1;
unsigned char class_array_[kClassArraySize];
static inline size_t SmallSizeClass(size_t s) {
return (static_cast<uint32_t>(s) + 7) >> 3;
}
static inline size_t LargeSizeClass(size_t s) {
return (static_cast<uint32_t>(s) + 127 + (120 << 7)) >> 7;
}
// Compute index of the class_array[] entry for a given size
static inline size_t ClassIndex(size_t s) {
// Use unsigned arithmetic to avoid unnecessary sign extensions.
ASSERT(0 <= s);
ASSERT(s <= kMaxSize);
if (LIKELY(s <= kMaxSmallSize)) {
return SmallSizeClass(s);
} else {
return LargeSizeClass(s);
}
}
int NumMoveSize(size_t size);
// Mapping from size class to max size storable in that class
size_t class_to_size_[kNumClasses];
// Mapping from size class to number of pages to allocate at a time
size_t class_to_pages_[kNumClasses];
public:
// Constructor should do nothing since we rely on explicit Init()
// call, which may or may not be called before the constructor runs.
SizeMap() { }
// Initialize the mapping arrays
void Init();
inline int SizeClass(size_t size) {
return class_array_[ClassIndex(size)];
}
inline bool MaybeSizeClass(size_t size, size_t *size_class) {
size_t class_idx;
if (LIKELY(size <= kMaxSmallSize)) {
class_idx = SmallSizeClass(size);
} else if (size <= kMaxSize) {
class_idx = LargeSizeClass(size);
} else {
return false;
}
*size_class = class_array_[class_idx];
return true;
}
// Get the byte-size for a specified class
inline size_t ByteSizeForClass(size_t cl) {
return class_to_size_[cl];
}
// Mapping from size class to max size storable in that class
inline size_t class_to_size(size_t cl) {
return class_to_size_[cl];
}
// Mapping from size class to number of pages to allocate at a time
inline size_t class_to_pages(size_t cl) {
return class_to_pages_[cl];
}
// Number of objects to move between a per-thread list and a central
// list in one shot. We want this to be not too small so we can
// amortize the lock overhead for accessing the central list. Making
// it too big may temporarily cause unnecessary memory wastage in the
// per-thread free list until the scavenger cleans up the list.
inline int num_objects_to_move(size_t cl) {
return num_objects_to_move_[cl];
}
};
// Allocates "bytes" worth of memory and returns it. Increments
// metadata_system_bytes appropriately. May return NULL if allocation
// fails. Requires pageheap_lock is held.
void* MetaDataAlloc(size_t bytes);
// Returns the total number of bytes allocated from the system.
// Requires pageheap_lock is held.
uint64_t metadata_system_bytes();
// size/depth are made the same size as a pointer so that some generic
// code below can conveniently cast them back and forth to void*.
static const int kMaxStackDepth = 31;
struct StackTrace {
uintptr_t size; // Size of object
uintptr_t depth; // Number of PC values stored in array below
void* stack[kMaxStackDepth];
};
} // namespace tcmalloc
#endif // TCMALLOC_COMMON_H_

Some files were not shown because too many files have changed in this diff.