Merge branch 'master' of github.com:ClickHouse/ClickHouse into feature/randomASKII-function

This commit is contained in:
Sergei Shtykov 2019-12-21 14:51:05 +03:00
commit 1b9b326e4f
2105 changed files with 55135 additions and 56541 deletions

1
.github/CODEOWNERS vendored

@ -1,3 +1,4 @@
dbms/* @ClickHouse/core-assigner
utils/* @ClickHouse/core-assigner
docs/* @ClickHouse/docs
docs/zh/* @ClickHouse/docs-zh


@ -1,8 +1,6 @@
I hereby agree to the terms of the CLA available at: https://yandex.ru/legal/cla/?lang=en
For changelog. Remove if this is non-significant change.
Category (leave one):
Changelog category (leave one):
- New Feature
- Bug Fix
- Improvement
@ -11,11 +9,14 @@ Category (leave one):
- Build/Testing/Packaging Improvement
- Documentation
- Other
- Non-significant (changelog entry is not needed)
Short description (up to few sentences):
Changelog entry (up to few sentences, required except for Non-significant/Documentation categories):
...
Detailed description (optional):
...

70
.github/stale.yml vendored

@ -1,70 +0,0 @@
# Configuration for probot-stale - https://github.com/probot/stale
# Number of days of inactivity before an Issue or Pull Request becomes stale
daysUntilStale: 60
# Number of days of inactivity before an Issue or Pull Request with the stale label is closed.
# Set to false to disable. If disabled, issues still need to be closed manually, but will remain marked as stale.
daysUntilClose: 30
# Only issues or pull requests with all of these labels are checked for staleness. Defaults to `[]` (disabled)
onlyLabels: []
# Issues or Pull Requests with these labels will never be considered stale. Set to `[]` to disable
exemptLabels:
- bug
- feature
- memory
- performance
- prio-crit
- prio-major
- st-accepted
- st-in-progress
- st-waiting-for-fix
- segfault
- crash
# Set to true to ignore issues in a project (defaults to false)
exemptProjects: false
# Set to true to ignore issues in a milestone (defaults to false)
exemptMilestones: false
# Set to true to ignore issues with an assignee (defaults to false)
exemptAssignees: false
# Label to use when marking as stale
staleLabel: stale
# Comment to post when marking as stale. Set to `false` to disable
markComment: >
This issue has been automatically marked as stale because it has not had
recent activity. Please post a comment if this issue is still relevant to you.
Thank you for your contributions.
# Comment to post when removing the stale label.
# unmarkComment: >
# Your comment here.
# Comment to post when closing a stale Issue or Pull Request.
# closeComment: >
# Your comment here.
# Limit the number of actions per hour, from 1-30. Default is 30
limitPerRun: 30
# Limit to only `issues` or `pulls`
# only: issues
# Optionally, specify configuration settings that are specific to just 'issues' or 'pulls':
pulls:
daysUntilStale: 365
daysUntilClose: 60
markComment: >
This pull request has been automatically marked as stale because it has not had
any activity for over a year. Please post a comment about whether you intend to continue working on it.
Thank you for your contributions.
# issues:
# exemptLabels:
# - confirmed

35
.gitmodules vendored

@ -1,6 +1,7 @@
[submodule "contrib/poco"] [submodule "contrib/poco"]
path = contrib/poco path = contrib/poco
url = https://github.com/ClickHouse-Extras/poco url = https://github.com/ClickHouse-Extras/poco
branch = clickhouse
[submodule "contrib/zstd"] [submodule "contrib/zstd"]
path = contrib/zstd path = contrib/zstd
url = https://github.com/facebook/zstd.git url = https://github.com/facebook/zstd.git
@ -28,9 +29,6 @@
[submodule "contrib/re2"] [submodule "contrib/re2"]
path = contrib/re2 path = contrib/re2
url = https://github.com/google/re2.git url = https://github.com/google/re2.git
[submodule "contrib/ssl"]
path = contrib/ssl
url = https://github.com/ClickHouse-Extras/ssl.git
[submodule "contrib/llvm"] [submodule "contrib/llvm"]
path = contrib/llvm path = contrib/llvm
url = https://github.com/ClickHouse-Extras/llvm url = https://github.com/ClickHouse-Extras/llvm
@ -69,10 +67,10 @@
url = https://github.com/ClickHouse-Extras/libgsasl.git
[submodule "contrib/libcxx"]
path = contrib/libcxx
url = https://github.com/llvm-mirror/libcxx.git
url = https://github.com/ClickHouse-Extras/libcxx.git
[submodule "contrib/libcxxabi"]
path = contrib/libcxxabi
url = https://github.com/llvm-mirror/libcxxabi.git
url = https://github.com/ClickHouse-Extras/libcxxabi.git
[submodule "contrib/snappy"] [submodule "contrib/snappy"]
path = contrib/snappy path = contrib/snappy
url = https://github.com/google/snappy url = https://github.com/google/snappy
@ -106,3 +104,30 @@
[submodule "contrib/sparsehash-c11"] [submodule "contrib/sparsehash-c11"]
path = contrib/sparsehash-c11 path = contrib/sparsehash-c11
url = https://github.com/sparsehash/sparsehash-c11.git url = https://github.com/sparsehash/sparsehash-c11.git
[submodule "contrib/aws"]
path = contrib/aws
url = https://github.com/aws/aws-sdk-cpp.git
[submodule "aws-c-event-stream"]
path = contrib/aws-c-event-stream
url = https://github.com/awslabs/aws-c-event-stream.git
[submodule "aws-c-common"]
path = contrib/aws-c-common
url = https://github.com/awslabs/aws-c-common.git
[submodule "aws-checksums"]
path = contrib/aws-checksums
url = https://github.com/awslabs/aws-checksums.git
[submodule "contrib/curl"]
path = contrib/curl
url = https://github.com/curl/curl.git
[submodule "contrib/openssl"]
path = contrib/openssl
url = https://github.com/ClickHouse-Extras/openssl.git
[submodule "contrib/icudata"]
path = contrib/icudata
url = https://github.com/ClickHouse-Extras/icudata.git
[submodule "contrib/icu"]
path = contrib/icu
url = https://github.com/unicode-org/icu.git
[submodule "contrib/libc-headers"]
path = contrib/libc-headers
url = https://github.com/ClickHouse-Extras/libc-headers.git


@ -1,3 +1,375 @@
## ClickHouse release v19.17.4.11, 2019-11-22
### Backward Incompatible Change
* Using column instead of AST to store scalar subquery results for better performance. Setting `enable_scalar_subquery_optimization` was added in 19.17 and it was enabled by default. It leads to errors like [this](https://github.com/ClickHouse/ClickHouse/issues/7851) during upgrade to 19.17.2 or 19.17.3 from previous versions. This setting was disabled by default in 19.17.4 to make it possible to upgrade from 19.16 and older versions without errors. [#7392](https://github.com/ClickHouse/ClickHouse/pull/7392) ([Amos Bird](https://github.com/amosbird))
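If the upgrade errors described above appear, a minimal workaround sketch (assuming the setting is available in your server version) is to turn the optimization off explicitly:

```sql
-- Disable the scalar subquery optimization for the current session.
SET enable_scalar_subquery_optimization = 0;

-- Queries with scalar subqueries like this one are the kind affected by the setting.
SELECT (SELECT max(number) FROM numbers(10)) AS max_n;
```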
### New Feature
* Add the ability to create dictionaries with DDL queries. [#7360](https://github.com/ClickHouse/ClickHouse/pull/7360) ([alesapin](https://github.com/alesapin))
* Make the `bloom_filter` index type support `LowCardinality` and `Nullable` [#7363](https://github.com/ClickHouse/ClickHouse/issues/7363) [#7561](https://github.com/ClickHouse/ClickHouse/pull/7561) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
* Add function `isValidJSON` to check that the passed string is valid JSON. [#5910](https://github.com/ClickHouse/ClickHouse/issues/5910) [#7293](https://github.com/ClickHouse/ClickHouse/pull/7293) ([Vdimir](https://github.com/Vdimir))
* Implement `arrayCompact` function [#7328](https://github.com/ClickHouse/ClickHouse/pull/7328) ([Memo](https://github.com/Joeywzr))
* Created function `hex` for Decimal numbers. It works like `hex(reinterpretAsString())`, but does not delete trailing zero bytes. [#7355](https://github.com/ClickHouse/ClickHouse/pull/7355) ([Mikhail Korotov](https://github.com/millb))
* Add `arrayFill` and `arrayReverseFill` functions, which replace elements by other elements in front/back of them in the array. [#7380](https://github.com/ClickHouse/ClickHouse/pull/7380) ([hcz](https://github.com/hczhcz))
* Add `CRC32IEEE()`/`CRC64()` support [#7480](https://github.com/ClickHouse/ClickHouse/pull/7480) ([Azat Khuzhin](https://github.com/azat))
* Implement `char` function similar to one in [mysql](https://dev.mysql.com/doc/refman/8.0/en/string-functions.html#function_char) [#7486](https://github.com/ClickHouse/ClickHouse/pull/7486) ([sundyli](https://github.com/sundy-li))
* Add `bitmapTransform` function. It transforms an array of values in a bitmap to another array of values; the result is a new bitmap [#7598](https://github.com/ClickHouse/ClickHouse/pull/7598) ([Zhichang Yu](https://github.com/yuzhichang))
* Implemented `javaHashUTF16LE()` function [#7651](https://github.com/ClickHouse/ClickHouse/pull/7651) ([achimbab](https://github.com/achimbab))
* Add `_shard_num` virtual column for the Distributed engine [#7624](https://github.com/ClickHouse/ClickHouse/pull/7624) ([Azat Khuzhin](https://github.com/azat))
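For illustration, a few query sketches exercising the functions listed above; the distributed table name is hypothetical and the commented results are indicative only:

```sql
SELECT isValidJSON('{"a": 1}');                    -- 1 for valid JSON, 0 otherwise
SELECT arrayCompact([1, 1, 2, 2, 3]);              -- collapses consecutive duplicates: [1, 2, 3]
SELECT char(72, 105);                              -- builds a string from code points: 'Hi'
SELECT arrayFill(x -> x != 0, [1, 0, 0, 5, 0]);    -- fills "holes" with the previous kept value: [1, 1, 1, 5, 5]
SELECT hex(toDecimal64(3.14, 2));                  -- hex of the Decimal's binary representation, trailing zero bytes kept
SELECT _shard_num, count() FROM my_distributed_table GROUP BY _shard_num;  -- per-shard row counts (hypothetical Distributed table)
```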
### Experimental Feature
* Support for processors (new query execution pipeline) in `MergeTree`. [#7181](https://github.com/ClickHouse/ClickHouse/pull/7181) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
### Bug Fix
* Fix incorrect float parsing in `Values` [#7817](https://github.com/ClickHouse/ClickHouse/issues/7817) [#7870](https://github.com/ClickHouse/ClickHouse/pull/7870) ([tavplubix](https://github.com/tavplubix))
* Fix rare deadlock which can happen when trace_log is enabled. [#7838](https://github.com/ClickHouse/ClickHouse/pull/7838) ([filimonov](https://github.com/filimonov))
* Prevent message duplication when producing into a Kafka table that has any MVs selecting from it [#7265](https://github.com/ClickHouse/ClickHouse/pull/7265) ([Ivan](https://github.com/abyss7))
* Support for `Array(LowCardinality(Nullable(String)))` in `IN`. Resolves [#7364](https://github.com/ClickHouse/ClickHouse/issues/7364) [#7366](https://github.com/ClickHouse/ClickHouse/pull/7366) ([achimbab](https://github.com/achimbab))
* Add handling of `SQL_TINYINT` and `SQL_BIGINT`, and fix handling of `SQL_FLOAT` data source types in ODBC Bridge. [#7491](https://github.com/ClickHouse/ClickHouse/pull/7491) ([Denis Glazachev](https://github.com/traceon))
* Fix aggregation (`avg` and quantiles) over empty decimal columns [#7431](https://github.com/ClickHouse/ClickHouse/pull/7431) ([Andrey Konyaev](https://github.com/akonyaev90))
* Fix `INSERT` into Distributed with `MATERIALIZED` columns [#7377](https://github.com/ClickHouse/ClickHouse/pull/7377) ([Azat Khuzhin](https://github.com/azat))
* Make `MOVE PARTITION` work if some parts of partition are already on destination disk or volume [#7434](https://github.com/ClickHouse/ClickHouse/pull/7434) ([Vladimir Chebotarev](https://github.com/excitoon))
* Fixed bug with hardlinks failing to be created during mutations in `ReplicatedMergeTree` in multi-disk configurations. [#7558](https://github.com/ClickHouse/ClickHouse/pull/7558) ([Vladimir Chebotarev](https://github.com/excitoon))
* Fixed a bug with a mutation on a MergeTree when whole part remains unchanged and best space is being found on another disk [#7602](https://github.com/ClickHouse/ClickHouse/pull/7602) ([Vladimir Chebotarev](https://github.com/excitoon))
* Fixed bug with `keep_free_space_ratio` not being read from disks configuration [#7645](https://github.com/ClickHouse/ClickHouse/pull/7645) ([Vladimir Chebotarev](https://github.com/excitoon))
* Fix bug with tables that contain only `Tuple` columns or columns with complex paths. Fixes [7541](https://github.com/ClickHouse/ClickHouse/issues/7541). [#7545](https://github.com/ClickHouse/ClickHouse/pull/7545) ([alesapin](https://github.com/alesapin))
* Do not account memory for Buffer engine in max_memory_usage limit [#7552](https://github.com/ClickHouse/ClickHouse/pull/7552) ([Azat Khuzhin](https://github.com/azat))
* Fix final mark usage in `MergeTree` tables ordered by `tuple()`. In rare cases it could lead to a `Can't adjust last granule` error during `SELECT`. [#7639](https://github.com/ClickHouse/ClickHouse/pull/7639) ([Anton Popov](https://github.com/CurtizJ))
* Fix bug in mutations that have predicate with actions that require context (for example functions for json), which may lead to crashes or strange exceptions. [#7664](https://github.com/ClickHouse/ClickHouse/pull/7664) ([alesapin](https://github.com/alesapin))
* Fix mismatch of database and table names escaping in `data/` and `shadow/` directories [#7575](https://github.com/ClickHouse/ClickHouse/pull/7575) ([Alexander Burmak](https://github.com/Alex-Burmak))
* Support duplicated keys in RIGHT|FULL JOINs, e.g. ```ON t.x = u.x AND t.x = u.y```. Fix crash in this case. [#7586](https://github.com/ClickHouse/ClickHouse/pull/7586) ([Artem Zuikov](https://github.com/4ertus2))
* Fix `Not found column <expression> in block` when joining on expression with RIGHT or FULL JOIN. [#7641](https://github.com/ClickHouse/ClickHouse/pull/7641) ([Artem Zuikov](https://github.com/4ertus2))
* One more attempt to fix infinite loop in `PrettySpace` format [#7591](https://github.com/ClickHouse/ClickHouse/pull/7591) ([Olga Khvostikova](https://github.com/stavrolia))
* Fix bug in `concat` function when all arguments were `FixedString` of the same size. [#7635](https://github.com/ClickHouse/ClickHouse/pull/7635) ([alesapin](https://github.com/alesapin))
* Fixed exception in case of using 1 argument while defining S3, URL and HDFS storages. [#7618](https://github.com/ClickHouse/ClickHouse/pull/7618) ([Vladimir Chebotarev](https://github.com/excitoon))
* Fix scope of the InterpreterSelectQuery for views with query [#7601](https://github.com/ClickHouse/ClickHouse/pull/7601) ([Azat Khuzhin](https://github.com/azat))
### Improvement
* `Nullable` columns recognized and NULL-values handled correctly by ODBC-bridge [#7402](https://github.com/ClickHouse/ClickHouse/pull/7402) ([Vasily Nemkov](https://github.com/Enmk))
* Write current batch for distributed send atomically [#7600](https://github.com/ClickHouse/ClickHouse/pull/7600) ([Azat Khuzhin](https://github.com/azat))
* Throw an exception if we cannot detect table for column name in query. [#7358](https://github.com/ClickHouse/ClickHouse/pull/7358) ([Artem Zuikov](https://github.com/4ertus2))
* Add `merge_max_block_size` setting to `MergeTreeSettings` [#7412](https://github.com/ClickHouse/ClickHouse/pull/7412) ([Artem Zuikov](https://github.com/4ertus2))
* Queries with `HAVING` and without `GROUP BY` assume group by constant. So, `SELECT 1 HAVING 1` now returns a result. [#7496](https://github.com/ClickHouse/ClickHouse/pull/7496) ([Amos Bird](https://github.com/amosbird))
* Support parsing `(X,)` as tuple similar to python. [#7501](https://github.com/ClickHouse/ClickHouse/pull/7501), [#7562](https://github.com/ClickHouse/ClickHouse/pull/7562) ([Amos Bird](https://github.com/amosbird))
* Make the `range` function behave almost like the Python one. [#7518](https://github.com/ClickHouse/ClickHouse/pull/7518) ([sundyli](https://github.com/sundy-li))
* Add `constraints` columns to table `system.settings` [#7553](https://github.com/ClickHouse/ClickHouse/pull/7553) ([Vitaly Baranov](https://github.com/vitlibar))
* Better Null format for tcp handler, so that it's possible to use `select ignore(<expression>) from table format Null` for perf measure via clickhouse-client [#7606](https://github.com/ClickHouse/ClickHouse/pull/7606) ([Amos Bird](https://github.com/amosbird))
* Queries like `CREATE TABLE ... AS (SELECT (1, 2))` are parsed correctly [#7542](https://github.com/ClickHouse/ClickHouse/pull/7542) ([hcz](https://github.com/hczhcz))
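Several of the improvements above can be tried directly; an illustrative sketch (column and table names are hypothetical):

```sql
SELECT 1 HAVING 1;                               -- now returns a row: GROUP BY a constant is assumed
SELECT (1,);                                     -- one-element tuple, parsed as in Python
SELECT range(5), range(1, 5), range(1, 10, 2);   -- Python-like range with start/stop/step
-- Measure query throughput without formatting overhead on the client:
SELECT ignore(some_column) FROM some_table FORMAT Null;
```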
### Performance Improvement
* The performance of aggregation over short string keys is improved. [#6243](https://github.com/ClickHouse/ClickHouse/pull/6243) ([Alexander Kuzmenkov](https://github.com/akuzm), [Amos Bird](https://github.com/amosbird))
* Run another pass of syntax/expression analysis to get potential optimizations after constant predicates are folded. [#7497](https://github.com/ClickHouse/ClickHouse/pull/7497) ([Amos Bird](https://github.com/amosbird))
* Use storage meta info to evaluate trivial `SELECT count() FROM table;` [#7510](https://github.com/ClickHouse/ClickHouse/pull/7510) ([Amos Bird](https://github.com/amosbird), [alexey-milovidov](https://github.com/alexey-milovidov))
* Vectorize processing `arrayReduce` similar to Aggregator `addBatch`. [#7608](https://github.com/ClickHouse/ClickHouse/pull/7608) ([Amos Bird](https://github.com/amosbird))
* Minor improvements in performance of `Kafka` consumption [#7475](https://github.com/ClickHouse/ClickHouse/pull/7475) ([Ivan](https://github.com/abyss7))
### Build/Testing/Packaging Improvement
* Add support for cross-compiling to the CPU architecture AARCH64. Refactor packager script. [#7370](https://github.com/ClickHouse/ClickHouse/pull/7370) [#7539](https://github.com/ClickHouse/ClickHouse/pull/7539) ([Ivan](https://github.com/abyss7))
* Unpack darwin-x86_64 and linux-aarch64 toolchains into mounted Docker volume when building packages [#7534](https://github.com/ClickHouse/ClickHouse/pull/7534) ([Ivan](https://github.com/abyss7))
* Update Docker Image for Binary Packager [#7474](https://github.com/ClickHouse/ClickHouse/pull/7474) ([Ivan](https://github.com/abyss7))
* Fixed compile errors on MacOS Catalina [#7585](https://github.com/ClickHouse/ClickHouse/pull/7585) ([Ernest Poletaev](https://github.com/ernestp))
* Some refactoring in query analysis logic: split complex class into several simple ones. [#7454](https://github.com/ClickHouse/ClickHouse/pull/7454) ([Artem Zuikov](https://github.com/4ertus2))
* Fix build without submodules [#7295](https://github.com/ClickHouse/ClickHouse/pull/7295) ([proller](https://github.com/proller))
* Better `add_globs` in CMake files [#7418](https://github.com/ClickHouse/ClickHouse/pull/7418) ([Amos Bird](https://github.com/amosbird))
* Remove hardcoded paths in `unwind` target [#7460](https://github.com/ClickHouse/ClickHouse/pull/7460) ([Konstantin Podshumok](https://github.com/podshumok))
* Allow to use mysql format without ssl [#7524](https://github.com/ClickHouse/ClickHouse/pull/7524) ([proller](https://github.com/proller))
### Other
* Added ANTLR4 grammar for ClickHouse SQL dialect [#7595](https://github.com/ClickHouse/ClickHouse/issues/7595) [#7596](https://github.com/ClickHouse/ClickHouse/pull/7596) ([alexey-milovidov](https://github.com/alexey-milovidov))
## ClickHouse release v19.16.2.2, 2019-10-30
### Backward Incompatible Change
* Add missing arity validation for count/countIf.
[#7095](https://github.com/ClickHouse/ClickHouse/issues/7095)
[#7298](https://github.com/ClickHouse/ClickHouse/pull/7298) ([Vdimir](https://github.com/Vdimir))
* Remove legacy `asterisk_left_columns_only` setting (it was disabled by default).
[#7335](https://github.com/ClickHouse/ClickHouse/pull/7335) ([Artem
Zuikov](https://github.com/4ertus2))
* Format strings for Template data format are now specified in files.
[#7118](https://github.com/ClickHouse/ClickHouse/pull/7118)
([tavplubix](https://github.com/tavplubix))
### New Feature
* Introduce uniqCombined64() to calculate cardinality greater than UINT_MAX.
[#7213](https://github.com/ClickHouse/ClickHouse/pull/7213),
[#7222](https://github.com/ClickHouse/ClickHouse/pull/7222) ([Azat
Khuzhin](https://github.com/azat))
* Support Bloom filter indexes on Array columns.
[#6984](https://github.com/ClickHouse/ClickHouse/pull/6984)
([achimbab](https://github.com/achimbab))
* Add a function `getMacro(name)` that returns String with the value of corresponding `<macros>`
from server configuration. [#7240](https://github.com/ClickHouse/ClickHouse/pull/7240)
([alexey-milovidov](https://github.com/alexey-milovidov))
* Set two configuration options for a dictionary based on an HTTP source: `credentials` and
`http-headers`. [#7092](https://github.com/ClickHouse/ClickHouse/pull/7092) ([Guillaume
Tassery](https://github.com/YiuRULE))
* Add a new ProfileEvent `Merge` that counts the number of launched background merges.
[#7093](https://github.com/ClickHouse/ClickHouse/pull/7093) ([Mikhail
Korotov](https://github.com/millb))
* Add fullHostName function that returns a fully qualified domain name.
[#7263](https://github.com/ClickHouse/ClickHouse/issues/7263)
[#7291](https://github.com/ClickHouse/ClickHouse/pull/7291) ([sundyli](https://github.com/sundy-li))
* Add functions `arraySplit` and `arrayReverseSplit`, which split an array by "cut off"
conditions. They are useful in time sequence handling.
[#7294](https://github.com/ClickHouse/ClickHouse/pull/7294) ([hcz](https://github.com/hczhcz))
* Add new functions that return the Array of all matched indices in multiMatch family of functions.
[#7299](https://github.com/ClickHouse/ClickHouse/pull/7299) ([Danila
Kutenin](https://github.com/danlark1))
* Add a new database engine `Lazy` that is optimized for storing a large number of small -Log
tables. [#7171](https://github.com/ClickHouse/ClickHouse/pull/7171) ([Nikita
Vasilev](https://github.com/nikvas0))
* Add aggregate functions groupBitmapAnd, -Or, -Xor for bitmap columns. [#7109](https://github.com/ClickHouse/ClickHouse/pull/7109) ([Zhichang
Yu](https://github.com/yuzhichang))
* Add aggregate function combinators -OrNull and -OrDefault, which return null
or default values when there is nothing to aggregate.
[#7331](https://github.com/ClickHouse/ClickHouse/pull/7331)
([hcz](https://github.com/hczhcz))
* Introduce CustomSeparated data format that supports custom escaping and
delimiter rules. [#7118](https://github.com/ClickHouse/ClickHouse/pull/7118)
([tavplubix](https://github.com/tavplubix))
* Support Redis as source of external dictionary. [#4361](https://github.com/ClickHouse/ClickHouse/pull/4361) [#6962](https://github.com/ClickHouse/ClickHouse/pull/6962) ([comunodi](https://github.com/comunodi), [Anton
Popov](https://github.com/CurtizJ))
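Illustrative sketches for some of the functions above; the macro name is an assumption about the server configuration, and the commented results are indicative:

```sql
SELECT uniqCombined64(number) FROM numbers(1000000);   -- 64-bit state, usable beyond UINT_MAX cardinalities
SELECT getMacro('replica');                            -- value of <macros><replica> from the server config (if defined)
SELECT fullHostName();                                 -- fully qualified domain name of the server
SELECT arraySplit((x, flag) -> flag, [1, 2, 3, 4, 5], [1, 0, 0, 1, 0]);   -- [[1, 2, 3], [4, 5]]
SELECT sumOrNull(number)    FROM numbers(10) WHERE number > 100;          -- NULL: nothing to aggregate
SELECT sumOrDefault(number) FROM numbers(10) WHERE number > 100;          -- 0: default of the result type
```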
### Bug Fix
* Fix wrong query result if it has `WHERE IN (SELECT ...)` section and `optimize_read_in_order` is
used. [#7371](https://github.com/ClickHouse/ClickHouse/pull/7371) ([Anton
Popov](https://github.com/CurtizJ))
* Disabled MariaDB authentication plugin, which depends on files outside of project.
[#7140](https://github.com/ClickHouse/ClickHouse/pull/7140) ([Yuriy
Baranov](https://github.com/yurriy))
* Fix exception `Cannot convert column ... because it is constant but values of constants are
different in source and result` which could rarely happen when functions `now()`, `today()`,
`yesterday()`, `randConstant()` are used.
[#7156](https://github.com/ClickHouse/ClickHouse/pull/7156) ([Nikolai
Kochetov](https://github.com/KochetovNicolai))
* Fixed issue of using HTTP keep alive timeout instead of TCP keep alive timeout.
[#7351](https://github.com/ClickHouse/ClickHouse/pull/7351) ([Vasily
Nemkov](https://github.com/Enmk))
* Fixed a segmentation fault in groupBitmapOr (issue [#7109](https://github.com/ClickHouse/ClickHouse/issues/7109)).
[#7289](https://github.com/ClickHouse/ClickHouse/pull/7289) ([Zhichang
Yu](https://github.com/yuzhichang))
* For materialized views the commit for Kafka is called after all data were written.
[#7175](https://github.com/ClickHouse/ClickHouse/pull/7175) ([Ivan](https://github.com/abyss7))
* Fixed wrong `duration_ms` value in `system.part_log` table. It was ten times off.
[#7172](https://github.com/ClickHouse/ClickHouse/pull/7172) ([Vladimir
Chebotarev](https://github.com/excitoon))
* A quick fix to resolve crash in LIVE VIEW table and re-enabling all LIVE VIEW tests.
[#7201](https://github.com/ClickHouse/ClickHouse/pull/7201)
([vzakaznikov](https://github.com/vzakaznikov))
* Serialize NULL values correctly in min/max indexes of MergeTree parts.
[#7234](https://github.com/ClickHouse/ClickHouse/pull/7234) ([Alexander
Kuzmenkov](https://github.com/akuzm))
* Don't put virtual columns to .sql metadata when table is created as `CREATE TABLE AS`.
[#7183](https://github.com/ClickHouse/ClickHouse/pull/7183) ([Ivan](https://github.com/abyss7))
* Fix segmentation fault in `ATTACH PART` query.
[#7185](https://github.com/ClickHouse/ClickHouse/pull/7185)
([alesapin](https://github.com/alesapin))
* Fix wrong result for some queries given by the optimization of empty IN subqueries and empty
INNER/RIGHT JOIN. [#7284](https://github.com/ClickHouse/ClickHouse/pull/7284) ([Nikolai
Kochetov](https://github.com/KochetovNicolai))
* Fixing AddressSanitizer error in the LIVE VIEW getHeader() method.
[#7271](https://github.com/ClickHouse/ClickHouse/pull/7271)
([vzakaznikov](https://github.com/vzakaznikov))
### Improvement
* Add a message when a queue_wait_max_ms wait takes place.
[#7390](https://github.com/ClickHouse/ClickHouse/pull/7390) ([Azat
Khuzhin](https://github.com/azat))
* Made setting `s3_min_upload_part_size` table-level.
[#7059](https://github.com/ClickHouse/ClickHouse/pull/7059) ([Vladimir
Chebotarev](https://github.com/excitoon))
* Check TTL in StorageFactory. [#7304](https://github.com/ClickHouse/ClickHouse/pull/7304)
([sundyli](https://github.com/sundy-li))
* Squash left-hand blocks in partial merge join (optimization).
[#7122](https://github.com/ClickHouse/ClickHouse/pull/7122) ([Artem
Zuikov](https://github.com/4ertus2))
* Do not allow non-deterministic functions in mutations of Replicated table engines, because this
can introduce inconsistencies between replicas.
[#7247](https://github.com/ClickHouse/ClickHouse/pull/7247) ([Alexander
Kazakov](https://github.com/Akazz))
* Disable memory tracker while converting exception stack trace to string. It can prevent the loss
of error messages of type `Memory limit exceeded` on server, which caused the `Attempt to read
after eof` exception on client. [#7264](https://github.com/ClickHouse/ClickHouse/pull/7264)
([Nikolai Kochetov](https://github.com/KochetovNicolai))
* Miscellaneous format improvements. Resolves
[#6033](https://github.com/ClickHouse/ClickHouse/issues/6033),
[#2633](https://github.com/ClickHouse/ClickHouse/issues/2633),
[#6611](https://github.com/ClickHouse/ClickHouse/issues/6611),
[#6742](https://github.com/ClickHouse/ClickHouse/issues/6742)
[#7215](https://github.com/ClickHouse/ClickHouse/pull/7215)
([tavplubix](https://github.com/tavplubix))
* ClickHouse ignores values on the right side of IN operator that are not convertible to the left
side type. Make it work properly for compound types -- Array and Tuple.
[#7283](https://github.com/ClickHouse/ClickHouse/pull/7283) ([Alexander
Kuzmenkov](https://github.com/akuzm))
* Support missing inequalities for ASOF JOIN. It's possible to join less-or-equal variant and strict
greater and less variants for ASOF column in ON syntax.
[#7282](https://github.com/ClickHouse/ClickHouse/pull/7282) ([Artem
Zuikov](https://github.com/4ertus2))
* Optimize partial merge join. [#7070](https://github.com/ClickHouse/ClickHouse/pull/7070)
([Artem Zuikov](https://github.com/4ertus2))
* Do not use more than 98K of memory in uniqCombined functions.
[#7236](https://github.com/ClickHouse/ClickHouse/pull/7236),
[#7270](https://github.com/ClickHouse/ClickHouse/pull/7270) ([Azat
Khuzhin](https://github.com/azat))
* Flush parts of right-hand joining table on disk in PartialMergeJoin (if there is not enough
memory). Load data back when needed. [#7186](https://github.com/ClickHouse/ClickHouse/pull/7186)
([Artem Zuikov](https://github.com/4ertus2))
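For the ASOF JOIN improvement, a sketch of the now-supported inequality variants in the ON clause (table and column names are hypothetical):

```sql
-- Match each event with the closest quote at or before its timestamp (>= variant);
-- the strict > form, and the symmetric < / <= forms, are also accepted for the ASOF column.
SELECT e.id, q.price
FROM events AS e
ASOF LEFT JOIN quotes AS q
    ON e.symbol = q.symbol AND e.event_time >= q.quote_time;
```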
### Performance Improvement
* Speed up joinGet with const arguments by avoiding data duplication.
[#7359](https://github.com/ClickHouse/ClickHouse/pull/7359) ([Amos
Bird](https://github.com/amosbird))
* Return early if the subquery is empty.
[#7007](https://github.com/ClickHouse/ClickHouse/pull/7007) ([小路](https://github.com/nicelulu))
* Optimize parsing of SQL expression in Values.
[#6781](https://github.com/ClickHouse/ClickHouse/pull/6781)
([tavplubix](https://github.com/tavplubix))
### Build/Testing/Packaging Improvement
* Disable some contribs for cross-compilation to Mac OS.
[#7101](https://github.com/ClickHouse/ClickHouse/pull/7101) ([Ivan](https://github.com/abyss7))
* Add missing linking with PocoXML for clickhouse_common_io.
[#7200](https://github.com/ClickHouse/ClickHouse/pull/7200) ([Azat
Khuzhin](https://github.com/azat))
* Accept multiple test filter arguments in clickhouse-test.
[#7226](https://github.com/ClickHouse/ClickHouse/pull/7226) ([Alexander
Kuzmenkov](https://github.com/akuzm))
* Enable musl and jemalloc for ARM. [#7300](https://github.com/ClickHouse/ClickHouse/pull/7300)
([Amos Bird](https://github.com/amosbird))
* Added `--client-option` parameter to `clickhouse-test` to pass additional parameters to client.
[#7277](https://github.com/ClickHouse/ClickHouse/pull/7277) ([Nikolai
Kochetov](https://github.com/KochetovNicolai))
* Preserve existing configs on rpm package upgrade.
[#7103](https://github.com/ClickHouse/ClickHouse/pull/7103)
([filimonov](https://github.com/filimonov))
* Fix errors detected by PVS. [#7153](https://github.com/ClickHouse/ClickHouse/pull/7153) ([Artem
Zuikov](https://github.com/4ertus2))
* Fix build for Darwin. [#7149](https://github.com/ClickHouse/ClickHouse/pull/7149)
([Ivan](https://github.com/abyss7))
* glibc 2.29 compatibility. [#7142](https://github.com/ClickHouse/ClickHouse/pull/7142) ([Amos
Bird](https://github.com/amosbird))
* Make sure dh_clean does not touch potential source files.
[#7205](https://github.com/ClickHouse/ClickHouse/pull/7205) ([Amos
Bird](https://github.com/amosbird))
* Attempt to avoid conflict when updating from altinity rpm - it has config file packaged separately
in clickhouse-server-common. [#7073](https://github.com/ClickHouse/ClickHouse/pull/7073)
([filimonov](https://github.com/filimonov))
* Optimize some header files for faster rebuilds.
[#7212](https://github.com/ClickHouse/ClickHouse/pull/7212),
[#7231](https://github.com/ClickHouse/ClickHouse/pull/7231) ([Alexander
Kuzmenkov](https://github.com/akuzm))
* Add performance tests for Date and DateTime. [#7332](https://github.com/ClickHouse/ClickHouse/pull/7332) ([Vasily
Nemkov](https://github.com/Enmk))
* Fix some tests that contained non-deterministic mutations.
[#7132](https://github.com/ClickHouse/ClickHouse/pull/7132) ([Alexander
Kazakov](https://github.com/Akazz))
* Add build with MemorySanitizer to CI. [#7066](https://github.com/ClickHouse/ClickHouse/pull/7066)
([Alexander Kuzmenkov](https://github.com/akuzm))
* Avoid use of uninitialized values in MetricsTransmitter.
[#7158](https://github.com/ClickHouse/ClickHouse/pull/7158) ([Azat
Khuzhin](https://github.com/azat))
* Fix some issues in Fields found by MemorySanitizer.
[#7135](https://github.com/ClickHouse/ClickHouse/pull/7135),
[#7179](https://github.com/ClickHouse/ClickHouse/pull/7179) ([Alexander
Kuzmenkov](https://github.com/akuzm)), [#7376](https://github.com/ClickHouse/ClickHouse/pull/7376)
([Amos Bird](https://github.com/amosbird))
* Fix undefined behavior in murmurhash32. [#7388](https://github.com/ClickHouse/ClickHouse/pull/7388) ([Amos
Bird](https://github.com/amosbird))
* Fix undefined behavior in StoragesInfoStream. [#7384](https://github.com/ClickHouse/ClickHouse/pull/7384)
([tavplubix](https://github.com/tavplubix))
* Fixed constant expressions folding for external database engines (MySQL, ODBC, JDBC). In previous
versions it wasn't working for multiple constant expressions and was not working at all for Date,
DateTime and UUID. This fixes [#7245](https://github.com/ClickHouse/ClickHouse/issues/7245)
[#7252](https://github.com/ClickHouse/ClickHouse/pull/7252)
([alexey-milovidov](https://github.com/alexey-milovidov))
* Fixing ThreadSanitizer data race error in the LIVE VIEW when accessing no_users_thread variable.
[#7353](https://github.com/ClickHouse/ClickHouse/pull/7353)
([vzakaznikov](https://github.com/vzakaznikov))
* Get rid of malloc symbols in libcommon
[#7134](https://github.com/ClickHouse/ClickHouse/pull/7134),
[#7065](https://github.com/ClickHouse/ClickHouse/pull/7065) ([Amos
Bird](https://github.com/amosbird))
* Add global flag ENABLE_LIBRARIES for disabling all libraries.
[#7063](https://github.com/ClickHouse/ClickHouse/pull/7063)
([proller](https://github.com/proller))
### Code cleanup
* Generalize configuration repository to prepare for DDL for Dictionaries. [#7155](https://github.com/ClickHouse/ClickHouse/pull/7155)
([alesapin](https://github.com/alesapin))
* Parser for dictionaries DDL without any semantics.
[#7209](https://github.com/ClickHouse/ClickHouse/pull/7209)
([alesapin](https://github.com/alesapin))
* Split ParserCreateQuery into different smaller parsers.
[#7253](https://github.com/ClickHouse/ClickHouse/pull/7253)
([alesapin](https://github.com/alesapin))
* Small refactoring and renaming near external dictionaries.
[#7111](https://github.com/ClickHouse/ClickHouse/pull/7111)
([alesapin](https://github.com/alesapin))
* Refactor some code to prepare for role-based access control. [#7235](https://github.com/ClickHouse/ClickHouse/pull/7235) ([Vitaly
Baranov](https://github.com/vitlibar))
* Some improvements in DatabaseOrdinary code.
[#7086](https://github.com/ClickHouse/ClickHouse/pull/7086) ([Nikita
Vasilev](https://github.com/nikvas0))
* Do not use iterators in find() and emplace() methods of hash tables.
[#7026](https://github.com/ClickHouse/ClickHouse/pull/7026) ([Alexander
Kuzmenkov](https://github.com/akuzm))
* Fix getMultipleValuesFromConfig in case when parameter root is not empty. [#7374](https://github.com/ClickHouse/ClickHouse/pull/7374)
([Mikhail Korotov](https://github.com/millb))
* Remove some copy-paste (TemporaryFile and TemporaryFileStream)
[#7166](https://github.com/ClickHouse/ClickHouse/pull/7166) ([Artem
Zuikov](https://github.com/4ertus2))
* Improved code readability a little bit (`MergeTreeData::getActiveContainingPart`).
[#7361](https://github.com/ClickHouse/ClickHouse/pull/7361) ([Vladimir
Chebotarev](https://github.com/excitoon))
* Wait for all scheduled jobs, which are using local objects, if `ThreadPool::schedule(...)` throws
an exception. Rename `ThreadPool::schedule(...)` to `ThreadPool::scheduleOrThrowOnError(...)` and
fix comments to make obvious that it may throw.
[#7350](https://github.com/ClickHouse/ClickHouse/pull/7350)
([tavplubix](https://github.com/tavplubix))
## ClickHouse release 19.15.4.10, 2019-10-31
### Bug Fix
* Added handling of SQL_TINYINT and SQL_BIGINT, and fix handling of SQL_FLOAT data source types in ODBC Bridge.
[#7491](https://github.com/ClickHouse/ClickHouse/pull/7491) ([Denis Glazachev](https://github.com/traceon))
* Allowed to have some parts on destination disk or volume in MOVE PARTITION.
[#7434](https://github.com/ClickHouse/ClickHouse/pull/7434) ([Vladimir Chebotarev](https://github.com/excitoon))
* Fixed NULL-values in nullable columns through ODBC-bridge.
[#7402](https://github.com/ClickHouse/ClickHouse/pull/7402) ([Vasily Nemkov](https://github.com/Enmk))
* Fixed INSERT into Distributed non local node with MATERIALIZED columns.
[#7377](https://github.com/ClickHouse/ClickHouse/pull/7377) ([Azat Khuzhin](https://github.com/azat))
* Fixed function getMultipleValuesFromConfig.
[#7374](https://github.com/ClickHouse/ClickHouse/pull/7374) ([Mikhail Korotov](https://github.com/millb))
* Fixed issue of using HTTP keep alive timeout instead of TCP keep alive timeout.
[#7351](https://github.com/ClickHouse/ClickHouse/pull/7351) ([Vasily Nemkov](https://github.com/Enmk))
* Wait for all jobs to finish on exception (fixes rare segfaults).
[#7350](https://github.com/ClickHouse/ClickHouse/pull/7350) ([tavplubix](https://github.com/tavplubix))
* Don't push to MVs when inserting into Kafka table.
[#7265](https://github.com/ClickHouse/ClickHouse/pull/7265) ([Ivan](https://github.com/abyss7))
* Disable memory tracker for exception stack.
[#7264](https://github.com/ClickHouse/ClickHouse/pull/7264) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
* Fixed bad code in transforming query for external database.
[#7252](https://github.com/ClickHouse/ClickHouse/pull/7252) ([alexey-milovidov](https://github.com/alexey-milovidov))
* Avoid use of uninitialized values in MetricsTransmitter.
[#7158](https://github.com/ClickHouse/ClickHouse/pull/7158) ([Azat Khuzhin](https://github.com/azat))
* Added example config with macros for tests ([alexey-milovidov](https://github.com/alexey-milovidov))
## ClickHouse release 19.15.3.6, 2019-10-09
### Bug Fix
* Fixed bad_variant in hashed dictionary.
([alesapin](https://github.com/alesapin))
* Fixed up bug with segmentation fault in ATTACH PART query.
([alesapin](https://github.com/alesapin))
* Fixed time calculation in `MergeTreeData`.
([Vladimir Chebotarev](https://github.com/excitoon))
* Commit to Kafka explicitly after the writing is finalized.
[#7175](https://github.com/ClickHouse/ClickHouse/pull/7175) ([Ivan](https://github.com/abyss7))
* Serialize NULL values correctly in min/max indexes of MergeTree parts.
[#7234](https://github.com/ClickHouse/ClickHouse/pull/7234) ([Alexander Kuzmenkov](https://github.com/akuzm))
## ClickHouse release 19.15.2.2, 2019-10-01
### New Feature
@ -105,7 +477,7 @@
* Fix segfault with enabled `optimize_skip_unused_shards` and missing sharding key. [#6384](https://github.com/ClickHouse/ClickHouse/pull/6384) ([Anton Popov](https://github.com/CurtizJ))
* Fixed wrong code in mutations that may lead to memory corruption. Fixed segfault with read of address `0x14c0` that may happen due to concurrent `DROP TABLE` and `SELECT` from `system.parts` or `system.parts_columns`. Fixed race condition in preparation of mutation queries. Fixed deadlock caused by `OPTIMIZE` of Replicated tables and concurrent modification operations like ALTERs. [#6514](https://github.com/ClickHouse/ClickHouse/pull/6514) ([alexey-milovidov](https://github.com/alexey-milovidov))
* Removed extra verbose logging in MySQL interface [#6389](https://github.com/ClickHouse/ClickHouse/pull/6389) ([alexey-milovidov](https://github.com/alexey-milovidov))
* Return the ability to parse boolean settings from 'true' and 'false' in the configuration file. [#6278](https://github.com/ClickHouse/ClickHouse/pull/6278) ([alesapin](https://github.com/alesapin))
* Fix crash in `quantile` and `median` function over `Nullable(Decimal128)`. [#6378](https://github.com/ClickHouse/ClickHouse/pull/6378) ([Artem Zuikov](https://github.com/4ertus2))
* Fixed possible incomplete result returned by `SELECT` query with a `WHERE` condition on a primary key that contained conversion to Float type. It was caused by incorrect checking of monotonicity in `toFloat` function. [#6248](https://github.com/ClickHouse/ClickHouse/issues/6248) [#6374](https://github.com/ClickHouse/ClickHouse/pull/6374) ([dimarub2000](https://github.com/dimarub2000))
* Check `max_expanded_ast_elements` setting for mutations. Clear mutations after `TRUNCATE TABLE`. [#6205](https://github.com/ClickHouse/ClickHouse/pull/6205) ([Winter Zhang](https://github.com/zhang2014))
@ -133,8 +505,8 @@
* Fix bug with writing secondary indices marks with adaptive granularity. [#6126](https://github.com/ClickHouse/ClickHouse/pull/6126) ([alesapin](https://github.com/alesapin))
* Fix initialization order while server startup. Since `StorageMergeTree::background_task_handle` is initialized in `startup()` the `MergeTreeBlockOutputStream::write()` may try to use it before initialization. Just check if it is initialized. [#6080](https://github.com/ClickHouse/ClickHouse/pull/6080) ([Ivan](https://github.com/abyss7))
* Clearing the data buffer from the previous read operation that was completed with an error. [#6026](https://github.com/ClickHouse/ClickHouse/pull/6026) ([Nikolay](https://github.com/bopohaa))
* Fix bug with enabling adaptive granularity when creating a new replica for Replicated*MergeTree table. [#6394](https://github.com/ClickHouse/ClickHouse/issues/6394) [#6452](https://github.com/ClickHouse/ClickHouse/pull/6452) ([alesapin](https://github.com/alesapin))
* Fixed possible crash during server startup in case of exception happened in `libunwind` during exception at access to uninitialized `ThreadStatus` structure. [#6456](https://github.com/ClickHouse/ClickHouse/pull/6456) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
* Fix crash in `yandexConsistentHash` function. Found by fuzz test. [#6304](https://github.com/ClickHouse/ClickHouse/issues/6304) [#6305](https://github.com/ClickHouse/ClickHouse/pull/6305) ([alexey-milovidov](https://github.com/alexey-milovidov))
* Fixed the possibility of hanging queries when the server is overloaded and the global thread pool becomes nearly full. This has a higher chance of happening on clusters with a large number of shards (hundreds), because distributed queries allocate a thread per connection to each shard. For example, this issue may reproduce if a cluster of 330 shards is processing 30 concurrent distributed queries. This issue affects all versions starting from 19.2. [#6301](https://github.com/ClickHouse/ClickHouse/pull/6301) ([alexey-milovidov](https://github.com/alexey-milovidov))
* Fixed logic of `arrayEnumerateUniqRanked` function. [#6423](https://github.com/ClickHouse/ClickHouse/pull/6423) ([alexey-milovidov](https://github.com/alexey-milovidov))
@ -307,7 +679,7 @@
### Backward Incompatible Change
* Removed rarely used table function `catBoostPool` and storage `CatBoostPool`. If you have used this table function, please write email to `clickhouse-feedback@yandex-team.com`. Note that CatBoost integration remains and will be supported. [#6279](https://github.com/ClickHouse/ClickHouse/pull/6279) ([alexey-milovidov](https://github.com/alexey-milovidov))
* Disable `ANY RIGHT JOIN` and `ANY FULL JOIN` by default. Set `any_join_distinct_right_table_keys` setting to enable them. [#5126](https://github.com/ClickHouse/ClickHouse/issues/5126) [#6351](https://github.com/ClickHouse/ClickHouse/pull/6351) ([Artem Zuikov](https://github.com/4ertus2))
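If the old behavior is needed, a minimal sketch of re-enabling it per the entry above (table and key names are hypothetical):

```sql
SET any_join_distinct_right_table_keys = 1;
SELECT * FROM t1 ANY RIGHT JOIN t2 USING (k);
```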
## ClickHouse release 19.13.6.51, 2019-10-02
@ -345,6 +717,13 @@
### Security Fix
* Fix two vulnerabilities in codecs in decompression phase (malicious user can fabricate compressed data that will lead to buffer overflow in decompression). [#6670](https://github.com/ClickHouse/ClickHouse/pull/6670) ([Artem Zuikov](https://github.com/4ertus2))
## ClickHouse release 19.11.13.74, 2019-11-01
### Bug Fix
* Fixed rare crash in `ALTER MODIFY COLUMN` and vertical merge when one of merged/altered parts is empty (0 rows). [#6780](https://github.com/ClickHouse/ClickHouse/pull/6780) ([alesapin](https://github.com/alesapin))
* Manual update of `SIMDJSON`. This fixes possible flooding of stderr files with bogus json diagnostic messages. [#7548](https://github.com/ClickHouse/ClickHouse/pull/7548) ([Alexander Kazakov](https://github.com/Akazz))
* Fixed bug with `mrk` file extension for mutations ([alesapin](https://github.com/alesapin))
## ClickHouse release 19.11.12.69, 2019-10-02
### Bug Fix
@ -371,7 +750,7 @@
* Fix kafka tests. [#6805](https://github.com/ClickHouse/ClickHouse/pull/6805) ([Ivan](https://github.com/abyss7))
### Security Fix
* If the attacker has write access to ZooKeeper and is able to run a custom server available from the network where ClickHouse runs, it can create a custom-built malicious server that will act as a ClickHouse replica and register it in ZooKeeper. When another replica fetches a data part from the malicious replica, it can force clickhouse-server to write to an arbitrary path on the filesystem. Found by Eldar Zaitov, information security team at Yandex. [#6247](https://github.com/ClickHouse/ClickHouse/pull/6247) ([alexey-milovidov](https://github.com/alexey-milovidov))
## ClickHouse release 19.13.3.26, 2019-08-22
@ -399,7 +778,7 @@
* Now the client receives logs from the server at any desired level by setting `send_logs_level`, regardless of the log level specified in server settings. [#5964](https://github.com/ClickHouse/ClickHouse/pull/5964) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
### Backward Incompatible Change
* The setting `input_format_defaults_for_omitted_fields` is enabled by default. Inserts in Distributed tables need this setting to be the same on the cluster (you need to set it before a rolling update). It enables calculation of complex default expressions for omitted fields in `JSONEachRow` and `CSV*` formats. It should be the expected behavior but may lead to a negligible performance difference. [#6043](https://github.com/ClickHouse/ClickHouse/pull/6043) ([Artem Zuikov](https://github.com/4ertus2)), [#5625](https://github.com/ClickHouse/ClickHouse/pull/5625) ([akuzm](https://github.com/akuzm))
### Experimental features
* New query processing pipeline. Use `experimental_use_processors=1` option to enable it. Use at your own risk. [#4914](https://github.com/ClickHouse/ClickHouse/pull/4914) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
@ -1180,7 +1559,7 @@ lee](https://github.com/neverlee))
### Bug fixes
* Fixed error in #3920. This error manifests itself as random cache corruption (messages `Unknown codec family code`, `Cannot seek through file`) and segfaults. This bug first appeared in version 19.1 and is present in versions up to 19.1.10 and 19.3.6. [#4623](https://github.com/ClickHouse/ClickHouse/pull/4623) ([alexey-milovidov](https://github.com/alexey-milovidov))
## ClickHouse release 19.3.6, 2019-03-02
@ -2037,7 +2416,7 @@ The expression must be a chain of equalities joined by the AND operator. Each si
### Improvements:
* Changed the numbering scheme for release versions. Now the first part contains the year of release (A.D., Moscow timezone, minus 2000), the second part contains the number for major changes (increases for most releases), and the third part is the patch version. Releases are still backward compatible, unless otherwise stated in the changelog.
* Faster conversions of floating-point numbers to a string ([Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2664)).
* If some rows were skipped during an insert due to parsing errors (this is possible with the `input_allow_errors_num` and `input_allow_errors_ratio` settings enabled), the number of skipped rows is now written to the server log ([Leonardo Cecchi](https://github.com/ClickHouse/ClickHouse/pull/2669)).
@ -2236,7 +2615,7 @@ The expression must be a chain of equalities joined by the AND operator. Each si
* Configuration of the table level for the `ReplicatedMergeTree` family in order to minimize the amount of data stored in Zookeeper: `use_minimalistic_checksums_in_zookeeper = 1`
* Configuration of the `clickhouse-client` prompt. By default, server names are now output to the prompt. The server's display name can be changed. It's also sent in the `X-ClickHouse-Display-Name` HTTP header (Kirill Shvakov).
* Multiple comma-separated `topics` can be specified for the `Kafka` engine (Tobias Adamson)
* When a query is stopped by `KILL QUERY` or `replace_running_query`, the client receives the `Query was canceled` exception instead of an incomplete result.
### Improvements:


@ -198,14 +198,21 @@ if(WITH_COVERAGE AND COMPILER_GCC)
endif()
set (CMAKE_BUILD_COLOR_MAKEFILE ON)
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COMPILER_FLAGS} ${PLATFORM_EXTRA_CXX_FLAG} -fno-omit-frame-pointer ${COMMON_WARNING_FLAGS} ${CXX_WARNING_FLAGS}")
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COMPILER_FLAGS} ${PLATFORM_EXTRA_CXX_FLAG} ${COMMON_WARNING_FLAGS} ${CXX_WARNING_FLAGS}")
set (CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -O3 ${CMAKE_CXX_FLAGS_ADD}")
set (CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -O0 -g3 -ggdb3 -fno-inline ${CMAKE_CXX_FLAGS_ADD}")
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${COMPILER_FLAGS} -fno-omit-frame-pointer ${COMMON_WARNING_FLAGS} ${CMAKE_C_FLAGS_ADD}")
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${COMPILER_FLAGS} ${COMMON_WARNING_FLAGS} ${CMAKE_C_FLAGS_ADD}")
set (CMAKE_C_FLAGS_RELWITHDEBINFO "${CMAKE_C_FLAGS_RELWITHDEBINFO} -O3 ${CMAKE_C_FLAGS_ADD}")
set (CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -O0 -g3 -ggdb3 -fno-inline ${CMAKE_C_FLAGS_ADD}")
if (COMPILER_CLANG)
# Exception unwinding doesn't work in clang release build without this option
# TODO investigate if contrib/libcxxabi is out of date
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-omit-frame-pointer")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fno-omit-frame-pointer")
endif ()
option (ENABLE_LIBRARIES "Enable all libraries (Global default switch)" ON)
option (UNBUNDLED "Try find all libraries in system. We recommend to avoid this mode for production builds, because we cannot guarantee exact versions and variants of libraries your system has installed. This mode exists for enthusiastic developers who search for trouble. Also it is useful for maintainers of OS packages." OFF)
@ -215,7 +222,7 @@ else ()
set(NOT_UNBUNDLED 1)
endif ()
# Using system libs can cause lot of warnings in includes (on macro expansion).
if (UNBUNDLED OR NOT (OS_LINUX OR APPLE) OR ARCH_32)
option (NO_WERROR "Disable -Werror compiler option" ON)
endif ()
@ -325,6 +332,7 @@ include (cmake/find/brotli.cmake)
include (cmake/find/protobuf.cmake)
include (cmake/find/pdqsort.cmake)
include (cmake/find/hdfs3.cmake) # uses protobuf
include (cmake/find/s3.cmake)
include (cmake/find/consistent-hashing.cmake)
include (cmake/find/base64.cmake)
include (cmake/find/parquet.cmake)
@ -344,7 +352,6 @@ if (ENABLE_TESTS)
endif ()
# Need to process before "contrib" dir:
include (libs/libcommon/cmake/find_gperftools.cmake)
include (libs/libcommon/cmake/find_jemalloc.cmake)
include (libs/libcommon/cmake/find_cctz.cmake)
include (libs/libmysqlxx/cmake/find_mysqlclient.cmake)
@ -354,18 +361,6 @@ include (libs/libmysqlxx/cmake/find_mysqlclient.cmake)
if (USE_JEMALLOC)
message (STATUS "Link jemalloc: ${JEMALLOC_LIBRARIES}")
set (MALLOC_LIBRARIES ${JEMALLOC_LIBRARIES})
elseif (USE_TCMALLOC)
if (DEBUG_TCMALLOC AND NOT GPERFTOOLS_TCMALLOC_MINIMAL_DEBUG)
message (FATAL_ERROR "Requested DEBUG_TCMALLOC but debug library is not found. You should install Google Perftools. Example: sudo apt-get install libgoogle-perftools-dev")
endif ()
if (DEBUG_TCMALLOC AND GPERFTOOLS_TCMALLOC_MINIMAL_DEBUG)
message (STATUS "Link libtcmalloc_minimal_debug for testing: ${GPERFTOOLS_TCMALLOC_MINIMAL_DEBUG}")
set (MALLOC_LIBRARIES ${GPERFTOOLS_TCMALLOC_MINIMAL_DEBUG})
else ()
message (STATUS "Link libtcmalloc_minimal: ${GPERFTOOLS_TCMALLOC_MINIMAL}")
set (MALLOC_LIBRARIES ${GPERFTOOLS_TCMALLOC_MINIMAL})
endif ()
elseif (SANITIZE)
message (STATUS "Will use ${SANITIZE} sanitizer.")
elseif (OS_LINUX)
@ -382,16 +377,12 @@ add_subdirectory (contrib EXCLUDE_FROM_ALL)
macro (add_executable target)
# invoke built-in add_executable
_add_executable (${ARGV}) # explicitly acquire and interpose malloc symbols by clickhouse_malloc
_add_executable (${ARGV} $<TARGET_OBJECTS:clickhouse_malloc>)
get_target_property (type ${target} TYPE)
if (${type} STREQUAL EXECUTABLE)
file (RELATIVE_PATH dir ${CMAKE_SOURCE_DIR} ${CMAKE_CURRENT_SOURCE_DIR}) # operator::new/delete for executables (MemoryTracker stuff)
if (${dir} MATCHES "^dbms") target_link_libraries (${target} PRIVATE clickhouse_new_delete ${MALLOC_LIBRARIES})
# Only interpose operator::new/delete for dbms executables (MemoryTracker stuff)
target_link_libraries (${target} PRIVATE clickhouse_new_delete ${MALLOC_LIBRARIES})
else ()
target_link_libraries (${target} PRIVATE ${MALLOC_LIBRARIES})
endif ()
endif()
endmacro()
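Note on the pattern above: CMake lets a project shadow a built-in command by declaring a macro of the same name, while the original command stays reachable under the underscore-prefixed name (that is how `_add_executable` is called here). A minimal, self-contained sketch of the same interposition idea, with purely illustrative names (`my_malloc_shim` and `shim.c` are not part of this tree):

    # object library whose objects get appended to every executable
    add_library (my_malloc_shim OBJECT shim.c)

    macro (add_executable target)
        # call the original built-in command, still available under the underscore prefix
        _add_executable (${ARGV} $<TARGET_OBJECTS:my_malloc_shim>)
        get_target_property (type ${target} TYPE)
        if (${type} STREQUAL EXECUTABLE)
            # extra libraries that every executable should link
            target_link_libraries (${target} PRIVATE ${CMAKE_DL_LIBS})
        endif ()
    endmacro ()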

View File

@ -11,11 +11,3 @@ ClickHouse is an open-source column-oriented database management system that all
* [Blog](https://clickhouse.yandex/blog/en/) contains various ClickHouse-related articles, as well as announcements and reports about events.
* [Contacts](https://clickhouse.yandex/#contacts) can help to get your questions answered if there are any.
* You can also [fill in this form](https://forms.yandex.com/surveys/meet-yandex-clickhouse-team/) to meet the Yandex ClickHouse team in person.
## Upcoming Events
* [ClickHouse Meetup in Tokyo](https://clickhouse.connpass.com/event/147001/) on November 14.
* [ClickHouse Meetup in Istanbul](https://www.eventbrite.com/e/clickhouse-meetup-istanbul-create-blazing-fast-experiences-w-clickhouse-tickets-73101120419) on November 19.
* [ClickHouse Meetup in Ankara](https://www.eventbrite.com/e/clickhouse-meetup-ankara-create-blazing-fast-experiences-w-clickhouse-tickets-73100530655) on November 21.
* [ClickHouse Meetup in Singapore](https://www.meetup.com/Singapore-Clickhouse-Meetup-Group/events/265085331/) on November 23.
* [ClickHouse Meetup in San Francisco](https://www.eventbrite.com/e/clickhouse-december-meetup-registration-78642047481) on December 3.

View File

@ -1,61 +0,0 @@
# https://github.com/vast-io/vast/blob/master/cmake/FindGperftools.cmake
# Tries to find Gperftools.
#
# Usage of this module as follows:
#
# find_package(Gperftools)
#
# Variables used by this module, they can change the default behaviour and need
# to be set before calling find_package:
#
# Gperftools_ROOT_DIR Set this variable to the root installation of
# Gperftools if the module has problems finding
# the proper installation path.
#
# Variables defined by this module:
#
# GPERFTOOLS_FOUND System has Gperftools libs/headers
# GPERFTOOLS_LIBRARIES The Gperftools libraries (tcmalloc & profiler)
# GPERFTOOLS_INCLUDE_DIR The location of Gperftools headers
find_library(GPERFTOOLS_TCMALLOC
NAMES tcmalloc
HINTS ${Gperftools_ROOT_DIR}/lib)
find_library(GPERFTOOLS_TCMALLOC_MINIMAL
NAMES tcmalloc_minimal
HINTS ${Gperftools_ROOT_DIR}/lib)
find_library(GPERFTOOLS_TCMALLOC_MINIMAL_DEBUG
NAMES tcmalloc_minimal_debug
HINTS ${Gperftools_ROOT_DIR}/lib)
find_library(GPERFTOOLS_PROFILER
NAMES profiler
HINTS ${Gperftools_ROOT_DIR}/lib)
find_library(GPERFTOOLS_TCMALLOC_AND_PROFILER
NAMES tcmalloc_and_profiler
HINTS ${Gperftools_ROOT_DIR}/lib)
find_path(GPERFTOOLS_INCLUDE_DIR
NAMES gperftools/heap-profiler.h
HINTS ${Gperftools_ROOT_DIR}/include)
set(GPERFTOOLS_LIBRARIES ${GPERFTOOLS_TCMALLOC_AND_PROFILER})
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(
Gperftools
DEFAULT_MSG
GPERFTOOLS_LIBRARIES
GPERFTOOLS_INCLUDE_DIR)
mark_as_advanced(
Gperftools_ROOT_DIR
GPERFTOOLS_TCMALLOC
GPERFTOOLS_PROFILER
GPERFTOOLS_TCMALLOC_AND_PROFILER
GPERFTOOLS_LIBRARIES
GPERFTOOLS_INCLUDE_DIR)

View File

@ -19,6 +19,6 @@ if (CMAKE_SYSTEM_PROCESSOR MATCHES "^(ppc64le.*|PPC64LE.*)")
set (ARCH_PPC64LE 1)
# FIXME: move this check into tools.cmake
if (COMPILER_CLANG OR (COMPILER_GCC AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 8))
message(FATAL_ERROR "Only gcc-8 or higher is supported for powerpc architecture")
endif ()
endif ()

View File

@ -4,6 +4,14 @@ if (ENABLE_CAPNP)
option (USE_INTERNAL_CAPNP_LIBRARY "Set to FALSE to use system capnproto library instead of bundled" ${NOT_UNBUNDLED})
if(NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/capnproto/CMakeLists.txt")
if(USE_INTERNAL_CAPNP_LIBRARY)
message(WARNING "submodule contrib/capnproto is missing. to fix try run: \n git submodule update --init --recursive")
endif()
set(MISSING_INTERNAL_CAPNP_LIBRARY 1)
set(USE_INTERNAL_CAPNP_LIBRARY 0)
endif()
# FIXME: refactor to use `add_library( IMPORTED)` if possible.
if (NOT USE_INTERNAL_CAPNP_LIBRARY)
find_library (KJ kj)
@ -11,7 +19,7 @@ if (NOT USE_INTERNAL_CAPNP_LIBRARY)
find_library (CAPNPC capnpc)
set (CAPNP_LIBRARIES ${CAPNPC} ${CAPNP} ${KJ})
elseif(NOT MISSING_INTERNAL_CAPNP_LIBRARY)
add_subdirectory(contrib/capnproto-cmake)
set (CAPNP_LIBRARIES capnpc)
@ -23,4 +31,4 @@ endif ()
endif ()
message (STATUS "Using capnp=${USE_CAPNP}: ${CAPNP_LIBRARIES}")

View File

@ -1,6 +1,18 @@
option(ENABLE_ICU "Enable ICU" ${ENABLE_LIBRARIES})
if (ENABLE_ICU)
option (USE_INTERNAL_ICU_LIBRARY "Set to FALSE to use system ICU library instead of bundled" ${NOT_UNBUNDLED})
if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/icu/icu4c/LICENSE")
if (USE_INTERNAL_ICU_LIBRARY)
message (WARNING "submodule contrib/icu is missing. to fix try run: \n git submodule update --init --recursive")
set (USE_INTERNAL_ICU_LIBRARY 0)
endif ()
set (MISSING_INTERNAL_ICU_LIBRARY 1)
endif ()
if(NOT USE_INTERNAL_ICU_LIBRARY)
if (APPLE)
set(ICU_ROOT "/usr/local/opt/icu4c" CACHE STRING "")
endif()
@ -11,6 +23,16 @@ if(ENABLE_ICU)
endif()
endif()
if (ICU_LIBRARY AND ICU_INCLUDE_DIR)
set (USE_ICU 1)
elseif (NOT MISSING_INTERNAL_ICU_LIBRARY)
set (USE_INTERNAL_ICU_LIBRARY 1)
set (ICU_LIBRARIES icui18n icuuc icudata)
set (USE_ICU 1)
endif ()
endif()
if(USE_ICU)
message(STATUS "Using icu=${USE_ICU}: ${ICU_INCLUDE_DIR} : ${ICU_LIBRARIES}")
else()

View File

@ -1,7 +1,7 @@
# Broken in macos. TODO: update clang, re-test, enable
if (NOT APPLE)
option (ENABLE_EMBEDDED_COMPILER "Set to TRUE to enable support for 'compile_expressions' option for query execution" ${ENABLE_LIBRARIES})
option (USE_INTERNAL_LLVM_LIBRARY "Use bundled or system LLVM library." ${NOT_UNBUNDLED})
endif ()
if (ENABLE_EMBEDDED_COMPILER)
@ -13,27 +13,11 @@ if (ENABLE_EMBEDDED_COMPILER)
if (NOT USE_INTERNAL_LLVM_LIBRARY)
set (LLVM_PATHS "/usr/local/lib/llvm")
if (LLVM_VERSION) foreach(llvm_v 9 8)
find_package(LLVM ${LLVM_VERSION} CONFIG PATHS ${LLVM_PATHS}) if (NOT LLVM_FOUND)
elseif (CMAKE_CXX_COMPILER_ID STREQUAL "Clang") find_package (LLVM ${llvm_v} CONFIG PATHS ${LLVM_PATHS})
find_package(LLVM ${CMAKE_CXX_COMPILER_VERSION} CONFIG PATHS ${LLVM_PATHS})
else ()
# TODO: 9 8
foreach(llvm_v 7.1 7 6 5)
if (NOT LLVM_FOUND)
find_package (LLVM ${llvm_v} CONFIG PATHS ${LLVM_PATHS})
endif ()
endforeach ()
endif ()
if (LLVM_FOUND)
find_library (LLD_LIBRARY_TEST lldCore PATHS ${LLVM_LIBRARY_DIRS})
find_path (LLD_INCLUDE_DIR_TEST NAMES lld/Core/AbsoluteAtom.h PATHS ${LLVM_INCLUDE_DIRS})
if (NOT LLD_LIBRARY_TEST OR NOT LLD_INCLUDE_DIR_TEST)
set (LLVM_FOUND 0)
message(WARNING "liblld (${LLD_LIBRARY_TEST}, ${LLD_INCLUDE_DIR_TEST}) not found in ${LLVM_INCLUDE_DIRS} ${LLVM_LIBRARY_DIRS}. Disabling internal compiler.")
endif () endif ()
endif () endforeach ()
if (LLVM_FOUND)
# Remove dynamically-linked zlib and libedit from LLVM's dependencies:
@ -51,30 +35,39 @@ if (ENABLE_EMBEDDED_COMPILER)
set (LLVM_FOUND 0)
set (USE_EMBEDDED_COMPILER 0)
endif ()
# TODO: fix llvm 8+ and remove:
if (LLVM_FOUND AND LLVM_VERSION_MAJOR GREATER 7)
message(WARNING "LLVM 8+ not supported yet, disabling.")
set (USE_EMBEDDED_COMPILER 0)
endif ()
else()
set (LLVM_FOUND 1) if (CMAKE_CURRENT_SOURCE_DIR STREQUAL CMAKE_CURRENT_BINARY_DIR)
set (USE_EMBEDDED_COMPILER 1) message(WARNING "Option ENABLE_EMBEDDED_COMPILER is set but LLVM library cannot build if build directory is the same as source directory.")
set (LLVM_VERSION "7.0.0bundled") set (LLVM_FOUND 0)
set (LLVM_INCLUDE_DIRS set (USE_EMBEDDED_COMPILER 0)
${ClickHouse_SOURCE_DIR}/contrib/llvm/llvm/include elseif (SPLIT_SHARED_LIBRARIES)
${ClickHouse_BINARY_DIR}/contrib/llvm/llvm/include # llvm-tablegen cannot find shared libraries that we build. Probably can be easily fixed.
${ClickHouse_SOURCE_DIR}/contrib/llvm/clang/include message(WARNING "Option ENABLE_EMBEDDED_COMPILER is not compatible with SPLIT_SHARED_LIBRARIES. Build of LLVM will be disabled.")
${ClickHouse_BINARY_DIR}/contrib/llvm/clang/include set (LLVM_FOUND 0)
${ClickHouse_BINARY_DIR}/contrib/llvm/llvm/tools/clang/include set (USE_EMBEDDED_COMPILER 0)
${ClickHouse_SOURCE_DIR}/contrib/llvm/lld/include elseif (NOT ARCH_AMD64)
${ClickHouse_BINARY_DIR}/contrib/llvm/lld/include # It's not supported yet, but you can help.
${ClickHouse_BINARY_DIR}/contrib/llvm/llvm/tools/lld/include) message(WARNING "Option ENABLE_EMBEDDED_COMPILER is only available for x86_64. Build of LLVM will be disabled.")
set (LLVM_LIBRARY_DIRS ${ClickHouse_BINARY_DIR}/contrib/llvm/llvm) set (LLVM_FOUND 0)
set (USE_EMBEDDED_COMPILER 0)
elseif (SANITIZE STREQUAL "undefined")
# llvm-tblgen, that is used during LLVM build, doesn't work with UBSan.
message(WARNING "Option ENABLE_EMBEDDED_COMPILER does not work with UBSan, because 'llvm-tblgen' tool from LLVM has undefined behaviour. Build of LLVM will be disabled.")
set (LLVM_FOUND 0)
set (USE_EMBEDDED_COMPILER 0)
else ()
set (LLVM_FOUND 1)
set (USE_EMBEDDED_COMPILER 1)
set (LLVM_VERSION "9.0.0bundled")
set (LLVM_INCLUDE_DIRS
${ClickHouse_SOURCE_DIR}/contrib/llvm/llvm/include
${ClickHouse_BINARY_DIR}/contrib/llvm/llvm/include
)
set (LLVM_LIBRARY_DIRS ${ClickHouse_BINARY_DIR}/contrib/llvm/llvm)
endif()
endif() endif()
if (LLVM_FOUND)
message(STATUS "LLVM version: ${LLVM_PACKAGE_VERSION}")
message(STATUS "LLVM include Directory: ${LLVM_INCLUDE_DIRS}")
message(STATUS "LLVM library Directory: ${LLVM_LIBRARY_DIRS}")
message(STATUS "LLVM C++ compiler flags: ${LLVM_CXXFLAGS}")
@ -82,16 +75,53 @@ if (ENABLE_EMBEDDED_COMPILER)
endif()
function(llvm_libs_all REQUIRED_LLVM_LIBRARIES) # This list was generated by listing all LLVM libraries, compiling the binary and removing all libraries while it still compiles.
llvm_map_components_to_libnames (result all) set (REQUIRED_LLVM_LIBRARIES
if (USE_STATIC_LIBRARIES OR NOT "LLVM" IN_LIST result) LLVMOrcJIT
list (REMOVE_ITEM result "LTO" "LLVM") LLVMExecutionEngine
else() LLVMRuntimeDyld
set (result "LLVM") LLVMX86CodeGen
endif () LLVMX86Desc
if (TERMCAP_LIBRARY) LLVMX86Info
list (APPEND result ${TERMCAP_LIBRARY}) LLVMX86Utils
endif () LLVMAsmPrinter
list (APPEND result ${CMAKE_DL_LIBS} ${ZLIB_LIBRARIES}) LLVMDebugInfoDWARF
set (${REQUIRED_LLVM_LIBRARIES} ${result} PARENT_SCOPE) LLVMGlobalISel
endfunction() LLVMSelectionDAG
LLVMMCDisassembler
LLVMPasses
LLVMCodeGen
LLVMipo
LLVMBitWriter
LLVMInstrumentation
LLVMScalarOpts
LLVMAggressiveInstCombine
LLVMInstCombine
LLVMVectorize
LLVMTransformUtils
LLVMTarget
LLVMAnalysis
LLVMProfileData
LLVMObject
LLVMBitReader
LLVMCore
LLVMRemarks
LLVMBitstreamReader
LLVMMCParser
LLVMMC
LLVMBinaryFormat
LLVMDebugInfoCodeView
LLVMSupport
LLVMDemangle
)
#function(llvm_libs_all REQUIRED_LLVM_LIBRARIES)
# llvm_map_components_to_libnames (result all)
# if (USE_STATIC_LIBRARIES OR NOT "LLVM" IN_LIST result)
# list (REMOVE_ITEM result "LTO" "LLVM")
# else()
# set (result "LLVM")
# endif ()
# list (APPEND result ${CMAKE_DL_LIBS} ${ZLIB_LIBRARIES})
# set (${REQUIRED_LLVM_LIBRARIES} ${result} PARENT_SCOPE)
#endfunction()

View File

@ -1,7 +1,8 @@
option (ENABLE_ORC "Enable ORC" ${ENABLE_LIBRARIES})
if(ENABLE_ORC)
include(cmake/find/snappy.cmake)
option(USE_INTERNAL_ORC_LIBRARY "Set to FALSE to use system ORC instead of bundled" ${NOT_UNBUNDLED})
if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/orc/c++/include/orc/OrcFile.hh")
if(USE_INTERNAL_ORC_LIBRARY)
@ -25,7 +26,7 @@ endif ()
if (ORC_LIBRARY AND ORC_INCLUDE_DIR)
set(USE_ORC 1)
elseif(NOT MISSING_INTERNAL_ORC_LIBRARY AND ARROW_LIBRARY AND SNAPPY_LIBRARY) # (LIBGSASL_LIBRARY AND LIBXML2_LIBRARY)
set(ORC_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/orc/c++/include")
set(ORC_LIBRARY orc)
set(USE_ORC 1)

View File

@ -24,7 +24,10 @@ endif()
if(ARROW_INCLUDE_DIR AND PARQUET_INCLUDE_DIR)
elseif(NOT MISSING_INTERNAL_PARQUET_LIBRARY AND NOT OS_FREEBSD)
include(cmake/find/snappy.cmake)
if(SNAPPY_LIBRARY)
set(CAN_USE_INTERNAL_PARQUET_LIBRARY 1)
endif()
include(CheckCXXSourceCompiles)
if(NOT USE_INTERNAL_DOUBLE_CONVERSION_LIBRARY)
set(CMAKE_REQUIRED_LIBRARIES ${DOUBLE_CONVERSION_LIBRARIES})

View File

@ -8,6 +8,14 @@ if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/poco/CMakeLists.txt")
set (MISSING_INTERNAL_POCO_LIBRARY 1)
endif ()
if (NOT ENABLE_LIBRARIES)
set (ENABLE_POCO_NETSSL ${ENABLE_LIBRARIES} CACHE BOOL "")
set (ENABLE_POCO_MONGODB ${ENABLE_LIBRARIES} CACHE BOOL "")
set (ENABLE_POCO_REDIS ${ENABLE_LIBRARIES} CACHE BOOL "")
set (ENABLE_POCO_ODBC ${ENABLE_LIBRARIES} CACHE BOOL "")
set (ENABLE_POCO_SQL ${ENABLE_LIBRARIES} CACHE BOOL "")
endif ()
set (POCO_COMPONENTS Net XML SQL Data)
if (NOT DEFINED ENABLE_POCO_NETSSL OR ENABLE_POCO_NETSSL)
list (APPEND POCO_COMPONENTS Crypto NetSSL)

26
cmake/find/s3.cmake Normal file
View File

@ -0,0 +1,26 @@
if(NOT OS_FREEBSD AND NOT APPLE AND NOT ARCH_ARM)
option(ENABLE_S3 "Enable S3" ${ENABLE_LIBRARIES})
endif()
if(ENABLE_S3)
option(USE_INTERNAL_AWS_S3_LIBRARY "Set to FALSE to use system S3 instead of bundled" ${NOT_UNBUNDLED})
if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/aws/aws-cpp-sdk-s3")
message (WARNING "submodule contrib/aws is missing. to fix try run: \n git submodule update --init --recursive")
set (MISSING_AWS_S3 1)
endif ()
if (USE_INTERNAL_AWS_S3_LIBRARY AND NOT MISSING_AWS_S3)
set(AWS_S3_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws/aws-cpp-sdk-s3/include")
set(AWS_S3_CORE_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws/aws-cpp-sdk-core/include")
set(AWS_S3_LIBRARY aws_s3)
set(USE_INTERNAL_AWS_S3_LIBRARY 1)
set(USE_AWS_S3 1)
else()
set(USE_INTERNAL_AWS_S3_LIBRARY 0)
set(USE_AWS_S3 0)
endif ()
endif()
message (STATUS "Using aws_s3=${USE_AWS_S3}: ${AWS_S3_INCLUDE_DIR} : ${AWS_S3_LIBRARY}")

View File

@ -6,9 +6,9 @@ if(NOT ARCH_32)
option(USE_INTERNAL_SSL_LIBRARY "Set to FALSE to use system *ssl library instead of bundled" ${NOT_UNBUNDLED})
endif()
if(NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/openssl/README")
if(USE_INTERNAL_SSL_LIBRARY)
message(WARNING "submodule contrib/openssl is missing. to fix try run: \n git submodule update --init --recursive")
endif()
set(USE_INTERNAL_SSL_LIBRARY 0)
set(MISSING_INTERNAL_SSL_LIBRARY 1)
@ -42,17 +42,17 @@ endif ()
if (NOT OPENSSL_FOUND AND NOT MISSING_INTERNAL_SSL_LIBRARY)
set (USE_INTERNAL_SSL_LIBRARY 1)
set (OPENSSL_ROOT_DIR "${ClickHouse_SOURCE_DIR}/contrib/openssl")
set (OPENSSL_INCLUDE_DIR "${OPENSSL_ROOT_DIR}/include")
if (NOT USE_STATIC_LIBRARIES AND TARGET crypto-shared AND TARGET ssl-shared) if (ARCH_AMD64)
set (OPENSSL_CRYPTO_LIBRARY crypto-shared) set (OPENSSL_INCLUDE_DIR "${OPENSSL_ROOT_DIR}/include" "${ClickHouse_SOURCE_DIR}/contrib/openssl-cmake/linux_x86_64/include")
set (OPENSSL_SSL_LIBRARY ssl-shared) elseif (ARCH_AARCH64)
else () set (OPENSSL_INCLUDE_DIR "${OPENSSL_ROOT_DIR}/include" "${ClickHouse_SOURCE_DIR}/contrib/openssl-cmake/linux_aarch64/include")
set (OPENSSL_CRYPTO_LIBRARY crypto)
set (OPENSSL_SSL_LIBRARY ssl)
endif () endif ()
set (OPENSSL_LIBRARIES ${OPENSSL_SSL_LIBRARY} ${OPENSSL_CRYPTO_LIBRARY}) set (OPENSSL_CRYPTO_LIBRARY crypto)
set (OPENSSL_SSL_LIBRARY ssl)
set (OPENSSL_FOUND 1) set (OPENSSL_FOUND 1)
set (OPENSSL_LIBRARIES ${OPENSSL_SSL_LIBRARY} ${OPENSSL_CRYPTO_LIBRARY})
endif ()
if(OPENSSL_FOUND)

View File

@ -1,5 +1,8 @@
find_library (TERMCAP_LIBRARY tinfo)
if (NOT TERMCAP_LIBRARY)
find_library (TERMCAP_LIBRARY ncurses)
endif()
if (NOT TERMCAP_LIBRARY)
find_library (TERMCAP_LIBRARY termcap)
endif()
message (STATUS "Using termcap: ${TERMCAP_LIBRARY}")

View File

@ -4,6 +4,11 @@ if (NOT CMAKE_SYSTEM MATCHES "Linux" OR ARCH_ARM OR ARCH_32)
set (USE_UNWIND OFF)
endif ()
if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/libunwind/CMakeLists.txt")
message(WARNING "submodule contrib/libunwind is missing. to fix try run: \n git submodule update --init --recursive")
set (USE_UNWIND OFF)
endif ()
if (USE_UNWIND)
add_subdirectory(contrib/libunwind-cmake)
set (UNWIND_LIBRARIES unwind)

View File

@ -18,6 +18,14 @@ message(STATUS "Default libraries: ${DEFAULT_LIBS}")
set(CMAKE_CXX_STANDARD_LIBRARIES ${DEFAULT_LIBS})
set(CMAKE_C_STANDARD_LIBRARIES ${DEFAULT_LIBS})
# glibc-compatibility library relies to fixed version of libc headers
# (because minor changes in function attributes between different glibc versions will introduce incompatibilities)
# This is for x86_64. For other architectures we have separate toolchains.
if (ARCH_AMD64)
set(CMAKE_C_STANDARD_INCLUDE_DIRECTORIES ${ClickHouse_SOURCE_DIR}/contrib/libc-headers/x86_64-linux-gnu ${ClickHouse_SOURCE_DIR}/contrib/libc-headers)
set(CMAKE_CXX_STANDARD_INCLUDE_DIRECTORIES ${ClickHouse_SOURCE_DIR}/contrib/libc-headers/x86_64-linux-gnu ${ClickHouse_SOURCE_DIR}/contrib/libc-headers)
endif ()
# Global libraries
add_library(global-libs INTERFACE)

View File

@ -2,13 +2,18 @@ set (CMAKE_SYSTEM_NAME "Linux")
set (CMAKE_SYSTEM_PROCESSOR "aarch64") set (CMAKE_SYSTEM_PROCESSOR "aarch64")
set (CMAKE_C_COMPILER_TARGET "aarch64-linux-gnu") set (CMAKE_C_COMPILER_TARGET "aarch64-linux-gnu")
set (CMAKE_CXX_COMPILER_TARGET "aarch64-linux-gnu") set (CMAKE_CXX_COMPILER_TARGET "aarch64-linux-gnu")
set (CMAKE_ASM_COMPILER_TARGET "aarch64-linux-gnu")
set (CMAKE_SYSROOT "${CMAKE_CURRENT_LIST_DIR}/../toolchain/linux-aarch64/aarch64-linux-gnu/libc") set (CMAKE_SYSROOT "${CMAKE_CURRENT_LIST_DIR}/../toolchain/linux-aarch64/aarch64-linux-gnu/libc")
# We don't use compiler from toolchain because it's gcc-8, and we provide support only for gcc-9. # We don't use compiler from toolchain because it's gcc-8, and we provide support only for gcc-9.
set (CMAKE_AR "${CMAKE_CURRENT_LIST_DIR}/../toolchain/linux-aarch64/bin/aarch64-linux-gnu-ar") set (CMAKE_AR "${CMAKE_CURRENT_LIST_DIR}/../toolchain/linux-aarch64/bin/aarch64-linux-gnu-ar" CACHE FILEPATH "" FORCE)
set (CMAKE_RANLIB "${CMAKE_CURRENT_LIST_DIR}/../toolchain/linux-aarch64/bin/aarch64-linux-gnu-ranlib" CACHE FILEPATH "" FORCE)
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} --gcc-toolchain=${CMAKE_CURRENT_LIST_DIR}/../toolchain/linux-aarch64") set (CMAKE_C_FLAGS_INIT "${CMAKE_C_FLAGS} --gcc-toolchain=${CMAKE_CURRENT_LIST_DIR}/../toolchain/linux-aarch64")
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} --gcc-toolchain=${CMAKE_CURRENT_LIST_DIR}/../toolchain/linux-aarch64") set (CMAKE_CXX_FLAGS_INIT "${CMAKE_CXX_FLAGS} --gcc-toolchain=${CMAKE_CURRENT_LIST_DIR}/../toolchain/linux-aarch64")
set (CMAKE_ASM_FLAGS_INIT "${CMAKE_ASM_FLAGS} --gcc-toolchain=${CMAKE_CURRENT_LIST_DIR}/../toolchain/linux-aarch64")
set (LINKER_NAME "lld" CACHE STRING "" FORCE)
set (CMAKE_EXE_LINKER_FLAGS_INIT "-fuse-ld=lld") set (CMAKE_EXE_LINKER_FLAGS_INIT "-fuse-ld=lld")
set (CMAKE_SHARED_LINKER_FLAGS_INIT "-fuse-ld=lld") set (CMAKE_SHARED_LINKER_FLAGS_INIT "-fuse-ld=lld")

View File

@ -23,7 +23,7 @@ if (SANITIZE)
# RelWithDebInfo, and downgrade optimizations to -O1 but not to -Og, to
# keep the binary size down.
# TODO: try compiling with -Og and with ld.gold.
set (MSAN_FLAGS "-fsanitize=memory -fsanitize-memory-track-origins -fno-optimize-sibling-calls -fsanitize-blacklist=${CMAKE_SOURCE_DIR}/dbms/tests/msan_suppressions.txt")
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SAN_FLAGS} ${MSAN_FLAGS}")
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SAN_FLAGS} ${MSAN_FLAGS}")
@ -40,7 +40,6 @@ if (SANITIZE)
set (ENABLE_HDFS 0 CACHE BOOL "") set (ENABLE_HDFS 0 CACHE BOOL "")
set (ENABLE_CAPNP 0 CACHE BOOL "") set (ENABLE_CAPNP 0 CACHE BOOL "")
set (ENABLE_RDKAFKA 0 CACHE BOOL "") set (ENABLE_RDKAFKA 0 CACHE BOOL "")
set (ENABLE_ICU 0 CACHE BOOL "")
set (ENABLE_POCO_MONGODB 0 CACHE BOOL "") set (ENABLE_POCO_MONGODB 0 CACHE BOOL "")
set (ENABLE_POCO_NETSSL 0 CACHE BOOL "") set (ENABLE_POCO_NETSSL 0 CACHE BOOL "")
set (ENABLE_POCO_ODBC 0 CACHE BOOL "") set (ENABLE_POCO_ODBC 0 CACHE BOOL "")
@ -77,6 +76,9 @@ if (SANITIZE)
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -static-libubsan")
endif ()
# llvm-tblgen, that is used during LLVM build, doesn't work with UBSan.
set (ENABLE_EMBEDDED_COMPILER 0 CACHE BOOL "")
elseif (SANITIZE STREQUAL "libfuzzer") elseif (SANITIZE STREQUAL "libfuzzer")
# NOTE: Eldar Zaitov decided to name it "libfuzzer" instead of "fuzzer" to keep in mind another possible fuzzer backends. # NOTE: Eldar Zaitov decided to name it "libfuzzer" instead of "fuzzer" to keep in mind another possible fuzzer backends.
# NOTE: no-link means that all the targets are built with instrumentation for fuzzer, but only some of them (tests) have entry point for fuzzer and it's not checked. # NOTE: no-link means that all the targets are built with instrumentation for fuzzer, but only some of them (tests) have entry point for fuzzer and it's not checked.

View File

@ -1,2 +1,2 @@
wget https://github.com/phracker/MacOSX-SDKs/releases/download/10.14-beta4/MacOSX10.14.sdk.tar.xz
tar xJf MacOSX10.14.sdk.tar.xz --strip-components=1

View File

@ -1,2 +1,2 @@
wget 'https://developer.arm.com/-/media/Files/downloads/gnu-a/8.3-2019.03/binrel/gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu.tar.xz?revision=2e88a73f-d233-4f96-b1f4-d8b36e9bb0b9&la=en' -O gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu.tar.xz
tar xJf gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu.tar.xz --strip-components=1

View File

@ -1,6 +1,6 @@
if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU") if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
set (COMPILER_GCC 1) set (COMPILER_GCC 1)
elseif (CMAKE_CXX_COMPILER_ID STREQUAL "Clang") elseif (CMAKE_CXX_COMPILER_ID MATCHES "Clang|AppleClang")
set (COMPILER_CLANG 1) set (COMPILER_CLANG 1)
endif () endif ()
@ -20,16 +20,38 @@ else ()
message (WARNING "You are using an unsupported compiler. Compilation has only been tested with Clang 6+ and GCC 7+.") message (WARNING "You are using an unsupported compiler. Compilation has only been tested with Clang 6+ and GCC 7+.")
endif () endif ()
STRING(REGEX MATCHALL "[0-9]+" COMPILER_VERSION_LIST ${CMAKE_CXX_COMPILER_VERSION})
LIST(GET COMPILER_VERSION_LIST 0 COMPILER_VERSION_MAJOR)
option (LINKER_NAME "Linker name or full path")
if (COMPILER_GCC)
find_program (LLD_PATH NAMES "ld.lld")
find_program (GOLD_PATH NAMES "ld.gold")
else ()
find_program (LLD_PATH NAMES "ld.lld-${COMPILER_VERSION_MAJOR}" "lld-${COMPILER_VERSION_MAJOR}" "ld.lld" "lld")
find_program (GOLD_PATH NAMES "ld.gold" "gold")
endif ()
find_program (LLD_PATH NAMES "ld.lld" "lld") # We prefer LLD linker over Gold or BFD.
find_program (GOLD_PATH NAMES "ld.gold" "gold")
if (NOT LINKER_NAME)
if (LLD_PATH)
set (LINKER_NAME "lld") if (COMPILER_GCC)
elseif (GOLD_PATH) # GCC driver requires one of supported linker names like "lld".
set (LINKER_NAME "gold") set (LINKER_NAME "lld")
else ()
# Clang driver simply allows full linker path.
set (LINKER_NAME ${LLD_PATH})
endif ()
endif ()
endif ()
if (NOT LINKER_NAME)
if (GOLD_PATH)
if (COMPILER_GCC)
set (LINKER_NAME "gold")
else ()
set (LINKER_NAME ${GOLD_PATH})
endif ()
endif ()
endif ()
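What happens with the selected LINKER_NAME is outside this hunk; the following is only a hedged sketch of the usual consumption pattern (not the exact ClickHouse code), forwarding the choice through -fuse-ld:

    if (LINKER_NAME)
        message (STATUS "Using custom linker by name: ${LINKER_NAME}")
        # assumption: the chosen linker is passed to the compiler driver via -fuse-ld
        set (CMAKE_EXE_LINKER_FLAGS    "${CMAKE_EXE_LINKER_FLAGS} -fuse-ld=${LINKER_NAME}")
        set (CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -fuse-ld=${LINKER_NAME}")
    endif ()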

View File

@ -52,6 +52,7 @@ if (USE_INTERNAL_BTRIE_LIBRARY)
endif ()
if (USE_INTERNAL_ZLIB_LIBRARY)
unset (BUILD_SHARED_LIBS CACHE)
set (ZLIB_ENABLE_TESTS 0 CACHE INTERNAL "") set (ZLIB_ENABLE_TESTS 0 CACHE INTERNAL "")
set (SKIP_INSTALL_ALL 1 CACHE INTERNAL "") set (SKIP_INSTALL_ALL 1 CACHE INTERNAL "")
set (ZLIB_COMPAT 1 CACHE INTERNAL "") # also enables WITH_GZFILEOP set (ZLIB_COMPAT 1 CACHE INTERNAL "") # also enables WITH_GZFILEOP
@ -65,34 +66,19 @@ if (USE_INTERNAL_ZLIB_LIBRARY)
endif ()
add_subdirectory (${INTERNAL_ZLIB_NAME})
# TODO: make pull to Dead2/zlib-ng and remove:
# We should use same defines when including zlib.h as used when zlib compiled
target_compile_definitions (zlib PUBLIC ZLIB_COMPAT WITH_GZFILEOP)
target_compile_definitions (zlibstatic PUBLIC ZLIB_COMPAT WITH_GZFILEOP)
if(CMAKE_SYSTEM_PROCESSOR MATCHES "x86_64" OR CMAKE_SYSTEM_PROCESSOR MATCHES "AMD64") if (ARCH_AMD64 OR ARCH_AARCH64)
target_compile_definitions (zlib PUBLIC X86_64 UNALIGNED_OK) target_compile_definitions (zlib PUBLIC X86_64 UNALIGNED_OK)
target_compile_definitions (zlibstatic PUBLIC X86_64 UNALIGNED_OK) target_compile_definitions (zlibstatic PUBLIC X86_64 UNALIGNED_OK)
endif () endif ()
#set_target_properties(example PROPERTIES EXCLUDE_FROM_ALL 1)
#if (TARGET example64)
# set_target_properties(example64 PROPERTIES EXCLUDE_FROM_ALL 1)
#endif ()
#set_target_properties(minigzip PROPERTIES EXCLUDE_FROM_ALL 1)
#if (TARGET minigzip64)
# set_target_properties(minigzip64 PROPERTIES EXCLUDE_FROM_ALL 1)
#endif ()
endif ()
if (USE_INTERNAL_CCTZ_LIBRARY)
add_subdirectory (cctz-cmake)
endif ()
if (ENABLE_TCMALLOC AND USE_INTERNAL_GPERFTOOLS_LIBRARY)
add_subdirectory (libtcmalloc)
endif ()
if (ENABLE_JEMALLOC AND USE_INTERNAL_JEMALLOC_LIBRARY)
add_subdirectory (jemalloc-cmake)
endif ()
@ -106,20 +92,10 @@ if (USE_INTERNAL_H3_LIBRARY)
endif () endif ()
if (USE_INTERNAL_SSL_LIBRARY) if (USE_INTERNAL_SSL_LIBRARY)
if (NOT MAKE_STATIC_LIBRARIES) add_subdirectory (openssl-cmake)
set (BUILD_SHARED 1)
endif ()
# By default, ${CMAKE_INSTALL_PREFIX}/etc/ssl is selected - that is not what we need. # This is for Poco library
# We need to use system wide ssl directory.
set (OPENSSLDIR "/etc/ssl")
set (LIBRESSL_SKIP_INSTALL 1 CACHE INTERNAL "")
add_subdirectory (ssl)
target_include_directories(${OPENSSL_CRYPTO_LIBRARY} SYSTEM PUBLIC ${OPENSSL_INCLUDE_DIR})
target_include_directories(${OPENSSL_SSL_LIBRARY} SYSTEM PUBLIC ${OPENSSL_INCLUDE_DIR})
set (POCO_SKIP_OPENSSL_FIND 1) set (POCO_SKIP_OPENSSL_FIND 1)
add_library(OpenSSL::Crypto ALIAS ${OPENSSL_CRYPTO_LIBRARY}) add_library(OpenSSL::Crypto ALIAS ${OPENSSL_CRYPTO_LIBRARY})
add_library(OpenSSL::SSL ALIAS ${OPENSSL_SSL_LIBRARY}) add_library(OpenSSL::SSL ALIAS ${OPENSSL_SSL_LIBRARY})
endif () endif ()
@ -164,6 +140,10 @@ if (ENABLE_ODBC AND USE_INTERNAL_ODBC_LIBRARY)
add_library(ODBC::ODBC ALIAS ${ODBC_LIBRARIES}) add_library(ODBC::ODBC ALIAS ${ODBC_LIBRARIES})
endif () endif ()
if (ENABLE_ICU AND USE_INTERNAL_ICU_LIBRARY)
add_subdirectory (icu-cmake)
endif ()
if (USE_INTERNAL_PARQUET_LIBRARY)
if (USE_INTERNAL_PARQUET_LIBRARY_NATIVE_CMAKE)
# We don't use arrow's cmake files because they use too many dependencies and download some libs at compile time
@ -180,10 +160,7 @@ if (USE_INTERNAL_PARQUET_LIBRARY_NATIVE_CMAKE)
set (ARROW_VERBOSE_THIRDPARTY_BUILD ON CACHE INTERNAL "") set (ARROW_VERBOSE_THIRDPARTY_BUILD ON CACHE INTERNAL "")
set (ARROW_BUILD_SHARED 1 CACHE INTERNAL "") set (ARROW_BUILD_SHARED 1 CACHE INTERNAL "")
set (ARROW_BOOST_HEADER_ONLY ON CACHE INTERNAL "") set (ARROW_BOOST_HEADER_ONLY ON CACHE INTERNAL "")
#set (BOOST_INCLUDEDIR Boost_INCLUDE_DIRS)
set (Boost_FOUND 1 CACHE INTERNAL "") set (Boost_FOUND 1 CACHE INTERNAL "")
#set (ZLIB_HOME ${ZLIB_INCLUDE_DIR})
#set (ZLIB_FOUND 1)
if (MAKE_STATIC_LIBRARIES) if (MAKE_STATIC_LIBRARIES)
set (PARQUET_ARROW_LINKAGE "static" CACHE INTERNAL "") set (PARQUET_ARROW_LINKAGE "static" CACHE INTERNAL "")
set (ARROW_TEST_LINKAGE "static" CACHE INTERNAL "") set (ARROW_TEST_LINKAGE "static" CACHE INTERNAL "")
@ -223,6 +200,11 @@ else()
endif() endif()
add_subdirectory(arrow-cmake)
# The library is large - avoid bloat.
target_compile_options (${ARROW_LIBRARY} PRIVATE -g0)
target_compile_options (${THRIFT_LIBRARY} PRIVATE -g0)
target_compile_options (${PARQUET_LIBRARY} PRIVATE -g0)
endif()
endif()
@ -259,28 +241,14 @@ elseif(GTEST_SRC_DIR)
target_compile_definitions(gtest INTERFACE GTEST_HAS_POSIX_RE=0) target_compile_definitions(gtest INTERFACE GTEST_HAS_POSIX_RE=0)
endif() endif()
if (USE_INTERNAL_LLVM_LIBRARY) if (USE_EMBEDDED_COMPILER AND USE_INTERNAL_LLVM_LIBRARY)
file(GENERATE OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/empty.cpp CONTENT " ")
add_library(LLVM0 ${CMAKE_CURRENT_BINARY_DIR}/empty.cpp) # silly cmake bug fix
add_library(LLVMOFF ${CMAKE_CURRENT_BINARY_DIR}/empty.cpp)
# ld: unknown option: --color-diagnostics # ld: unknown option: --color-diagnostics
if (APPLE) if (APPLE)
set (LINKER_SUPPORTS_COLOR_DIAGNOSTICS 0 CACHE INTERNAL "") set (LINKER_SUPPORTS_COLOR_DIAGNOSTICS 0 CACHE INTERNAL "")
endif () endif ()
set (LLVM_ENABLE_EH 1 CACHE INTERNAL "") set (LLVM_ENABLE_EH 1 CACHE INTERNAL "")
set (LLVM_ENABLE_RTTI 1 CACHE INTERNAL "") set (LLVM_ENABLE_RTTI 1 CACHE INTERNAL "")
set (LLVM_INCLUDE_TESTS 0 CACHE INTERNAL "") set (LLVM_TARGETS_TO_BUILD "X86;AArch64" CACHE STRING "")
set (LLVM_INCLUDE_EXAMPLES 0 CACHE INTERNAL "")
set (LLVM_INCLUDE_TOOLS 0 CACHE INTERNAL "")
set (LLVM_INSTALL_TOOLCHAIN_ONLY 0 CACHE INTERNAL "")
set (CLANG_BUILT_STANDALONE 0 CACHE INTERNAL "")
set (LLDB_BUILT_STANDALONE 0 CACHE INTERNAL "")
set (CLANG_ENABLE_STATIC_ANALYZER 0 CACHE INTERNAL "")
set (CLANG_ENABLE_ARCMT 0 CACHE INTERNAL "")
set (CLANG_BUILD_TOOLS 0 CACHE INTERNAL "")
set (BENCHMARK_ENABLE_GTEST_TESTS 0 CACHE INTERNAL "")
set (BENCHMARK_ENABLE_ASSEMBLY_TESTS 0 CACHE INTERNAL "")
set (LLVM_TARGETS_TO_BUILD "X86;AArch64" CACHE INTERNAL "")
add_subdirectory (llvm/llvm) add_subdirectory (llvm/llvm)
endif () endif ()
@ -312,12 +280,39 @@ if (USE_INTERNAL_HDFS3_LIBRARY)
add_subdirectory(libhdfs3-cmake) add_subdirectory(libhdfs3-cmake)
endif () endif ()
if (USE_INTERNAL_AWS_S3_LIBRARY)
set (save_CMAKE_C_FLAGS ${CMAKE_C_FLAGS})
set (save_CMAKE_REQUIRED_LIBRARIES ${CMAKE_REQUIRED_LIBRARIES})
set (save_CMAKE_REQUIRED_INCLUDES ${CMAKE_REQUIRED_INCLUDES})
set (save_CMAKE_REQUIRED_FLAGS ${CMAKE_REQUIRED_FLAGS})
set (save_CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH})
add_subdirectory(curl-cmake)
set (CMAKE_C_FLAGS ${save_CMAKE_C_FLAGS})
set (CMAKE_REQUIRED_LIBRARIES ${save_CMAKE_REQUIRED_LIBRARIES})
set (CMAKE_CMAKE_REQUIRED_INCLUDES ${save_CMAKE_REQUIRED_INCLUDES})
set (CMAKE_REQUIRED_FLAGS ${save_CMAKE_REQUIRED_FLAGS})
set (CMAKE_CMAKE_MODULE_PATH ${save_CMAKE_MODULE_PATH})
add_subdirectory(aws-s3-cmake)
# The library is large - avoid bloat.
target_compile_options (aws_s3 PRIVATE -g0)
target_compile_options (aws_s3_checksums PRIVATE -g0)
target_compile_options (libcurl PRIVATE -g0)
endif ()
if (USE_BASE64)
add_subdirectory (base64-cmake)
endif()
if (USE_INTERNAL_HYPERSCAN_LIBRARY)
add_subdirectory (hyperscan)
# The library is large - avoid bloat.
if (USE_STATIC_LIBRARIES)
target_compile_options (hs PRIVATE -g0)
else ()
target_compile_options (hs_shared PRIVATE -g0)
endif ()
endif()
if (USE_SIMDJSON)
@ -331,7 +326,3 @@ endif()
if (USE_FASTOPS)
add_subdirectory (fastops-cmake)
endif()
#if (USE_INTERNAL_ORC_LIBRARY)
# add_subdirectory(orc-cmake)
#endif ()

View File

@ -70,6 +70,14 @@ add_custom_command(OUTPUT orc_proto.pb.h orc_proto.pb.cc
--cpp_out="${CMAKE_CURRENT_BINARY_DIR}"
"${PROTO_DIR}/orc_proto.proto")
# arrow-cmake cmake file calling orc cmake subroutine which detects certain compiler features.
# Apple Clang compiler failed to compile this code without specifying c++11 standard.
# As result these compiler features detected as absent. In result it failed to compile orc itself.
# In orc makefile there is code that sets flags, but arrow-cmake ignores these flags.
if (CMAKE_CXX_COMPILER_ID STREQUAL "AppleClang")
set (CXX11_FLAGS "-std=c++0x")
endif()
include(${ClickHouse_SOURCE_DIR}/contrib/orc/cmake_modules/CheckSourceCompiles.cmake)
include(orc_check.cmake)
configure_file("${ORC_INCLUDE_DIR}/orc/orc-config.hh.in" "${ORC_BUILD_INCLUDE_DIR}/orc/orc-config.hh")

1
contrib/aws vendored Submodule

@ -0,0 +1 @@
Subproject commit 45dd8552d3c492defca79d2720bcc809e35654da

1
contrib/aws-c-common vendored Submodule

@ -0,0 +1 @@
Subproject commit 736a82d1697c108b04a277e66438a7f4e19b6857

1
contrib/aws-c-event-stream vendored Submodule

@ -0,0 +1 @@
Subproject commit 3bc33662f9ccff4f4cbcf9509cc78c26e022fde0

1
contrib/aws-checksums vendored Submodule

@ -0,0 +1 @@
Subproject commit 519d6d9093819b6cf89ffff589a27ef8f83d0f65

View File

@ -0,0 +1,107 @@
SET(AWS_S3_LIBRARY_DIR ${ClickHouse_SOURCE_DIR}/contrib/aws/aws-cpp-sdk-s3)
SET(AWS_CORE_LIBRARY_DIR ${ClickHouse_SOURCE_DIR}/contrib/aws/aws-cpp-sdk-core)
SET(AWS_CHECKSUMS_LIBRARY_DIR ${ClickHouse_SOURCE_DIR}/contrib/aws-checksums)
SET(AWS_COMMON_LIBRARY_DIR ${ClickHouse_SOURCE_DIR}/contrib/aws-c-common)
SET(AWS_EVENT_STREAM_LIBRARY_DIR ${ClickHouse_SOURCE_DIR}/contrib/aws-c-event-stream)
OPTION(USE_AWS_MEMORY_MANAGEMENT "Aws memory management" OFF)
configure_file("${AWS_CORE_LIBRARY_DIR}/include/aws/core/SDKConfig.h.in"
"${CMAKE_CURRENT_BINARY_DIR}/include/aws/core/SDKConfig.h" @ONLY)
configure_file("${AWS_COMMON_LIBRARY_DIR}/include/aws/common/config.h.in"
"${CMAKE_CURRENT_BINARY_DIR}/include/aws/common/config.h" @ONLY)
file(GLOB AWS_CORE_SOURCES
"${AWS_CORE_LIBRARY_DIR}/source/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/auth/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/client/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/http/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/http/standard/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/http/curl/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/config/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/external/cjson/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/external/tinyxml2/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/internal/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/monitoring/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/net/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/linux-shared/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/platform/linux-shared/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/utils/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/utils/base64/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/utils/event/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/utils/crypto/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/utils/crypto/openssl/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/utils/crypto/factory/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/utils/json/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/utils/logging/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/utils/memory/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/utils/memory/stl/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/utils/stream/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/utils/threading/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/utils/xml/*.cpp"
)
file(GLOB AWS_S3_SOURCES
"${AWS_S3_LIBRARY_DIR}/source/*.cpp"
)
file(GLOB AWS_S3_MODEL_SOURCES
"${AWS_S3_LIBRARY_DIR}/source/model/*.cpp"
)
file(GLOB AWS_EVENT_STREAM_SOURCES
"${AWS_EVENT_STREAM_LIBRARY_DIR}/source/*.c"
)
file(GLOB AWS_COMMON_SOURCES
"${AWS_COMMON_LIBRARY_DIR}/source/*.c"
"${AWS_COMMON_LIBRARY_DIR}/source/posix/*.c"
)
file(GLOB AWS_CHECKSUMS_SOURCES
"${AWS_CHECKSUMS_LIBRARY_DIR}/source/*.c"
"${AWS_CHECKSUMS_LIBRARY_DIR}/source/intel/*.c"
"${AWS_CHECKSUMS_LIBRARY_DIR}/source/arm/*.c"
)
file(GLOB S3_UNIFIED_SRC
${AWS_EVENT_STREAM_SOURCES}
${AWS_COMMON_SOURCES}
${AWS_S3_SOURCES}
${AWS_S3_MODEL_SOURCES}
${AWS_CORE_SOURCES}
)
set(S3_INCLUDES
"${CMAKE_CURRENT_SOURCE_DIR}/include/"
"${AWS_COMMON_LIBRARY_DIR}/include/"
"${AWS_EVENT_STREAM_LIBRARY_DIR}/include/"
"${AWS_S3_LIBRARY_DIR}/include/"
"${AWS_CORE_LIBRARY_DIR}/include/"
"${CMAKE_CURRENT_BINARY_DIR}/include/"
)
add_library(aws_s3_checksums ${AWS_CHECKSUMS_SOURCES})
target_include_directories(aws_s3_checksums PUBLIC "${AWS_CHECKSUMS_LIBRARY_DIR}/include/")
if(CMAKE_BUILD_TYPE STREQUAL "" OR CMAKE_BUILD_TYPE STREQUAL "Debug")
target_compile_definitions(aws_s3_checksums PRIVATE "-DDEBUG_BUILD")
endif()
set_target_properties(aws_s3_checksums PROPERTIES COMPILE_OPTIONS -fPIC)
set_target_properties(aws_s3_checksums PROPERTIES LINKER_LANGUAGE C)
set_property(TARGET aws_s3_checksums PROPERTY C_STANDARD 99)
add_library(aws_s3 ${S3_UNIFIED_SRC})
target_compile_definitions(aws_s3 PUBLIC -DENABLE_CURL_CLIENT)
target_compile_definitions(aws_s3 PUBLIC "AWS_SDK_VERSION_MAJOR=1")
target_compile_definitions(aws_s3 PUBLIC "AWS_SDK_VERSION_MINOR=7")
target_compile_definitions(aws_s3 PUBLIC "AWS_SDK_VERSION_PATCH=231")
target_include_directories(aws_s3 PUBLIC ${S3_INCLUDES} "${CMAKE_BINARY_DIR}/install")
if (OPENSSL_FOUND)
target_compile_definitions(aws_s3 PUBLIC -DENABLE_OPENSSL_ENCRYPTION)
target_link_libraries(aws_s3 PRIVATE ${OPENSSL_LIBRARIES})
endif()
target_link_libraries(aws_s3 PRIVATE aws_s3_checksums libcurl)

View File

@ -28,8 +28,7 @@ set (KJ_SRCS
)
add_library(kj ${KJ_SRCS})
target_include_directories(kj SYSTEM PUBLIC ${CAPNPROTO_SOURCE_DIR})
target_compile_options(kj PUBLIC -Wno-non-virtual-dtor)
set (CAPNP_SRCS set (CAPNP_SRCS
${CAPNPROTO_SOURCE_DIR}/capnp/c++.capnp.c++ ${CAPNPROTO_SOURCE_DIR}/capnp/c++.capnp.c++
@ -50,6 +49,9 @@ set (CAPNP_SRCS
) )
add_library(capnp ${CAPNP_SRCS}) add_library(capnp ${CAPNP_SRCS})
set_target_properties(capnp
PROPERTIES LINKER_LANGUAGE CXX
)
target_link_libraries(capnp PUBLIC kj) target_link_libraries(capnp PUBLIC kj)
set (CAPNPC_SRCS set (CAPNPC_SRCS
@ -67,3 +69,15 @@ set (CAPNPC_SRCS
add_library(capnpc ${CAPNPC_SRCS}) add_library(capnpc ${CAPNPC_SRCS})
target_link_libraries(capnpc PUBLIC capnp) target_link_libraries(capnpc PUBLIC capnp)
# The library has substandard code
if (COMPILER_GCC)
set (SUPPRESS_WARNINGS -Wno-non-virtual-dtor -Wno-sign-compare -Wno-strict-aliasing -Wno-maybe-uninitialized
-Wno-deprecated-declarations -Wno-class-memaccess)
elseif (COMPILER_CLANG)
set (SUPPRESS_WARNINGS -Wno-non-virtual-dtor -Wno-sign-compare -Wno-strict-aliasing -Wno-deprecated-declarations)
endif ()
target_compile_options(kj PRIVATE ${SUPPRESS_WARNINGS})
target_compile_options(capnp PRIVATE ${SUPPRESS_WARNINGS})
target_compile_options(capnpc PRIVATE ${SUPPRESS_WARNINGS})

View File

@ -1,6 +1,6 @@
add_library(roaring
roaring.c
roaring/roaring.h
roaring/roaring.hh)
target_include_directories (roaring SYSTEM PUBLIC ${CMAKE_CURRENT_SOURCE_DIR})

1
contrib/curl vendored Submodule

@ -0,0 +1 @@
Subproject commit 3b8bbbbd1609c638a3d3d0acb148a33dedb67be3

View File

@ -0,0 +1,61 @@
include(CheckCSourceCompiles)
option(CURL_HIDDEN_SYMBOLS "Set to ON to hide libcurl internal symbols (=hide all symbols that aren't officially external)." ON)
mark_as_advanced(CURL_HIDDEN_SYMBOLS)
if(CURL_HIDDEN_SYMBOLS)
set(SUPPORTS_SYMBOL_HIDING FALSE)
if(CMAKE_C_COMPILER_ID MATCHES "Clang")
set(SUPPORTS_SYMBOL_HIDING TRUE)
set(_SYMBOL_EXTERN "__attribute__ ((__visibility__ (\"default\")))")
set(_CFLAG_SYMBOLS_HIDE "-fvisibility=hidden")
elseif(CMAKE_COMPILER_IS_GNUCC)
if(NOT CMAKE_VERSION VERSION_LESS 2.8.10)
set(GCC_VERSION ${CMAKE_C_COMPILER_VERSION})
else()
execute_process(COMMAND ${CMAKE_C_COMPILER} -dumpversion
OUTPUT_VARIABLE GCC_VERSION)
endif()
if(NOT GCC_VERSION VERSION_LESS 3.4)
# note: this is considered buggy prior to 4.0 but the autotools don't care, so let's ignore that fact
set(SUPPORTS_SYMBOL_HIDING TRUE)
set(_SYMBOL_EXTERN "__attribute__ ((__visibility__ (\"default\")))")
set(_CFLAG_SYMBOLS_HIDE "-fvisibility=hidden")
endif()
elseif(CMAKE_C_COMPILER_ID MATCHES "SunPro" AND NOT CMAKE_C_COMPILER_VERSION VERSION_LESS 8.0)
set(SUPPORTS_SYMBOL_HIDING TRUE)
set(_SYMBOL_EXTERN "__global")
set(_CFLAG_SYMBOLS_HIDE "-xldscope=hidden")
elseif(CMAKE_C_COMPILER_ID MATCHES "Intel" AND NOT CMAKE_C_COMPILER_VERSION VERSION_LESS 9.0)
# note: this should probably just check for version 9.1.045 but I'm not 100% sure
# so let's do it the same way autotools do.
set(SUPPORTS_SYMBOL_HIDING TRUE)
set(_SYMBOL_EXTERN "__attribute__ ((__visibility__ (\"default\")))")
set(_CFLAG_SYMBOLS_HIDE "-fvisibility=hidden")
check_c_source_compiles("#include <stdio.h>
int main (void) { printf(\"icc fvisibility bug test\"); return 0; }" _no_bug)
if(NOT _no_bug)
set(SUPPORTS_SYMBOL_HIDING FALSE)
set(_SYMBOL_EXTERN "")
set(_CFLAG_SYMBOLS_HIDE "")
endif()
elseif(MSVC)
set(SUPPORTS_SYMBOL_HIDING TRUE)
endif()
set(HIDES_CURL_PRIVATE_SYMBOLS ${SUPPORTS_SYMBOL_HIDING})
elseif(MSVC)
if(NOT CMAKE_VERSION VERSION_LESS 3.7)
set(CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS TRUE) #present since 3.4.3 but broken
set(HIDES_CURL_PRIVATE_SYMBOLS FALSE)
else()
message(WARNING "Hiding private symbols regardless CURL_HIDDEN_SYMBOLS being disabled.")
set(HIDES_CURL_PRIVATE_SYMBOLS TRUE)
endif()
else()
set(HIDES_CURL_PRIVATE_SYMBOLS FALSE)
endif()
set(CURL_CFLAG_SYMBOLS_HIDE ${_CFLAG_SYMBOLS_HIDE})
set(CURL_EXTERN_SYMBOL ${_SYMBOL_EXTERN})
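The two variables computed above are meant to be applied to the libcurl target elsewhere in curl-cmake; that part is not shown in this hunk, so the following is only a hedged sketch of how they could be consumed:

    if (CURL_HIDDEN_SYMBOLS AND SUPPORTS_SYMBOL_HIDING)
        # hide internal symbols; only what curl marks as exported stays visible
        target_compile_options (libcurl PRIVATE ${CURL_CFLAG_SYMBOLS_HIDE})
        target_compile_definitions (libcurl PRIVATE CURL_HIDDEN_SYMBOLS)
    endif ()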

View File

@ -0,0 +1,617 @@
/***************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2019, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at https://curl.haxx.se/docs/copyright.html.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
***************************************************************************/
#ifdef TIME_WITH_SYS_TIME
/* Time with sys/time test */
#include <sys/types.h>
#include <sys/time.h>
#include <time.h>
int
main ()
{
if ((struct tm *) 0)
return 0;
;
return 0;
}
#endif
#ifdef HAVE_FCNTL_O_NONBLOCK
/* headers for FCNTL_O_NONBLOCK test */
#include <sys/types.h>
#include <unistd.h>
#include <fcntl.h>
/* */
#if defined(sun) || defined(__sun__) || \
defined(__SUNPRO_C) || defined(__SUNPRO_CC)
# if defined(__SVR4) || defined(__srv4__)
# define PLATFORM_SOLARIS
# else
# define PLATFORM_SUNOS4
# endif
#endif
#if (defined(_AIX) || defined(__xlC__)) && !defined(_AIX41)
# define PLATFORM_AIX_V3
#endif
/* */
#if defined(PLATFORM_SUNOS4) || defined(PLATFORM_AIX_V3) || defined(__BEOS__)
#error "O_NONBLOCK does not work on this platform"
#endif
int
main ()
{
/* O_NONBLOCK source test */
int flags = 0;
if(0 != fcntl(0, F_SETFL, flags | O_NONBLOCK))
return 1;
return 0;
}
#endif
/* tests for gethostbyaddr_r or gethostbyname_r */
#if defined(HAVE_GETHOSTBYADDR_R_5_REENTRANT) || \
defined(HAVE_GETHOSTBYADDR_R_7_REENTRANT) || \
defined(HAVE_GETHOSTBYADDR_R_8_REENTRANT) || \
defined(HAVE_GETHOSTBYNAME_R_3_REENTRANT) || \
defined(HAVE_GETHOSTBYNAME_R_5_REENTRANT) || \
defined(HAVE_GETHOSTBYNAME_R_6_REENTRANT)
# define _REENTRANT
/* no idea whether _REENTRANT is always set, just invent a new flag */
# define TEST_GETHOSTBYFOO_REENTRANT
#endif
#if defined(HAVE_GETHOSTBYADDR_R_5) || \
defined(HAVE_GETHOSTBYADDR_R_7) || \
defined(HAVE_GETHOSTBYADDR_R_8) || \
defined(HAVE_GETHOSTBYNAME_R_3) || \
defined(HAVE_GETHOSTBYNAME_R_5) || \
defined(HAVE_GETHOSTBYNAME_R_6) || \
defined(TEST_GETHOSTBYFOO_REENTRANT)
#include <sys/types.h>
#include <netdb.h>
int main(void)
{
char *address = "example.com";
int length = 0;
int type = 0;
struct hostent h;
int rc = 0;
#if defined(HAVE_GETHOSTBYADDR_R_5) || \
defined(HAVE_GETHOSTBYADDR_R_5_REENTRANT) || \
\
defined(HAVE_GETHOSTBYNAME_R_3) || \
defined(HAVE_GETHOSTBYNAME_R_3_REENTRANT)
struct hostent_data hdata;
#elif defined(HAVE_GETHOSTBYADDR_R_7) || \
defined(HAVE_GETHOSTBYADDR_R_7_REENTRANT) || \
defined(HAVE_GETHOSTBYADDR_R_8) || \
defined(HAVE_GETHOSTBYADDR_R_8_REENTRANT) || \
\
defined(HAVE_GETHOSTBYNAME_R_5) || \
defined(HAVE_GETHOSTBYNAME_R_5_REENTRANT) || \
defined(HAVE_GETHOSTBYNAME_R_6) || \
defined(HAVE_GETHOSTBYNAME_R_6_REENTRANT)
char buffer[8192];
int h_errnop;
struct hostent *hp;
#endif
#ifndef gethostbyaddr_r
(void)gethostbyaddr_r;
#endif
#if defined(HAVE_GETHOSTBYADDR_R_5) || \
defined(HAVE_GETHOSTBYADDR_R_5_REENTRANT)
rc = gethostbyaddr_r(address, length, type, &h, &hdata);
(void)rc;
#elif defined(HAVE_GETHOSTBYADDR_R_7) || \
defined(HAVE_GETHOSTBYADDR_R_7_REENTRANT)
hp = gethostbyaddr_r(address, length, type, &h, buffer, 8192, &h_errnop);
(void)hp;
#elif defined(HAVE_GETHOSTBYADDR_R_8) || \
defined(HAVE_GETHOSTBYADDR_R_8_REENTRANT)
rc = gethostbyaddr_r(address, length, type, &h, buffer, 8192, &hp, &h_errnop);
(void)rc;
#endif
#if defined(HAVE_GETHOSTBYNAME_R_3) || \
defined(HAVE_GETHOSTBYNAME_R_3_REENTRANT)
rc = gethostbyname_r(address, &h, &hdata);
#elif defined(HAVE_GETHOSTBYNAME_R_5) || \
defined(HAVE_GETHOSTBYNAME_R_5_REENTRANT)
rc = gethostbyname_r(address, &h, buffer, 8192, &h_errnop);
(void)hp; /* not used for test */
#elif defined(HAVE_GETHOSTBYNAME_R_6) || \
defined(HAVE_GETHOSTBYNAME_R_6_REENTRANT)
rc = gethostbyname_r(address, &h, buffer, 8192, &hp, &h_errnop);
#endif
(void)length;
(void)type;
(void)rc;
return 0;
}
#endif
#ifdef HAVE_SOCKLEN_T
#ifdef _WIN32
#include <ws2tcpip.h>
#else
#include <sys/types.h>
#include <sys/socket.h>
#endif
int
main ()
{
if ((socklen_t *) 0)
return 0;
if (sizeof (socklen_t))
return 0;
;
return 0;
}
#endif
#ifdef HAVE_IN_ADDR_T
#include <sys/types.h>
#include <sys/socket.h>
#include <arpa/inet.h>
int
main ()
{
if ((in_addr_t *) 0)
return 0;
if (sizeof (in_addr_t))
return 0;
;
return 0;
}
#endif
#ifdef HAVE_BOOL_T
#ifdef HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif
#ifdef HAVE_STDBOOL_H
#include <stdbool.h>
#endif
int
main ()
{
if (sizeof (bool *) )
return 0;
;
return 0;
}
#endif
#ifdef STDC_HEADERS
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <float.h>
int main() { return 0; }
#endif
#ifdef RETSIGTYPE_TEST
#include <sys/types.h>
#include <signal.h>
#ifdef signal
# undef signal
#endif
#ifdef __cplusplus
extern "C" void (*signal (int, void (*)(int)))(int);
#else
void (*signal ()) ();
#endif
int
main ()
{
return 0;
}
#endif
#ifdef HAVE_INET_NTOA_R_DECL
#include <arpa/inet.h>
typedef void (*func_type)();
int main()
{
#ifndef inet_ntoa_r
func_type func;
func = (func_type)inet_ntoa_r;
(void)func;
#endif
return 0;
}
#endif
#ifdef HAVE_INET_NTOA_R_DECL_REENTRANT
#define _REENTRANT
#include <arpa/inet.h>
typedef void (*func_type)();
int main()
{
#ifndef inet_ntoa_r
func_type func;
func = (func_type)&inet_ntoa_r;
(void)func;
#endif
return 0;
}
#endif
#ifdef HAVE_GETADDRINFO
#include <netdb.h>
#include <sys/types.h>
#include <sys/socket.h>
int main(void) {
struct addrinfo hints, *ai;
int error;
memset(&hints, 0, sizeof(hints));
hints.ai_family = AF_UNSPEC;
hints.ai_socktype = SOCK_STREAM;
#ifndef getaddrinfo
(void)getaddrinfo;
#endif
error = getaddrinfo("127.0.0.1", "8080", &hints, &ai);
if (error) {
return 1;
}
return 0;
}
#endif
#ifdef HAVE_FILE_OFFSET_BITS
#ifdef _FILE_OFFSET_BITS
#undef _FILE_OFFSET_BITS
#endif
#define _FILE_OFFSET_BITS 64
#include <sys/types.h>
/* Check that off_t can represent 2**63 - 1 correctly.
We can't simply define LARGE_OFF_T to be 9223372036854775807,
since some C++ compilers masquerading as C compilers
incorrectly reject 9223372036854775807. */
#define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62))
int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721
&& LARGE_OFF_T % 2147483647 == 1)
? 1 : -1];
int main () { ; return 0; }
#endif
#ifdef HAVE_IOCTLSOCKET
/* includes start */
#ifdef HAVE_WINDOWS_H
# ifndef WIN32_LEAN_AND_MEAN
# define WIN32_LEAN_AND_MEAN
# endif
# include <windows.h>
# ifdef HAVE_WINSOCK2_H
# include <winsock2.h>
# else
# ifdef HAVE_WINSOCK_H
# include <winsock.h>
# endif
# endif
#endif
int
main ()
{
/* ioctlsocket source code */
int socket;
unsigned long flags = ioctlsocket(socket, FIONBIO, &flags);
;
return 0;
}
#endif
#ifdef HAVE_IOCTLSOCKET_CAMEL
/* includes start */
#ifdef HAVE_WINDOWS_H
# ifndef WIN32_LEAN_AND_MEAN
# define WIN32_LEAN_AND_MEAN
# endif
# include <windows.h>
# ifdef HAVE_WINSOCK2_H
# include <winsock2.h>
# else
# ifdef HAVE_WINSOCK_H
# include <winsock.h>
# endif
# endif
#endif
int
main ()
{
/* IoctlSocket source code */
if(0 != IoctlSocket(0, 0, 0))
return 1;
;
return 0;
}
#endif
#ifdef HAVE_IOCTLSOCKET_CAMEL_FIONBIO
/* includes start */
#ifdef HAVE_WINDOWS_H
# ifndef WIN32_LEAN_AND_MEAN
# define WIN32_LEAN_AND_MEAN
# endif
# include <windows.h>
# ifdef HAVE_WINSOCK2_H
# include <winsock2.h>
# else
# ifdef HAVE_WINSOCK_H
# include <winsock.h>
# endif
# endif
#endif
int
main ()
{
/* IoctlSocket source code */
long flags = 0;
if(0 != ioctlsocket(0, FIONBIO, &flags))
return 1;
;
return 0;
}
#endif
#ifdef HAVE_IOCTLSOCKET_FIONBIO
/* includes start */
#ifdef HAVE_WINDOWS_H
# ifndef WIN32_LEAN_AND_MEAN
# define WIN32_LEAN_AND_MEAN
# endif
# include <windows.h>
# ifdef HAVE_WINSOCK2_H
# include <winsock2.h>
# else
# ifdef HAVE_WINSOCK_H
# include <winsock.h>
# endif
# endif
#endif
int
main ()
{
int flags = 0;
if(0 != ioctlsocket(0, FIONBIO, &flags))
return 1;
;
return 0;
}
#endif
#ifdef HAVE_IOCTL_FIONBIO
/* headers for FIONBIO test */
/* includes start */
#ifdef HAVE_SYS_TYPES_H
# include <sys/types.h>
#endif
#ifdef HAVE_UNISTD_H
# include <unistd.h>
#endif
#ifdef HAVE_SYS_SOCKET_H
# include <sys/socket.h>
#endif
#ifdef HAVE_SYS_IOCTL_H
# include <sys/ioctl.h>
#endif
#ifdef HAVE_STROPTS_H
# include <stropts.h>
#endif
int
main ()
{
int flags = 0;
if(0 != ioctl(0, FIONBIO, &flags))
return 1;
;
return 0;
}
#endif
#ifdef HAVE_IOCTL_SIOCGIFADDR
/* headers for FIONBIO test */
/* includes start */
#ifdef HAVE_SYS_TYPES_H
# include <sys/types.h>
#endif
#ifdef HAVE_UNISTD_H
# include <unistd.h>
#endif
#ifdef HAVE_SYS_SOCKET_H
# include <sys/socket.h>
#endif
#ifdef HAVE_SYS_IOCTL_H
# include <sys/ioctl.h>
#endif
#ifdef HAVE_STROPTS_H
# include <stropts.h>
#endif
#include <net/if.h>
int
main ()
{
struct ifreq ifr;
if(0 != ioctl(0, SIOCGIFADDR, &ifr))
return 1;
;
return 0;
}
#endif
#ifdef HAVE_SETSOCKOPT_SO_NONBLOCK
/* includes start */
#ifdef HAVE_WINDOWS_H
# ifndef WIN32_LEAN_AND_MEAN
# define WIN32_LEAN_AND_MEAN
# endif
# include <windows.h>
# ifdef HAVE_WINSOCK2_H
# include <winsock2.h>
# else
# ifdef HAVE_WINSOCK_H
# include <winsock.h>
# endif
# endif
#endif
/* includes start */
#ifdef HAVE_SYS_TYPES_H
# include <sys/types.h>
#endif
#ifdef HAVE_SYS_SOCKET_H
# include <sys/socket.h>
#endif
/* includes end */
int
main ()
{
if(0 != setsockopt(0, SOL_SOCKET, SO_NONBLOCK, 0, 0))
return 1;
;
return 0;
}
#endif
#ifdef HAVE_GLIBC_STRERROR_R
#include <string.h>
#include <errno.h>
void check(char c) {}
int
main () {
char buffer[1024];
/* This will not compile if strerror_r does not return a char* */
check(strerror_r(EACCES, buffer, sizeof(buffer))[0]);
return 0;
}
#endif
#ifdef HAVE_POSIX_STRERROR_R
#include <string.h>
#include <errno.h>
/* float, because a pointer can't be implicitly cast to float */
void check(float f) {}
int
main () {
char buffer[1024];
/* This will not compile if strerror_r does not return an int */
check(strerror_r(EACCES, buffer, sizeof(buffer)));
return 0;
}
#endif
#ifdef HAVE_FSETXATTR_6
#include <sys/xattr.h> /* header from libc, not from libattr */
int
main() {
fsetxattr(0, 0, 0, 0, 0, 0);
return 0;
}
#endif
#ifdef HAVE_FSETXATTR_5
#include <sys/xattr.h> /* header from libc, not from libattr */
int
main() {
fsetxattr(0, 0, 0, 0, 0);
return 0;
}
#endif
#ifdef HAVE_CLOCK_GETTIME_MONOTONIC
#include <time.h>
int
main() {
struct timespec ts = {0, 0};
clock_gettime(CLOCK_MONOTONIC, &ts);
return 0;
}
#endif
#ifdef HAVE_BUILTIN_AVAILABLE
int
main() {
if(__builtin_available(macOS 10.12, *)) {}
return 0;
}
#endif
#ifdef HAVE_VARIADIC_MACROS_C99
#define c99_vmacro3(first, ...) fun3(first, __VA_ARGS__)
#define c99_vmacro2(first, ...) fun2(first, __VA_ARGS__)
int fun3(int arg1, int arg2, int arg3);
int fun2(int arg1, int arg2);
int fun3(int arg1, int arg2, int arg3) {
return arg1 + arg2 + arg3;
}
int fun2(int arg1, int arg2) {
return arg1 + arg2;
}
int
main() {
int res3 = c99_vmacro3(1, 2, 3);
int res2 = c99_vmacro2(1, 2);
(void)res3;
(void)res2;
return 0;
}
#endif
#ifdef HAVE_VARIADIC_MACROS_GCC
#define gcc_vmacro3(first, args...) fun3(first, args)
#define gcc_vmacro2(first, args...) fun2(first, args)
int fun3(int arg1, int arg2, int arg3);
int fun2(int arg1, int arg2);
int fun3(int arg1, int arg2, int arg3) {
return arg1 + arg2 + arg3;
}
int fun2(int arg1, int arg2) {
return arg1 + arg2;
}
int
main() {
int res3 = gcc_vmacro3(1, 2, 3);
int res2 = gcc_vmacro2(1, 2);
(void)res3;
(void)res2;
return 0;
}
#endif

View File

@ -0,0 +1,84 @@
# File defines convenience macros for available feature testing
# This macro checks if the symbol exists in the library and if it
# does, it prepends library to the list. It is intended to be called
# multiple times with a sequence of possibly dependent libraries in
# order of least-to-most-dependent. Some libraries depend on others
# to link correctly.
macro(check_library_exists_concat LIBRARY SYMBOL VARIABLE)
check_library_exists("${LIBRARY};${CURL_LIBS}" ${SYMBOL} "${CMAKE_LIBRARY_PATH}"
${VARIABLE})
if(${VARIABLE})
set(CURL_LIBS ${LIBRARY} ${CURL_LIBS})
endif()
endmacro()
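# Illustrative usage, not part of the original curl Macros.cmake: when the
# check succeeds, the library is prepended to CURL_LIBS so that later
# check_library_exists_concat() calls link against it as well, e.g.:
#   check_library_exists_concat("idn2" idn2_lookup_ul HAVE_LIBIDN2)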
# Check if header file exists and add it to the list.
# This macro is intended to be called multiple times with a sequence of
# possibly dependent header files. Some headers depend on others to be
# compiled correctly.
macro(check_include_file_concat FILE VARIABLE)
check_include_files("${CURL_INCLUDES};${FILE}" ${VARIABLE})
if(${VARIABLE})
set(CURL_INCLUDES ${CURL_INCLUDES} ${FILE})
set(CURL_TEST_DEFINES "${CURL_TEST_DEFINES} -D${VARIABLE}")
endif()
endmacro()
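# Illustrative usage, mirroring how the main CMakeLists below calls it: each
# successful check appends the header to CURL_INCLUDES and adds -D<VARIABLE>
# to CURL_TEST_DEFINES, so subsequent checks compile against the accumulated
# headers, e.g.:
#   check_include_file_concat("sys/socket.h" HAVE_SYS_SOCKET_H)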
# For other curl specific tests, use this macro.
macro(curl_internal_test CURL_TEST)
if(NOT DEFINED "${CURL_TEST}")
set(MACRO_CHECK_FUNCTION_DEFINITIONS
"-D${CURL_TEST} ${CURL_TEST_DEFINES} ${CMAKE_REQUIRED_FLAGS}")
if(CMAKE_REQUIRED_LIBRARIES)
set(CURL_TEST_ADD_LIBRARIES
"-DLINK_LIBRARIES:STRING=${CMAKE_REQUIRED_LIBRARIES}")
endif()
try_compile(${CURL_TEST}
${CMAKE_BINARY_DIR}
${CMAKE_CURRENT_SOURCE_DIR}/CMake/CurlTests.c
CMAKE_FLAGS -DCOMPILE_DEFINITIONS:STRING=${MACRO_CHECK_FUNCTION_DEFINITIONS}
"${CURL_TEST_ADD_LIBRARIES}"
OUTPUT_VARIABLE OUTPUT)
if(${CURL_TEST})
set(${CURL_TEST} 1 CACHE INTERNAL "Curl test ${FUNCTION}")
file(APPEND ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeOutput.log
"Performing Curl Test ${CURL_TEST} passed with the following output:\n"
"${OUTPUT}\n")
else()
set(${CURL_TEST} "" CACHE INTERNAL "Curl test ${FUNCTION}")
file(APPEND ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeError.log
"Performing Curl Test ${CURL_TEST} failed with the following output:\n"
"${OUTPUT}\n")
endif()
endif()
endmacro()
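# Illustrative usage, mirroring the foreach() loops in the main CMakeLists:
# the test name doubles as the -D guard that selects the matching #ifdef
# block in CMake/CurlTests.c, e.g.:
#   curl_internal_test(HAVE_GETADDRINFO)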
macro(curl_nroff_check)
find_program(NROFF NAMES gnroff nroff)
if(NROFF)
# Need a way to write to stdin, this will do
file(WRITE "${CMAKE_CURRENT_BINARY_DIR}/nroff-input.txt" "test")
# Tests for a valid nroff option to generate a manpage
foreach(_MANOPT "-man" "-mandoc")
execute_process(COMMAND "${NROFF}" ${_MANOPT}
OUTPUT_VARIABLE NROFF_MANOPT_OUTPUT
INPUT_FILE "${CMAKE_CURRENT_BINARY_DIR}/nroff-input.txt"
ERROR_QUIET)
# Save the option if it was valid
if(NROFF_MANOPT_OUTPUT)
set(NROFF_MANOPT ${_MANOPT})
set(NROFF_USEFUL ON)
break()
endif()
endforeach()
# No need for the temporary file
file(REMOVE "${CMAKE_CURRENT_BINARY_DIR}/nroff-input.txt")
if(NOT NROFF_USEFUL)
message(WARNING "Found no *nroff option to get plaintext from man pages")
endif()
else()
message(WARNING "Found no *nroff program")
endif()
endmacro()
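# Illustrative usage, as done once in the main CMakeLists below: the macro
# sets NROFF_MANOPT and NROFF_USEFUL when a working nroff man-page option is
# found:
#   curl_nroff_check()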

View File

@ -0,0 +1,260 @@
include(CheckCSourceCompiles)
# The beginning of the sources (macros and includes)
set(_source_epilogue "#undef inline")
macro(add_header_include check header)
if(${check})
set(_source_epilogue "${_source_epilogue}\n#include <${header}>")
endif()
endmacro()
set(signature_call_conv)
if(HAVE_WINDOWS_H)
add_header_include(HAVE_WINSOCK2_H "winsock2.h")
add_header_include(HAVE_WINDOWS_H "windows.h")
add_header_include(HAVE_WINSOCK_H "winsock.h")
set(_source_epilogue
"${_source_epilogue}\n#ifndef WIN32_LEAN_AND_MEAN\n#define WIN32_LEAN_AND_MEAN\n#endif")
set(signature_call_conv "PASCAL")
if(HAVE_LIBWS2_32)
set(CMAKE_REQUIRED_LIBRARIES ws2_32)
endif()
else()
add_header_include(HAVE_SYS_TYPES_H "sys/types.h")
add_header_include(HAVE_SYS_SOCKET_H "sys/socket.h")
endif()
set(CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY)
check_c_source_compiles("${_source_epilogue}
int main(void) {
recv(0, 0, 0, 0);
return 0;
}" curl_cv_recv)
if(curl_cv_recv)
if(NOT DEFINED curl_cv_func_recv_args OR "${curl_cv_func_recv_args}" STREQUAL "unknown")
foreach(recv_retv "int" "ssize_t" )
foreach(recv_arg1 "SOCKET" "int" )
foreach(recv_arg2 "char *" "void *" )
foreach(recv_arg3 "int" "size_t" "socklen_t" "unsigned int")
foreach(recv_arg4 "int" "unsigned int")
if(NOT curl_cv_func_recv_done)
unset(curl_cv_func_recv_test CACHE)
check_c_source_compiles("
${_source_epilogue}
extern ${recv_retv} ${signature_call_conv}
recv(${recv_arg1}, ${recv_arg2}, ${recv_arg3}, ${recv_arg4});
int main(void) {
${recv_arg1} s=0;
${recv_arg2} buf=0;
${recv_arg3} len=0;
${recv_arg4} flags=0;
${recv_retv} res = recv(s, buf, len, flags);
(void) res;
return 0;
}"
curl_cv_func_recv_test)
if(curl_cv_func_recv_test)
set(curl_cv_func_recv_args
"${recv_arg1},${recv_arg2},${recv_arg3},${recv_arg4},${recv_retv}")
set(RECV_TYPE_ARG1 "${recv_arg1}")
set(RECV_TYPE_ARG2 "${recv_arg2}")
set(RECV_TYPE_ARG3 "${recv_arg3}")
set(RECV_TYPE_ARG4 "${recv_arg4}")
set(RECV_TYPE_RETV "${recv_retv}")
set(HAVE_RECV 1)
set(curl_cv_func_recv_done 1)
endif()
endif()
endforeach()
endforeach()
endforeach()
endforeach()
endforeach()
else()
string(REGEX REPLACE "^([^,]*),[^,]*,[^,]*,[^,]*,[^,]*$" "\\1" RECV_TYPE_ARG1 "${curl_cv_func_recv_args}")
string(REGEX REPLACE "^[^,]*,([^,]*),[^,]*,[^,]*,[^,]*$" "\\1" RECV_TYPE_ARG2 "${curl_cv_func_recv_args}")
string(REGEX REPLACE "^[^,]*,[^,]*,([^,]*),[^,]*,[^,]*$" "\\1" RECV_TYPE_ARG3 "${curl_cv_func_recv_args}")
string(REGEX REPLACE "^[^,]*,[^,]*,[^,]*,([^,]*),[^,]*$" "\\1" RECV_TYPE_ARG4 "${curl_cv_func_recv_args}")
string(REGEX REPLACE "^[^,]*,[^,]*,[^,]*,[^,]*,([^,]*)$" "\\1" RECV_TYPE_RETV "${curl_cv_func_recv_args}")
endif()
if("${curl_cv_func_recv_args}" STREQUAL "unknown")
message(FATAL_ERROR "Cannot find proper types to use for recv args")
endif()
else()
message(FATAL_ERROR "Unable to link function recv")
endif()
set(curl_cv_func_recv_args "${curl_cv_func_recv_args}" CACHE INTERNAL "Arguments for recv")
set(HAVE_RECV 1)
check_c_source_compiles("${_source_epilogue}
int main(void) {
send(0, 0, 0, 0);
return 0;
}" curl_cv_send)
if(curl_cv_send)
if(NOT DEFINED curl_cv_func_send_args OR "${curl_cv_func_send_args}" STREQUAL "unknown")
foreach(send_retv "int" "ssize_t" )
foreach(send_arg1 "SOCKET" "int" "ssize_t" )
foreach(send_arg2 "const char *" "const void *" "void *" "char *")
foreach(send_arg3 "int" "size_t" "socklen_t" "unsigned int")
foreach(send_arg4 "int" "unsigned int")
if(NOT curl_cv_func_send_done)
unset(curl_cv_func_send_test CACHE)
check_c_source_compiles("
${_source_epilogue}
extern ${send_retv} ${signature_call_conv}
send(${send_arg1}, ${send_arg2}, ${send_arg3}, ${send_arg4});
int main(void) {
${send_arg1} s=0;
${send_arg2} buf=0;
${send_arg3} len=0;
${send_arg4} flags=0;
${send_retv} res = send(s, buf, len, flags);
(void) res;
return 0;
}"
curl_cv_func_send_test)
if(curl_cv_func_send_test)
string(REGEX REPLACE "(const) .*" "\\1" send_qual_arg2 "${send_arg2}")
string(REGEX REPLACE "const (.*)" "\\1" send_arg2 "${send_arg2}")
set(curl_cv_func_send_args
"${send_arg1},${send_arg2},${send_arg3},${send_arg4},${send_retv},${send_qual_arg2}")
set(SEND_TYPE_ARG1 "${send_arg1}")
set(SEND_TYPE_ARG2 "${send_arg2}")
set(SEND_TYPE_ARG3 "${send_arg3}")
set(SEND_TYPE_ARG4 "${send_arg4}")
set(SEND_TYPE_RETV "${send_retv}")
set(HAVE_SEND 1)
set(curl_cv_func_send_done 1)
endif()
endif()
endforeach()
endforeach()
endforeach()
endforeach()
endforeach()
else()
string(REGEX REPLACE "^([^,]*),[^,]*,[^,]*,[^,]*,[^,]*,[^,]*$" "\\1" SEND_TYPE_ARG1 "${curl_cv_func_send_args}")
string(REGEX REPLACE "^[^,]*,([^,]*),[^,]*,[^,]*,[^,]*,[^,]*$" "\\1" SEND_TYPE_ARG2 "${curl_cv_func_send_args}")
string(REGEX REPLACE "^[^,]*,[^,]*,([^,]*),[^,]*,[^,]*,[^,]*$" "\\1" SEND_TYPE_ARG3 "${curl_cv_func_send_args}")
string(REGEX REPLACE "^[^,]*,[^,]*,[^,]*,([^,]*),[^,]*,[^,]*$" "\\1" SEND_TYPE_ARG4 "${curl_cv_func_send_args}")
string(REGEX REPLACE "^[^,]*,[^,]*,[^,]*,[^,]*,([^,]*),[^,]*$" "\\1" SEND_TYPE_RETV "${curl_cv_func_send_args}")
string(REGEX REPLACE "^[^,]*,[^,]*,[^,]*,[^,]*,[^,]*,([^,]*)$" "\\1" SEND_QUAL_ARG2 "${curl_cv_func_send_args}")
endif()
if("${curl_cv_func_send_args}" STREQUAL "unknown")
message(FATAL_ERROR "Cannot find proper types to use for send args")
endif()
set(SEND_QUAL_ARG2 "const")
else()
message(FATAL_ERROR "Unable to link function send")
endif()
set(curl_cv_func_send_args "${curl_cv_func_send_args}" CACHE INTERNAL "Arguments for send")
set(HAVE_SEND 1)
check_c_source_compiles("${_source_epilogue}
int main(void) {
int flag = MSG_NOSIGNAL;
(void)flag;
return 0;
}" HAVE_MSG_NOSIGNAL)
if(NOT HAVE_WINDOWS_H)
add_header_include(HAVE_SYS_TIME_H "sys/time.h")
add_header_include(TIME_WITH_SYS_TIME "time.h")
add_header_include(HAVE_TIME_H "time.h")
endif()
check_c_source_compiles("${_source_epilogue}
int main(void) {
struct timeval ts;
ts.tv_sec = 0;
ts.tv_usec = 0;
(void)ts;
return 0;
}" HAVE_STRUCT_TIMEVAL)
set(HAVE_SIG_ATOMIC_T 1)
set(CMAKE_REQUIRED_FLAGS)
if(HAVE_SIGNAL_H)
set(CMAKE_REQUIRED_FLAGS "-DHAVE_SIGNAL_H")
set(CMAKE_EXTRA_INCLUDE_FILES "signal.h")
endif()
check_type_size("sig_atomic_t" SIZEOF_SIG_ATOMIC_T)
if(HAVE_SIZEOF_SIG_ATOMIC_T)
check_c_source_compiles("
#ifdef HAVE_SIGNAL_H
# include <signal.h>
#endif
int main(void) {
static volatile sig_atomic_t dummy = 0;
(void)dummy;
return 0;
}" HAVE_SIG_ATOMIC_T_NOT_VOLATILE)
if(NOT HAVE_SIG_ATOMIC_T_NOT_VOLATILE)
set(HAVE_SIG_ATOMIC_T_VOLATILE 1)
endif()
endif()
if(HAVE_WINDOWS_H)
set(CMAKE_EXTRA_INCLUDE_FILES winsock2.h)
else()
set(CMAKE_EXTRA_INCLUDE_FILES)
if(HAVE_SYS_SOCKET_H)
set(CMAKE_EXTRA_INCLUDE_FILES sys/socket.h)
endif()
endif()
check_type_size("struct sockaddr_storage" SIZEOF_STRUCT_SOCKADDR_STORAGE)
if(HAVE_SIZEOF_STRUCT_SOCKADDR_STORAGE)
set(HAVE_STRUCT_SOCKADDR_STORAGE 1)
endif()
unset(CMAKE_TRY_COMPILE_TARGET_TYPE)
if(NOT DEFINED CMAKE_TOOLCHAIN_FILE)
# if not cross-compilation...
include(CheckCSourceRuns)
set(CMAKE_REQUIRED_FLAGS "")
if(HAVE_SYS_POLL_H)
set(CMAKE_REQUIRED_FLAGS "-DHAVE_SYS_POLL_H")
elseif(HAVE_POLL_H)
set(CMAKE_REQUIRED_FLAGS "-DHAVE_POLL_H")
endif()
check_c_source_runs("
#include <stdlib.h>
#include <sys/time.h>
#ifdef HAVE_SYS_POLL_H
# include <sys/poll.h>
#elif HAVE_POLL_H
# include <poll.h>
#endif
int main(void)
{
if(0 != poll(0, 0, 10)) {
return 1; /* fail */
}
else {
/* detect the 10.12 poll() breakage */
struct timeval before, after;
int rc;
size_t us;
gettimeofday(&before, NULL);
rc = poll(NULL, 0, 500);
gettimeofday(&after, NULL);
us = (after.tv_sec - before.tv_sec) * 1000000 +
(after.tv_usec - before.tv_usec);
if(us < 400000) {
return 1;
}
}
return 0;
}" HAVE_POLL_FINE)
endif()

View File

@ -0,0 +1,28 @@
#***************************************************************************
# _ _ ____ _
# Project ___| | | | _ \| |
# / __| | | | |_) | |
# | (__| |_| | _ <| |___
# \___|\___/|_| \_\_____|
#
# Copyright (C) 1998 - 2019, Daniel Stenberg, <daniel@haxx.se>, et al.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at https://curl.haxx.se/docs/copyright.html.
#
# You may opt to use, copy, modify, merge, publish, distribute and/or sell
# copies of the Software, and permit persons to whom the Software is
# furnished to do so, under the terms of the COPYING file.
#
# This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
# KIND, either express or implied.
#
###########################################################################
CMake files under this directory were reused from the curl project.
Here are links to the original source files:
https://github.com/curl/curl/blob/master/CMake/CurlSymbolHiding.cmake
https://github.com/curl/curl/blob/master/CMake/CurlTests.c
https://github.com/curl/curl/blob/master/CMake/Macros.cmake
https://github.com/curl/curl/blob/master/CMake/OtherTests.cmake

File diff suppressed because it is too large

View File

@ -0,0 +1,634 @@
#***************************************************************************
# _ _ ____ _
# Project ___| | | | _ \| |
# / __| | | | |_) | |
# | (__| |_| | _ <| |___
# \___|\___/|_| \_\_____|
#
# Copyright (C) 1998 - 2019, Daniel Stenberg, <daniel@haxx.se>, et al.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at https://curl.haxx.se/docs/copyright.html.
#
# You may opt to use, copy, modify, merge, publish, distribute and/or sell
# copies of the Software, and permit persons to whom the Software is
# furnished to do so, under the terms of the COPYING file.
#
# This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
# KIND, either express or implied.
#
###########################################################################
# NOTE:
# This file is a shrunken and reworked version of the original curl CMakeLists.txt.
# Original file link: https://github.com/curl/curl/blob/3b8bbbbd1609c638a3d3d0acb148a33dedb67be3/CMakeLists.txt
# If you need to update the curl build, you can find a patch file in this directory
# and apply it to a fresh copy of the original CMakeLists.txt.
cmake_minimum_required(VERSION 3.0 FATAL_ERROR)
SET(CURL_SOURCE_DIR ${ClickHouse_SOURCE_DIR}/contrib/curl)
SET(CURL_LIBRARY_DIR ${CURL_SOURCE_DIR}/lib)
set(CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/CMake;${CMAKE_MODULE_PATH}")
# Disable status messages when performing checks
set(CMAKE_REQUIRED_QUIET TRUE)
include(Macros)
include(CMakeDependentOption)
include(CheckCCompilerFlag)
file(READ ${CURL_SOURCE_DIR}/include/curl/curlver.h CURL_VERSION_H_CONTENTS)
string(REGEX MATCH "#define LIBCURL_VERSION \"[^\"]*"
CURL_VERSION ${CURL_VERSION_H_CONTENTS})
string(REGEX REPLACE "[^\"]+\"" "" CURL_VERSION ${CURL_VERSION})
string(REGEX MATCH "#define LIBCURL_VERSION_NUM 0x[0-9a-fA-F]+"
CURL_VERSION_NUM ${CURL_VERSION_H_CONTENTS})
string(REGEX REPLACE "[^0]+0x" "" CURL_VERSION_NUM ${CURL_VERSION_NUM})
message(STATUS "Use curl version=[${CURL_VERSION}]")
set(OPERATING_SYSTEM "${CMAKE_SYSTEM_NAME}")
set(OS "\"${CMAKE_SYSTEM_NAME}\"")
option(PICKY_COMPILER "Enable picky compiler options" ON)
option(ENABLE_THREADED_RESOLVER "Set to ON to enable threaded DNS lookup" ON)
if(CMAKE_COMPILER_IS_GNUCC OR CMAKE_COMPILER_IS_CLANG)
if(PICKY_COMPILER)
foreach(_CCOPT -pedantic -Wall -W -Wpointer-arith -Wwrite-strings -Wunused -Wshadow -Winline -Wnested-externs -Wmissing-declarations -Wmissing-prototypes -Wno-long-long -Wfloat-equal -Wno-multichar -Wsign-compare -Wundef -Wno-format-nonliteral -Wendif-labels -Wstrict-prototypes -Wdeclaration-after-statement -Wstrict-aliasing=3 -Wcast-align -Wtype-limits -Wold-style-declaration -Wmissing-parameter-type -Wempty-body -Wclobbered -Wignored-qualifiers -Wconversion -Wno-sign-conversion -Wvla -Wdouble-promotion -Wno-system-headers -Wno-pedantic-ms-format)
# surprisingly, CHECK_C_COMPILER_FLAG needs a new variable to store each new
# test result in.
check_c_compiler_flag(${_CCOPT} OPT${_CCOPT})
if(OPT${_CCOPT})
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${_CCOPT}")
endif()
endforeach()
endif()
endif()
# For debug libs and exes, add "-d" postfix
if(NOT DEFINED CMAKE_DEBUG_POSTFIX)
set(CMAKE_DEBUG_POSTFIX "-d")
endif()
# initialize CURL_LIBS
set(CURL_LIBS "")
include(CurlSymbolHiding)
# Http only
set(CURL_DISABLE_FTP ON)
set(CURL_DISABLE_LDAP ON)
set(CURL_DISABLE_LDAPS ON)
set(CURL_DISABLE_TELNET ON)
set(CURL_DISABLE_DICT ON)
set(CURL_DISABLE_FILE ON)
set(CURL_DISABLE_TFTP ON)
set(CURL_DISABLE_RTSP ON)
set(CURL_DISABLE_POP3 ON)
set(CURL_DISABLE_IMAP ON)
set(CURL_DISABLE_SMTP ON)
set(CURL_DISABLE_GOPHER ON)
option(CURL_DISABLE_COOKIES "to disable cookies support" OFF)
mark_as_advanced(CURL_DISABLE_COOKIES)
option(CURL_DISABLE_CRYPTO_AUTH "to disable cryptographic authentication" OFF)
mark_as_advanced(CURL_DISABLE_CRYPTO_AUTH)
option(CURL_DISABLE_VERBOSE_STRINGS "to disable verbose strings" OFF)
mark_as_advanced(CURL_DISABLE_VERBOSE_STRINGS)
option(ENABLE_IPV6 "Define if you want to enable IPv6 support" ON)
mark_as_advanced(ENABLE_IPV6)
if(ENABLE_IPV6 AND NOT WIN32)
include(CheckStructHasMember)
check_struct_has_member("struct sockaddr_in6" sin6_addr "netinet/in.h"
HAVE_SOCKADDR_IN6_SIN6_ADDR)
check_struct_has_member("struct sockaddr_in6" sin6_scope_id "netinet/in.h"
HAVE_SOCKADDR_IN6_SIN6_SCOPE_ID)
if(NOT HAVE_SOCKADDR_IN6_SIN6_ADDR)
message(WARNING "struct sockaddr_in6 not available, disabling IPv6 support")
# Force the feature off as this name is used as guard macro...
set(ENABLE_IPV6 OFF
CACHE BOOL "Define if you want to enable IPv6 support" FORCE)
endif()
endif()
curl_nroff_check()
# We need ansi c-flags, especially on HP
set(CMAKE_C_FLAGS "${CMAKE_ANSI_CFLAGS} ${CMAKE_C_FLAGS}")
set(CMAKE_REQUIRED_FLAGS ${CMAKE_ANSI_CFLAGS})
# Include all the necessary files for macros
include(CheckFunctionExists)
include(CheckIncludeFile)
include(CheckIncludeFiles)
include(CheckLibraryExists)
include(CheckSymbolExists)
include(CheckTypeSize)
include(CheckCSourceCompiles)
if(ENABLE_THREADED_RESOLVER)
find_package(Threads REQUIRED)
if(WIN32)
set(USE_THREADS_WIN32 ON)
else()
set(USE_THREADS_POSIX ${CMAKE_USE_PTHREADS_INIT})
set(HAVE_PTHREAD_H ${CMAKE_USE_PTHREADS_INIT})
endif()
set(CURL_LIBS ${CURL_LIBS} ${CMAKE_THREAD_LIBS_INIT})
endif()
# Check for all needed libraries
check_library_exists_concat("${CMAKE_DL_LIBS}" dlopen HAVE_LIBDL)
check_library_exists_concat("socket" connect HAVE_LIBSOCKET)
check_library_exists("c" gethostbyname "" NOT_NEED_LIBNSL)
check_function_exists(gethostname HAVE_GETHOSTNAME)
# From cmake/find/ssl.cmake
if (OPENSSL_FOUND)
set(SSL_ENABLED ON)
set(USE_OPENSSL ON)
list(APPEND CURL_LIBS ${OPENSSL_LIBRARIES})
check_include_file("openssl/crypto.h" HAVE_OPENSSL_CRYPTO_H)
check_include_file("openssl/err.h" HAVE_OPENSSL_ERR_H)
check_include_file("openssl/pem.h" HAVE_OPENSSL_PEM_H)
check_include_file("openssl/rsa.h" HAVE_OPENSSL_RSA_H)
check_include_file("openssl/ssl.h" HAVE_OPENSSL_SSL_H)
check_include_file("openssl/x509.h" HAVE_OPENSSL_X509_H)
check_include_file("openssl/rand.h" HAVE_OPENSSL_RAND_H)
check_symbol_exists(RAND_status "${CURL_INCLUDES}" HAVE_RAND_STATUS)
check_symbol_exists(RAND_screen "${CURL_INCLUDES}" HAVE_RAND_SCREEN)
check_symbol_exists(RAND_egd "${CURL_INCLUDES}" HAVE_RAND_EGD)
endif()
# Check for idn
check_library_exists_concat("idn2" idn2_lookup_ul HAVE_LIBIDN2)
# Check for symbol dlopen (same as HAVE_LIBDL)
check_library_exists("${CURL_LIBS}" dlopen "" HAVE_DLOPEN)
# From /cmake/find/zlib.cmake
if (ZLIB_FOUND)
set(HAVE_ZLIB_H ON)
set(HAVE_LIBZ ON)
set(USE_ZLIB ON)
list(APPEND CURL_LIBS ${ZLIB_LIBRARIES})
endif()
option(ENABLE_UNIX_SOCKETS "Define if you want Unix domain sockets support" ON)
if(ENABLE_UNIX_SOCKETS)
include(CheckStructHasMember)
check_struct_has_member("struct sockaddr_un" sun_path "sys/un.h" USE_UNIX_SOCKETS)
else()
unset(USE_UNIX_SOCKETS CACHE)
endif()
# CA handling
# Explicitly set to most common case
if (OPENSSL_FOUND)
set(CURL_CA_BUNDLE "/etc/ssl/certs/ca-certificates.crt")
set(CURL_CA_BUNDLE_SET TRUE CACHE BOOL "Path to the CA bundle has been set")
set(CURL_CA_PATH "/etc/ssl/certs")
set(CURL_CA_PATH_SET TRUE CACHE BOOL "Path to the CA bundle has been set")
endif()
check_include_file_concat("stdio.h" HAVE_STDIO_H)
check_include_file_concat("inttypes.h" HAVE_INTTYPES_H)
check_include_file_concat("sys/filio.h" HAVE_SYS_FILIO_H)
check_include_file_concat("sys/ioctl.h" HAVE_SYS_IOCTL_H)
check_include_file_concat("sys/param.h" HAVE_SYS_PARAM_H)
check_include_file_concat("sys/poll.h" HAVE_SYS_POLL_H)
check_include_file_concat("sys/resource.h" HAVE_SYS_RESOURCE_H)
check_include_file_concat("sys/select.h" HAVE_SYS_SELECT_H)
check_include_file_concat("sys/socket.h" HAVE_SYS_SOCKET_H)
check_include_file_concat("sys/sockio.h" HAVE_SYS_SOCKIO_H)
check_include_file_concat("sys/stat.h" HAVE_SYS_STAT_H)
check_include_file_concat("sys/time.h" HAVE_SYS_TIME_H)
check_include_file_concat("sys/types.h" HAVE_SYS_TYPES_H)
check_include_file_concat("sys/uio.h" HAVE_SYS_UIO_H)
check_include_file_concat("sys/un.h" HAVE_SYS_UN_H)
check_include_file_concat("sys/utime.h" HAVE_SYS_UTIME_H)
check_include_file_concat("sys/xattr.h" HAVE_SYS_XATTR_H)
check_include_file_concat("alloca.h" HAVE_ALLOCA_H)
check_include_file_concat("arpa/inet.h" HAVE_ARPA_INET_H)
check_include_file_concat("arpa/tftp.h" HAVE_ARPA_TFTP_H)
check_include_file_concat("assert.h" HAVE_ASSERT_H)
check_include_file_concat("crypto.h" HAVE_CRYPTO_H)
check_include_file_concat("des.h" HAVE_DES_H)
check_include_file_concat("err.h" HAVE_ERR_H)
check_include_file_concat("errno.h" HAVE_ERRNO_H)
check_include_file_concat("fcntl.h" HAVE_FCNTL_H)
check_include_file_concat("idn2.h" HAVE_IDN2_H)
check_include_file_concat("ifaddrs.h" HAVE_IFADDRS_H)
check_include_file_concat("io.h" HAVE_IO_H)
check_include_file_concat("krb.h" HAVE_KRB_H)
check_include_file_concat("libgen.h" HAVE_LIBGEN_H)
check_include_file_concat("locale.h" HAVE_LOCALE_H)
check_include_file_concat("net/if.h" HAVE_NET_IF_H)
check_include_file_concat("netdb.h" HAVE_NETDB_H)
check_include_file_concat("netinet/in.h" HAVE_NETINET_IN_H)
check_include_file_concat("netinet/tcp.h" HAVE_NETINET_TCP_H)
check_include_file_concat("pem.h" HAVE_PEM_H)
check_include_file_concat("poll.h" HAVE_POLL_H)
check_include_file_concat("pwd.h" HAVE_PWD_H)
check_include_file_concat("rsa.h" HAVE_RSA_H)
check_include_file_concat("setjmp.h" HAVE_SETJMP_H)
check_include_file_concat("sgtty.h" HAVE_SGTTY_H)
check_include_file_concat("signal.h" HAVE_SIGNAL_H)
check_include_file_concat("ssl.h" HAVE_SSL_H)
check_include_file_concat("stdbool.h" HAVE_STDBOOL_H)
check_include_file_concat("stdint.h" HAVE_STDINT_H)
check_include_file_concat("stdio.h" HAVE_STDIO_H)
check_include_file_concat("stdlib.h" HAVE_STDLIB_H)
check_include_file_concat("string.h" HAVE_STRING_H)
check_include_file_concat("strings.h" HAVE_STRINGS_H)
check_include_file_concat("stropts.h" HAVE_STROPTS_H)
check_include_file_concat("termio.h" HAVE_TERMIO_H)
check_include_file_concat("termios.h" HAVE_TERMIOS_H)
check_include_file_concat("time.h" HAVE_TIME_H)
check_include_file_concat("unistd.h" HAVE_UNISTD_H)
check_include_file_concat("utime.h" HAVE_UTIME_H)
check_include_file_concat("x509.h" HAVE_X509_H)
check_include_file_concat("process.h" HAVE_PROCESS_H)
check_include_file_concat("stddef.h" HAVE_STDDEF_H)
check_include_file_concat("dlfcn.h" HAVE_DLFCN_H)
check_include_file_concat("malloc.h" HAVE_MALLOC_H)
check_include_file_concat("memory.h" HAVE_MEMORY_H)
check_include_file_concat("netinet/if_ether.h" HAVE_NETINET_IF_ETHER_H)
check_include_file_concat("stdint.h" HAVE_STDINT_H)
check_include_file_concat("sockio.h" HAVE_SOCKIO_H)
check_include_file_concat("sys/utsname.h" HAVE_SYS_UTSNAME_H)
check_type_size(size_t SIZEOF_SIZE_T)
check_type_size(ssize_t SIZEOF_SSIZE_T)
check_type_size("long long" SIZEOF_LONG_LONG)
check_type_size("long" SIZEOF_LONG)
check_type_size("short" SIZEOF_SHORT)
check_type_size("int" SIZEOF_INT)
check_type_size("__int64" SIZEOF___INT64)
check_type_size("long double" SIZEOF_LONG_DOUBLE)
check_type_size("time_t" SIZEOF_TIME_T)
if(NOT HAVE_SIZEOF_SSIZE_T)
if(SIZEOF_LONG EQUAL SIZEOF_SIZE_T)
set(ssize_t long)
endif()
if(NOT ssize_t AND SIZEOF___INT64 EQUAL SIZEOF_SIZE_T)
set(ssize_t __int64)
endif()
endif()
# off_t is sized later, after the HAVE_FILE_OFFSET_BITS test
if(HAVE_SIZEOF_LONG_LONG)
set(HAVE_LONGLONG 1)
set(HAVE_LL 1)
endif()
find_file(RANDOM_FILE urandom /dev)
mark_as_advanced(RANDOM_FILE)
# Check for some functions that are used
if(HAVE_LIBWS2_32)
set(CMAKE_REQUIRED_LIBRARIES ws2_32)
elseif(HAVE_LIBSOCKET)
set(CMAKE_REQUIRED_LIBRARIES socket)
endif()
check_symbol_exists(basename "${CURL_INCLUDES}" HAVE_BASENAME)
check_symbol_exists(socket "${CURL_INCLUDES}" HAVE_SOCKET)
check_symbol_exists(select "${CURL_INCLUDES}" HAVE_SELECT)
check_symbol_exists(poll "${CURL_INCLUDES}" HAVE_POLL)
check_symbol_exists(strdup "${CURL_INCLUDES}" HAVE_STRDUP)
check_symbol_exists(strstr "${CURL_INCLUDES}" HAVE_STRSTR)
check_symbol_exists(strtok_r "${CURL_INCLUDES}" HAVE_STRTOK_R)
check_symbol_exists(strftime "${CURL_INCLUDES}" HAVE_STRFTIME)
check_symbol_exists(uname "${CURL_INCLUDES}" HAVE_UNAME)
check_symbol_exists(strcasecmp "${CURL_INCLUDES}" HAVE_STRCASECMP)
check_symbol_exists(stricmp "${CURL_INCLUDES}" HAVE_STRICMP)
check_symbol_exists(strcmpi "${CURL_INCLUDES}" HAVE_STRCMPI)
check_symbol_exists(strncmpi "${CURL_INCLUDES}" HAVE_STRNCMPI)
check_symbol_exists(alarm "${CURL_INCLUDES}" HAVE_ALARM)
if(NOT HAVE_STRNCMPI)
set(HAVE_STRCMPI)
endif()
check_symbol_exists(gethostbyaddr "${CURL_INCLUDES}" HAVE_GETHOSTBYADDR)
check_symbol_exists(gethostbyaddr_r "${CURL_INCLUDES}" HAVE_GETHOSTBYADDR_R)
check_symbol_exists(gettimeofday "${CURL_INCLUDES}" HAVE_GETTIMEOFDAY)
check_symbol_exists(inet_addr "${CURL_INCLUDES}" HAVE_INET_ADDR)
check_symbol_exists(inet_ntoa "${CURL_INCLUDES}" HAVE_INET_NTOA)
check_symbol_exists(inet_ntoa_r "${CURL_INCLUDES}" HAVE_INET_NTOA_R)
check_symbol_exists(tcsetattr "${CURL_INCLUDES}" HAVE_TCSETATTR)
check_symbol_exists(tcgetattr "${CURL_INCLUDES}" HAVE_TCGETATTR)
check_symbol_exists(perror "${CURL_INCLUDES}" HAVE_PERROR)
check_symbol_exists(closesocket "${CURL_INCLUDES}" HAVE_CLOSESOCKET)
check_symbol_exists(setvbuf "${CURL_INCLUDES}" HAVE_SETVBUF)
check_symbol_exists(sigsetjmp "${CURL_INCLUDES}" HAVE_SIGSETJMP)
check_symbol_exists(getpass_r "${CURL_INCLUDES}" HAVE_GETPASS_R)
check_symbol_exists(strlcat "${CURL_INCLUDES}" HAVE_STRLCAT)
check_symbol_exists(getpwuid "${CURL_INCLUDES}" HAVE_GETPWUID)
check_symbol_exists(getpwuid_r "${CURL_INCLUDES}" HAVE_GETPWUID_R)
check_symbol_exists(geteuid "${CURL_INCLUDES}" HAVE_GETEUID)
check_symbol_exists(usleep "${CURL_INCLUDES}" HAVE_USLEEP)
check_symbol_exists(utime "${CURL_INCLUDES}" HAVE_UTIME)
check_symbol_exists(gmtime_r "${CURL_INCLUDES}" HAVE_GMTIME_R)
check_symbol_exists(localtime_r "${CURL_INCLUDES}" HAVE_LOCALTIME_R)
check_symbol_exists(gethostbyname "${CURL_INCLUDES}" HAVE_GETHOSTBYNAME)
check_symbol_exists(gethostbyname_r "${CURL_INCLUDES}" HAVE_GETHOSTBYNAME_R)
check_symbol_exists(signal "${CURL_INCLUDES}" HAVE_SIGNAL_FUNC)
check_symbol_exists(SIGALRM "${CURL_INCLUDES}" HAVE_SIGNAL_MACRO)
if(HAVE_SIGNAL_FUNC AND HAVE_SIGNAL_MACRO)
set(HAVE_SIGNAL 1)
endif()
check_symbol_exists(uname "${CURL_INCLUDES}" HAVE_UNAME)
check_symbol_exists(strtoll "${CURL_INCLUDES}" HAVE_STRTOLL)
check_symbol_exists(_strtoi64 "${CURL_INCLUDES}" HAVE__STRTOI64)
check_symbol_exists(strerror_r "${CURL_INCLUDES}" HAVE_STRERROR_R)
check_symbol_exists(siginterrupt "${CURL_INCLUDES}" HAVE_SIGINTERRUPT)
check_symbol_exists(perror "${CURL_INCLUDES}" HAVE_PERROR)
check_symbol_exists(fork "${CURL_INCLUDES}" HAVE_FORK)
check_symbol_exists(getaddrinfo "${CURL_INCLUDES}" HAVE_GETADDRINFO)
check_symbol_exists(freeaddrinfo "${CURL_INCLUDES}" HAVE_FREEADDRINFO)
check_symbol_exists(freeifaddrs "${CURL_INCLUDES}" HAVE_FREEIFADDRS)
check_symbol_exists(pipe "${CURL_INCLUDES}" HAVE_PIPE)
check_symbol_exists(ftruncate "${CURL_INCLUDES}" HAVE_FTRUNCATE)
check_symbol_exists(getprotobyname "${CURL_INCLUDES}" HAVE_GETPROTOBYNAME)
check_symbol_exists(getpeername "${CURL_INCLUDES}" HAVE_GETPEERNAME)
check_symbol_exists(getsockname "${CURL_INCLUDES}" HAVE_GETSOCKNAME)
check_symbol_exists(if_nametoindex "${CURL_INCLUDES}" HAVE_IF_NAMETOINDEX)
check_symbol_exists(getrlimit "${CURL_INCLUDES}" HAVE_GETRLIMIT)
check_symbol_exists(setlocale "${CURL_INCLUDES}" HAVE_SETLOCALE)
check_symbol_exists(setmode "${CURL_INCLUDES}" HAVE_SETMODE)
check_symbol_exists(setrlimit "${CURL_INCLUDES}" HAVE_SETRLIMIT)
check_symbol_exists(fcntl "${CURL_INCLUDES}" HAVE_FCNTL)
check_symbol_exists(ioctl "${CURL_INCLUDES}" HAVE_IOCTL)
check_symbol_exists(setsockopt "${CURL_INCLUDES}" HAVE_SETSOCKOPT)
check_function_exists(mach_absolute_time HAVE_MACH_ABSOLUTE_TIME)
check_symbol_exists(fsetxattr "${CURL_INCLUDES}" HAVE_FSETXATTR)
if(HAVE_FSETXATTR)
foreach(CURL_TEST HAVE_FSETXATTR_5 HAVE_FSETXATTR_6)
curl_internal_test(${CURL_TEST})
endforeach()
endif()
# sigaction and sigsetjmp are special. Use special mechanism for
# detecting those, but only if previous attempt failed.
if(HAVE_SIGNAL_H)
check_symbol_exists(sigaction "signal.h" HAVE_SIGACTION)
endif()
if(NOT HAVE_SIGSETJMP)
if(HAVE_SETJMP_H)
check_symbol_exists(sigsetjmp "setjmp.h" HAVE_MACRO_SIGSETJMP)
if(HAVE_MACRO_SIGSETJMP)
set(HAVE_SIGSETJMP 1)
endif()
endif()
endif()
# If there is no stricmp(), do not allow LDAP to parse URLs
if(NOT HAVE_STRICMP)
set(HAVE_LDAP_URL_PARSE 1)
endif()
# Do curl specific tests
foreach(CURL_TEST
HAVE_FCNTL_O_NONBLOCK
HAVE_IOCTLSOCKET
HAVE_IOCTLSOCKET_CAMEL
HAVE_IOCTLSOCKET_CAMEL_FIONBIO
HAVE_IOCTLSOCKET_FIONBIO
HAVE_IOCTL_FIONBIO
HAVE_IOCTL_SIOCGIFADDR
HAVE_SETSOCKOPT_SO_NONBLOCK
HAVE_SOCKADDR_IN6_SIN6_SCOPE_ID
TIME_WITH_SYS_TIME
HAVE_O_NONBLOCK
HAVE_GETHOSTBYADDR_R_5
HAVE_GETHOSTBYADDR_R_7
HAVE_GETHOSTBYADDR_R_8
HAVE_GETHOSTBYADDR_R_5_REENTRANT
HAVE_GETHOSTBYADDR_R_7_REENTRANT
HAVE_GETHOSTBYADDR_R_8_REENTRANT
HAVE_GETHOSTBYNAME_R_3
HAVE_GETHOSTBYNAME_R_5
HAVE_GETHOSTBYNAME_R_6
HAVE_GETHOSTBYNAME_R_3_REENTRANT
HAVE_GETHOSTBYNAME_R_5_REENTRANT
HAVE_GETHOSTBYNAME_R_6_REENTRANT
HAVE_IN_ADDR_T
HAVE_BOOL_T
STDC_HEADERS
RETSIGTYPE_TEST
HAVE_INET_NTOA_R_DECL
HAVE_INET_NTOA_R_DECL_REENTRANT
HAVE_GETADDRINFO
HAVE_FILE_OFFSET_BITS
HAVE_VARIADIC_MACROS_C99
HAVE_VARIADIC_MACROS_GCC
)
curl_internal_test(${CURL_TEST})
endforeach()
if(HAVE_FILE_OFFSET_BITS)
set(_FILE_OFFSET_BITS 64)
set(CMAKE_REQUIRED_FLAGS "-D_FILE_OFFSET_BITS=64")
endif()
check_type_size("off_t" SIZEOF_OFF_T)
# include this header to get the type
set(CMAKE_REQUIRED_INCLUDES "${CURL_SOURCE_DIR}/include")
set(CMAKE_EXTRA_INCLUDE_FILES "curl/system.h")
check_type_size("curl_off_t" SIZEOF_CURL_OFF_T)
set(CMAKE_EXTRA_INCLUDE_FILES "")
foreach(CURL_TEST
HAVE_GLIBC_STRERROR_R
HAVE_POSIX_STRERROR_R
)
curl_internal_test(${CURL_TEST})
endforeach()
# Check for reentrant
foreach(CURL_TEST
HAVE_GETHOSTBYADDR_R_5
HAVE_GETHOSTBYADDR_R_7
HAVE_GETHOSTBYADDR_R_8
HAVE_GETHOSTBYNAME_R_3
HAVE_GETHOSTBYNAME_R_5
HAVE_GETHOSTBYNAME_R_6
HAVE_INET_NTOA_R_DECL_REENTRANT)
if(NOT ${CURL_TEST})
if(${CURL_TEST}_REENTRANT)
set(NEED_REENTRANT 1)
endif()
endif()
endforeach()
if(NEED_REENTRANT)
foreach(CURL_TEST
HAVE_GETHOSTBYADDR_R_5
HAVE_GETHOSTBYADDR_R_7
HAVE_GETHOSTBYADDR_R_8
HAVE_GETHOSTBYNAME_R_3
HAVE_GETHOSTBYNAME_R_5
HAVE_GETHOSTBYNAME_R_6)
set(${CURL_TEST} 0)
if(${CURL_TEST}_REENTRANT)
set(${CURL_TEST} 1)
endif()
endforeach()
endif()
if(HAVE_INET_NTOA_R_DECL_REENTRANT)
set(HAVE_INET_NTOA_R_DECL 1)
set(NEED_REENTRANT 1)
endif()
# Check clock_gettime(CLOCK_MONOTONIC, x) support
curl_internal_test(HAVE_CLOCK_GETTIME_MONOTONIC)
# Check compiler support of __builtin_available()
curl_internal_test(HAVE_BUILTIN_AVAILABLE)
# Some other minor tests
if(NOT HAVE_IN_ADDR_T)
set(in_addr_t "unsigned long")
endif()
# Check for nonblocking
set(HAVE_DISABLED_NONBLOCKING 1)
if(HAVE_FIONBIO OR
HAVE_IOCTLSOCKET OR
HAVE_IOCTLSOCKET_CASE OR
HAVE_O_NONBLOCK)
set(HAVE_DISABLED_NONBLOCKING)
endif()
set(CURL_PULL_SYS_TYPES_H ${HAVE_SYS_TYPES_H})
set(CURL_PULL_SYS_SOCKET_H ${HAVE_SYS_SOCKET_H})
set(CURL_PULL_SYS_POLL_H ${HAVE_SYS_POLL_H})
set(CURL_PULL_STDINT_H ${HAVE_STDINT_H})
set(CURL_PULL_INTTYPES_H ${HAVE_INTTYPES_H})
include(CMake/OtherTests.cmake)
SET(LIB_VAUTH_CFILES
"${CURL_LIBRARY_DIR}/vauth/vauth.c" "${CURL_LIBRARY_DIR}/vauth/cleartext.c" "${CURL_LIBRARY_DIR}/vauth/cram.c"
"${CURL_LIBRARY_DIR}/vauth/digest.c" "${CURL_LIBRARY_DIR}/vauth/digest_sspi.c" "${CURL_LIBRARY_DIR}/vauth/krb5_gssapi.c"
"${CURL_LIBRARY_DIR}/vauth/krb5_sspi.c" "${CURL_LIBRARY_DIR}/vauth/ntlm.c" "${CURL_LIBRARY_DIR}/vauth/ntlm_sspi.c" "${CURL_LIBRARY_DIR}/vauth/oauth2.c"
"${CURL_LIBRARY_DIR}/vauth/spnego_gssapi.c" "${CURL_LIBRARY_DIR}/vauth/spnego_sspi.c")
SET(LIB_VAUTH_HFILES "${CURL_LIBRARY_DIR}/vauth/vauth.h" "${CURL_LIBRARY_DIR}/vauth/digest.h" "${CURL_LIBRARY_DIR}/vauth/ntlm.h")
SET(LIB_VTLS_CFILES "${CURL_LIBRARY_DIR}/vtls/openssl.c" "${CURL_LIBRARY_DIR}/vtls/gtls.c" "${CURL_LIBRARY_DIR}/vtls/vtls.c" "${CURL_LIBRARY_DIR}/vtls/nss.c"
"${CURL_LIBRARY_DIR}/vtls/polarssl.c" "${CURL_LIBRARY_DIR}/vtls/polarssl_threadlock.c"
"${CURL_LIBRARY_DIR}/vtls/wolfssl.c" "${CURL_LIBRARY_DIR}/vtls/schannel.c" "${CURL_LIBRARY_DIR}/vtls/schannel_verify.c"
"${CURL_LIBRARY_DIR}/vtls/sectransp.c" "${CURL_LIBRARY_DIR}/vtls/gskit.c" "${CURL_LIBRARY_DIR}/vtls/mbedtls.c" "${CURL_LIBRARY_DIR}/vtls/mesalink.c"
"${CURL_LIBRARY_DIR}/vtls/bearssl.c")
SET(LIB_VTLS_HFILES "${CURL_LIBRARY_DIR}/vtls/openssl.h" "${CURL_LIBRARY_DIR}/vtls/vtls.h" "${CURL_LIBRARY_DIR}/vtls/gtls.h"
"${CURL_LIBRARY_DIR}/vtls/nssg.h" "${CURL_LIBRARY_DIR}/vtls/polarssl.h" "${CURL_LIBRARY_DIR}/vtls/polarssl_threadlock.h"
"${CURL_LIBRARY_DIR}/vtls/wolfssl.h" "${CURL_LIBRARY_DIR}/vtls/schannel.h" "${CURL_LIBRARY_DIR}/vtls/sectransp.h" "${CURL_LIBRARY_DIR}/vtls/gskit.h"
"${CURL_LIBRARY_DIR}/vtls/mbedtls.h" "${CURL_LIBRARY_DIR}/vtls/mesalink.h" "${CURL_LIBRARY_DIR}/vtls/bearssl.h")
SET(LIB_VQUIC_CFILES "${CURL_LIBRARY_DIR}/vquic/ngtcp2.c" "${CURL_LIBRARY_DIR}/vquic/quiche.c")
SET(LIB_VQUIC_HFILES "${CURL_LIBRARY_DIR}/vquic/ngtcp2.h" "${CURL_LIBRARY_DIR}/vquic/quiche.h")
SET(LIB_VSSH_CFILES "${CURL_LIBRARY_DIR}/vssh/libssh2.c" "${CURL_LIBRARY_DIR}/vssh/libssh.c")
SET(LIB_VSSH_HFILES "${CURL_LIBRARY_DIR}/vssh/ssh.h")
SET(LIB_CFILES "${CURL_LIBRARY_DIR}/file.c"
"${CURL_LIBRARY_DIR}/timeval.c" "${CURL_LIBRARY_DIR}/base64.c" "${CURL_LIBRARY_DIR}/hostip.c" "${CURL_LIBRARY_DIR}/progress.c" "${CURL_LIBRARY_DIR}/formdata.c"
"${CURL_LIBRARY_DIR}/cookie.c" "${CURL_LIBRARY_DIR}/http.c" "${CURL_LIBRARY_DIR}/sendf.c" "${CURL_LIBRARY_DIR}/url.c" "${CURL_LIBRARY_DIR}/dict.c" "${CURL_LIBRARY_DIR}/if2ip.c" "${CURL_LIBRARY_DIR}/speedcheck.c"
"${CURL_LIBRARY_DIR}/ldap.c" "${CURL_LIBRARY_DIR}/version.c" "${CURL_LIBRARY_DIR}/getenv.c" "${CURL_LIBRARY_DIR}/escape.c" "${CURL_LIBRARY_DIR}/mprintf.c" "${CURL_LIBRARY_DIR}/telnet.c" "${CURL_LIBRARY_DIR}/netrc.c"
"${CURL_LIBRARY_DIR}/getinfo.c" "${CURL_LIBRARY_DIR}/transfer.c" "${CURL_LIBRARY_DIR}/strcase.c" "${CURL_LIBRARY_DIR}/easy.c" "${CURL_LIBRARY_DIR}/security.c" "${CURL_LIBRARY_DIR}/curl_fnmatch.c"
"${CURL_LIBRARY_DIR}/fileinfo.c" "${CURL_LIBRARY_DIR}/wildcard.c" "${CURL_LIBRARY_DIR}/krb5.c" "${CURL_LIBRARY_DIR}/memdebug.c" "${CURL_LIBRARY_DIR}/http_chunks.c"
"${CURL_LIBRARY_DIR}/strtok.c" "${CURL_LIBRARY_DIR}/connect.c" "${CURL_LIBRARY_DIR}/llist.c" "${CURL_LIBRARY_DIR}/hash.c" "${CURL_LIBRARY_DIR}/multi.c" "${CURL_LIBRARY_DIR}/content_encoding.c" "${CURL_LIBRARY_DIR}/share.c"
"${CURL_LIBRARY_DIR}/http_digest.c" "${CURL_LIBRARY_DIR}/md4.c" "${CURL_LIBRARY_DIR}/md5.c" "${CURL_LIBRARY_DIR}/http_negotiate.c" "${CURL_LIBRARY_DIR}/inet_pton.c" "${CURL_LIBRARY_DIR}/strtoofft.c"
"${CURL_LIBRARY_DIR}/strerror.c" "${CURL_LIBRARY_DIR}/amigaos.c" "${CURL_LIBRARY_DIR}/hostasyn.c" "${CURL_LIBRARY_DIR}/hostip4.c" "${CURL_LIBRARY_DIR}/hostip6.c" "${CURL_LIBRARY_DIR}/hostsyn.c"
"${CURL_LIBRARY_DIR}/inet_ntop.c" "${CURL_LIBRARY_DIR}/parsedate.c" "${CURL_LIBRARY_DIR}/select.c" "${CURL_LIBRARY_DIR}/splay.c" "${CURL_LIBRARY_DIR}/strdup.c" "${CURL_LIBRARY_DIR}/socks.c"
"${CURL_LIBRARY_DIR}/curl_addrinfo.c" "${CURL_LIBRARY_DIR}/socks_gssapi.c" "${CURL_LIBRARY_DIR}/socks_sspi.c"
"${CURL_LIBRARY_DIR}/curl_sspi.c" "${CURL_LIBRARY_DIR}/slist.c" "${CURL_LIBRARY_DIR}/nonblock.c" "${CURL_LIBRARY_DIR}/curl_memrchr.c" "${CURL_LIBRARY_DIR}/imap.c" "${CURL_LIBRARY_DIR}/pop3.c" "${CURL_LIBRARY_DIR}/smtp.c"
"${CURL_LIBRARY_DIR}/pingpong.c" "${CURL_LIBRARY_DIR}/rtsp.c" "${CURL_LIBRARY_DIR}/curl_threads.c" "${CURL_LIBRARY_DIR}/warnless.c" "${CURL_LIBRARY_DIR}/hmac.c" "${CURL_LIBRARY_DIR}/curl_rtmp.c"
"${CURL_LIBRARY_DIR}/openldap.c" "${CURL_LIBRARY_DIR}/curl_gethostname.c" "${CURL_LIBRARY_DIR}/gopher.c" "${CURL_LIBRARY_DIR}/idn_win32.c"
"${CURL_LIBRARY_DIR}/http_proxy.c" "${CURL_LIBRARY_DIR}/non-ascii.c" "${CURL_LIBRARY_DIR}/asyn-ares.c" "${CURL_LIBRARY_DIR}/asyn-thread.c" "${CURL_LIBRARY_DIR}/curl_gssapi.c"
"${CURL_LIBRARY_DIR}/http_ntlm.c" "${CURL_LIBRARY_DIR}/curl_ntlm_wb.c" "${CURL_LIBRARY_DIR}/curl_ntlm_core.c" "${CURL_LIBRARY_DIR}/curl_sasl.c" "${CURL_LIBRARY_DIR}/rand.c"
"${CURL_LIBRARY_DIR}/curl_multibyte.c" "${CURL_LIBRARY_DIR}/hostcheck.c" "${CURL_LIBRARY_DIR}/conncache.c" "${CURL_LIBRARY_DIR}/dotdot.c"
"${CURL_LIBRARY_DIR}/x509asn1.c" "${CURL_LIBRARY_DIR}/http2.c" "${CURL_LIBRARY_DIR}/smb.c" "${CURL_LIBRARY_DIR}/curl_endian.c" "${CURL_LIBRARY_DIR}/curl_des.c" "${CURL_LIBRARY_DIR}/system_win32.c"
"${CURL_LIBRARY_DIR}/mime.c" "${CURL_LIBRARY_DIR}/sha256.c" "${CURL_LIBRARY_DIR}/setopt.c" "${CURL_LIBRARY_DIR}/curl_path.c" "${CURL_LIBRARY_DIR}/curl_ctype.c" "${CURL_LIBRARY_DIR}/curl_range.c" "${CURL_LIBRARY_DIR}/psl.c"
"${CURL_LIBRARY_DIR}/doh.c" "${CURL_LIBRARY_DIR}/urlapi.c" "${CURL_LIBRARY_DIR}/curl_get_line.c" "${CURL_LIBRARY_DIR}/altsvc.c" "${CURL_LIBRARY_DIR}/socketpair.c")
SET(LIB_HFILES "${CURL_LIBRARY_DIR}/arpa_telnet.h" "${CURL_LIBRARY_DIR}/netrc.h" "${CURL_LIBRARY_DIR}/file.h" "${CURL_LIBRARY_DIR}/timeval.h" "${CURL_LIBRARY_DIR}/hostip.h" "${CURL_LIBRARY_DIR}/progress.h"
"${CURL_LIBRARY_DIR}/formdata.h" "${CURL_LIBRARY_DIR}/cookie.h" "${CURL_LIBRARY_DIR}/http.h" "${CURL_LIBRARY_DIR}/sendf.h" "${CURL_LIBRARY_DIR}/url.h" "${CURL_LIBRARY_DIR}/dict.h" "${CURL_LIBRARY_DIR}/if2ip.h"
"${CURL_LIBRARY_DIR}/speedcheck.h" "${CURL_LIBRARY_DIR}/urldata.h" "${CURL_LIBRARY_DIR}/curl_ldap.h" "${CURL_LIBRARY_DIR}/escape.h" "${CURL_LIBRARY_DIR}/telnet.h" "${CURL_LIBRARY_DIR}/getinfo.h"
"${CURL_LIBRARY_DIR}/strcase.h" "${CURL_LIBRARY_DIR}/curl_sec.h" "${CURL_LIBRARY_DIR}/memdebug.h" "${CURL_LIBRARY_DIR}/http_chunks.h" "${CURL_LIBRARY_DIR}/curl_fnmatch.h"
"${CURL_LIBRARY_DIR}/wildcard.h" "${CURL_LIBRARY_DIR}/fileinfo.h" "${CURL_LIBRARY_DIR}/strtok.h" "${CURL_LIBRARY_DIR}/connect.h" "${CURL_LIBRARY_DIR}/llist.h"
"${CURL_LIBRARY_DIR}/hash.h" "${CURL_LIBRARY_DIR}/content_encoding.h" "${CURL_LIBRARY_DIR}/share.h" "${CURL_LIBRARY_DIR}/curl_md4.h" "${CURL_LIBRARY_DIR}/curl_md5.h" "${CURL_LIBRARY_DIR}/http_digest.h"
"${CURL_LIBRARY_DIR}/http_negotiate.h" "${CURL_LIBRARY_DIR}/inet_pton.h" "${CURL_LIBRARY_DIR}/amigaos.h" "${CURL_LIBRARY_DIR}/strtoofft.h" "${CURL_LIBRARY_DIR}/strerror.h"
"${CURL_LIBRARY_DIR}/inet_ntop.h" "${CURL_LIBRARY_DIR}/curlx.h" "${CURL_LIBRARY_DIR}/curl_memory.h" "${CURL_LIBRARY_DIR}/curl_setup.h" "${CURL_LIBRARY_DIR}/transfer.h" "${CURL_LIBRARY_DIR}/select.h"
"${CURL_LIBRARY_DIR}/easyif.h" "${CURL_LIBRARY_DIR}/multiif.h" "${CURL_LIBRARY_DIR}/parsedate.h" "${CURL_LIBRARY_DIR}/sockaddr.h" "${CURL_LIBRARY_DIR}/splay.h" "${CURL_LIBRARY_DIR}/strdup.h"
"${CURL_LIBRARY_DIR}/socks.h" "${CURL_LIBRARY_DIR}/curl_base64.h" "${CURL_LIBRARY_DIR}/curl_addrinfo.h" "${CURL_LIBRARY_DIR}/curl_sspi.h"
"${CURL_LIBRARY_DIR}/slist.h" "${CURL_LIBRARY_DIR}/nonblock.h" "${CURL_LIBRARY_DIR}/curl_memrchr.h" "${CURL_LIBRARY_DIR}/imap.h" "${CURL_LIBRARY_DIR}/pop3.h" "${CURL_LIBRARY_DIR}/smtp.h" "${CURL_LIBRARY_DIR}/pingpong.h"
"${CURL_LIBRARY_DIR}/rtsp.h" "${CURL_LIBRARY_DIR}/curl_threads.h" "${CURL_LIBRARY_DIR}/warnless.h" "${CURL_LIBRARY_DIR}/curl_hmac.h" "${CURL_LIBRARY_DIR}/curl_rtmp.h"
"${CURL_LIBRARY_DIR}/curl_gethostname.h" "${CURL_LIBRARY_DIR}/gopher.h" "${CURL_LIBRARY_DIR}/http_proxy.h" "${CURL_LIBRARY_DIR}/non-ascii.h" "${CURL_LIBRARY_DIR}/asyn.h"
"${CURL_LIBRARY_DIR}/http_ntlm.h" "${CURL_LIBRARY_DIR}/curl_gssapi.h" "${CURL_LIBRARY_DIR}/curl_ntlm_wb.h" "${CURL_LIBRARY_DIR}/curl_ntlm_core.h"
"${CURL_LIBRARY_DIR}/curl_sasl.h" "${CURL_LIBRARY_DIR}/curl_multibyte.h" "${CURL_LIBRARY_DIR}/hostcheck.h" "${CURL_LIBRARY_DIR}/conncache.h"
"${CURL_LIBRARY_DIR}/multihandle.h" "${CURL_LIBRARY_DIR}/setup-vms.h" "${CURL_LIBRARY_DIR}/dotdot.h"
"${CURL_LIBRARY_DIR}/x509asn1.h" "${CURL_LIBRARY_DIR}/http2.h" "${CURL_LIBRARY_DIR}/sigpipe.h" "${CURL_LIBRARY_DIR}/smb.h" "${CURL_LIBRARY_DIR}/curl_endian.h" "${CURL_LIBRARY_DIR}/curl_des.h"
"${CURL_LIBRARY_DIR}/curl_printf.h" "${CURL_LIBRARY_DIR}/system_win32.h" "${CURL_LIBRARY_DIR}/rand.h" "${CURL_LIBRARY_DIR}/mime.h" "${CURL_LIBRARY_DIR}/curl_sha256.h" "${CURL_LIBRARY_DIR}/setopt.h"
"${CURL_LIBRARY_DIR}/curl_path.h" "${CURL_LIBRARY_DIR}/curl_ctype.h" "${CURL_LIBRARY_DIR}/curl_range.h" "${CURL_LIBRARY_DIR}/psl.h" "${CURL_LIBRARY_DIR}/doh.h" "${CURL_LIBRARY_DIR}/urlapi-int.h"
"${CURL_LIBRARY_DIR}/curl_get_line.h" "${CURL_LIBRARY_DIR}/altsvc.h" "${CURL_LIBRARY_DIR}/quic.h" "${CURL_LIBRARY_DIR}/socketpair.h")
SET(LIB_RCFILES "${CURL_LIBRARY_DIR}/libcurl.rc")
SET(CSOURCES ${LIB_CFILES} ${LIB_VAUTH_CFILES} ${LIB_VTLS_CFILES}
${LIB_VQUIC_CFILES} ${LIB_VSSH_CFILES})
SET(HHEADERS ${LIB_HFILES} ${LIB_VAUTH_HFILES} ${LIB_VTLS_HFILES}
${LIB_VQUIC_HFILES} ${LIB_VSSH_HFILES})
configure_file(${CURL_SOURCE_DIR}/lib/curl_config.h.cmake
${CMAKE_CURRENT_BINARY_DIR}/curl/curl_config.h)
list(APPEND HHEADERS
${CMAKE_CURRENT_BINARY_DIR}/curl/curl_config.h
)
add_library(libcurl ${HHEADERS} ${CSOURCES})
if(NOT BUILD_SHARED_LIBS)
set_target_properties(libcurl PROPERTIES INTERFACE_COMPILE_DEFINITIONS CURL_STATICLIB)
endif()
if(HIDES_CURL_PRIVATE_SYMBOLS)
set_property(TARGET libcurl APPEND PROPERTY COMPILE_DEFINITIONS "CURL_HIDDEN_SYMBOLS")
set_property(TARGET libcurl APPEND PROPERTY COMPILE_FLAGS ${CURL_CFLAG_SYMBOLS_HIDE})
endif()
if(OPENSSL_FOUND)
target_include_directories(libcurl PUBLIC ${OPENSSL_INCLUDE_DIR})
message("-- Including openssl ${OPENSSL_INCLUDE_DIR} to curl")
endif()
if(ZLIB_FOUND)
    target_include_directories(libcurl PUBLIC ${ZLIB_INCLUDE_DIRS})
message("-- Including zlib ${ZLIB_INCLUDE_DIRS} to curl")
endif()
target_compile_definitions(libcurl PUBLIC -DHAVE_CONFIG_H)
target_compile_definitions(libcurl PUBLIC -DBUILDING_LIBCURL)
target_include_directories(libcurl PUBLIC "${CURL_SOURCE_DIR}/include" "${CURL_LIBRARY_DIR}" "${CMAKE_CURRENT_BINARY_DIR}/curl")
target_link_libraries(libcurl ${CURL_LIBS})

1
contrib/icu vendored Submodule

@ -0,0 +1 @@
Subproject commit faa2f9f9e1fe74c5ed00eba371d2830134cdbea1

View File

@ -0,0 +1,459 @@
set(ICU_SOURCE_DIR ${ClickHouse_SOURCE_DIR}/contrib/icu/icu4c/source)
set(ICUDATA_SOURCE_DIR ${ClickHouse_SOURCE_DIR}/contrib/icudata/)
# These lists of sources were generated from the build log of the original ICU build system (configure + make).
set(ICUUC_SOURCES
${ICU_SOURCE_DIR}/common/errorcode.cpp
${ICU_SOURCE_DIR}/common/putil.cpp
${ICU_SOURCE_DIR}/common/umath.cpp
${ICU_SOURCE_DIR}/common/utypes.cpp
${ICU_SOURCE_DIR}/common/uinvchar.cpp
${ICU_SOURCE_DIR}/common/umutex.cpp
${ICU_SOURCE_DIR}/common/ucln_cmn.cpp
${ICU_SOURCE_DIR}/common/uinit.cpp
${ICU_SOURCE_DIR}/common/uobject.cpp
${ICU_SOURCE_DIR}/common/cmemory.cpp
${ICU_SOURCE_DIR}/common/charstr.cpp
${ICU_SOURCE_DIR}/common/cstr.cpp
${ICU_SOURCE_DIR}/common/udata.cpp
${ICU_SOURCE_DIR}/common/ucmndata.cpp
${ICU_SOURCE_DIR}/common/udatamem.cpp
${ICU_SOURCE_DIR}/common/umapfile.cpp
${ICU_SOURCE_DIR}/common/udataswp.cpp
${ICU_SOURCE_DIR}/common/utrie_swap.cpp
${ICU_SOURCE_DIR}/common/ucol_swp.cpp
${ICU_SOURCE_DIR}/common/utrace.cpp
${ICU_SOURCE_DIR}/common/uhash.cpp
${ICU_SOURCE_DIR}/common/uhash_us.cpp
${ICU_SOURCE_DIR}/common/uenum.cpp
${ICU_SOURCE_DIR}/common/ustrenum.cpp
${ICU_SOURCE_DIR}/common/uvector.cpp
${ICU_SOURCE_DIR}/common/ustack.cpp
${ICU_SOURCE_DIR}/common/uvectr32.cpp
${ICU_SOURCE_DIR}/common/uvectr64.cpp
${ICU_SOURCE_DIR}/common/ucnv.cpp
${ICU_SOURCE_DIR}/common/ucnv_bld.cpp
${ICU_SOURCE_DIR}/common/ucnv_cnv.cpp
${ICU_SOURCE_DIR}/common/ucnv_io.cpp
${ICU_SOURCE_DIR}/common/ucnv_cb.cpp
${ICU_SOURCE_DIR}/common/ucnv_err.cpp
${ICU_SOURCE_DIR}/common/ucnvlat1.cpp
${ICU_SOURCE_DIR}/common/ucnv_u7.cpp
${ICU_SOURCE_DIR}/common/ucnv_u8.cpp
${ICU_SOURCE_DIR}/common/ucnv_u16.cpp
${ICU_SOURCE_DIR}/common/ucnv_u32.cpp
${ICU_SOURCE_DIR}/common/ucnvscsu.cpp
${ICU_SOURCE_DIR}/common/ucnvbocu.cpp
${ICU_SOURCE_DIR}/common/ucnv_ext.cpp
${ICU_SOURCE_DIR}/common/ucnvmbcs.cpp
${ICU_SOURCE_DIR}/common/ucnv2022.cpp
${ICU_SOURCE_DIR}/common/ucnvhz.cpp
${ICU_SOURCE_DIR}/common/ucnv_lmb.cpp
${ICU_SOURCE_DIR}/common/ucnvisci.cpp
${ICU_SOURCE_DIR}/common/ucnvdisp.cpp
${ICU_SOURCE_DIR}/common/ucnv_set.cpp
${ICU_SOURCE_DIR}/common/ucnv_ct.cpp
${ICU_SOURCE_DIR}/common/resource.cpp
${ICU_SOURCE_DIR}/common/uresbund.cpp
${ICU_SOURCE_DIR}/common/ures_cnv.cpp
${ICU_SOURCE_DIR}/common/uresdata.cpp
${ICU_SOURCE_DIR}/common/resbund.cpp
${ICU_SOURCE_DIR}/common/resbund_cnv.cpp
${ICU_SOURCE_DIR}/common/ucurr.cpp
${ICU_SOURCE_DIR}/common/localebuilder.cpp
${ICU_SOURCE_DIR}/common/localeprioritylist.cpp
${ICU_SOURCE_DIR}/common/messagepattern.cpp
${ICU_SOURCE_DIR}/common/ucat.cpp
${ICU_SOURCE_DIR}/common/locmap.cpp
${ICU_SOURCE_DIR}/common/uloc.cpp
${ICU_SOURCE_DIR}/common/locid.cpp
${ICU_SOURCE_DIR}/common/locutil.cpp
${ICU_SOURCE_DIR}/common/locavailable.cpp
${ICU_SOURCE_DIR}/common/locdispnames.cpp
${ICU_SOURCE_DIR}/common/locdspnm.cpp
${ICU_SOURCE_DIR}/common/loclikely.cpp
${ICU_SOURCE_DIR}/common/locresdata.cpp
${ICU_SOURCE_DIR}/common/lsr.cpp
${ICU_SOURCE_DIR}/common/loclikelysubtags.cpp
${ICU_SOURCE_DIR}/common/locdistance.cpp
${ICU_SOURCE_DIR}/common/localematcher.cpp
${ICU_SOURCE_DIR}/common/bytestream.cpp
${ICU_SOURCE_DIR}/common/stringpiece.cpp
${ICU_SOURCE_DIR}/common/bytesinkutil.cpp
${ICU_SOURCE_DIR}/common/stringtriebuilder.cpp
${ICU_SOURCE_DIR}/common/bytestriebuilder.cpp
${ICU_SOURCE_DIR}/common/bytestrie.cpp
${ICU_SOURCE_DIR}/common/bytestrieiterator.cpp
${ICU_SOURCE_DIR}/common/ucharstrie.cpp
${ICU_SOURCE_DIR}/common/ucharstriebuilder.cpp
${ICU_SOURCE_DIR}/common/ucharstrieiterator.cpp
${ICU_SOURCE_DIR}/common/dictionarydata.cpp
${ICU_SOURCE_DIR}/common/edits.cpp
${ICU_SOURCE_DIR}/common/appendable.cpp
${ICU_SOURCE_DIR}/common/ustr_cnv.cpp
${ICU_SOURCE_DIR}/common/unistr_cnv.cpp
${ICU_SOURCE_DIR}/common/unistr.cpp
${ICU_SOURCE_DIR}/common/unistr_case.cpp
${ICU_SOURCE_DIR}/common/unistr_props.cpp
${ICU_SOURCE_DIR}/common/utf_impl.cpp
${ICU_SOURCE_DIR}/common/ustring.cpp
${ICU_SOURCE_DIR}/common/ustrcase.cpp
${ICU_SOURCE_DIR}/common/ucasemap.cpp
${ICU_SOURCE_DIR}/common/ucasemap_titlecase_brkiter.cpp
${ICU_SOURCE_DIR}/common/cstring.cpp
${ICU_SOURCE_DIR}/common/ustrfmt.cpp
${ICU_SOURCE_DIR}/common/ustrtrns.cpp
${ICU_SOURCE_DIR}/common/ustr_wcs.cpp
${ICU_SOURCE_DIR}/common/utext.cpp
${ICU_SOURCE_DIR}/common/unistr_case_locale.cpp
${ICU_SOURCE_DIR}/common/ustrcase_locale.cpp
${ICU_SOURCE_DIR}/common/unistr_titlecase_brkiter.cpp
${ICU_SOURCE_DIR}/common/ustr_titlecase_brkiter.cpp
${ICU_SOURCE_DIR}/common/normalizer2impl.cpp
${ICU_SOURCE_DIR}/common/normalizer2.cpp
${ICU_SOURCE_DIR}/common/filterednormalizer2.cpp
${ICU_SOURCE_DIR}/common/normlzr.cpp
${ICU_SOURCE_DIR}/common/unorm.cpp
${ICU_SOURCE_DIR}/common/unormcmp.cpp
${ICU_SOURCE_DIR}/common/loadednormalizer2impl.cpp
${ICU_SOURCE_DIR}/common/chariter.cpp
${ICU_SOURCE_DIR}/common/schriter.cpp
${ICU_SOURCE_DIR}/common/uchriter.cpp
${ICU_SOURCE_DIR}/common/uiter.cpp
${ICU_SOURCE_DIR}/common/patternprops.cpp
${ICU_SOURCE_DIR}/common/uchar.cpp
${ICU_SOURCE_DIR}/common/uprops.cpp
${ICU_SOURCE_DIR}/common/ucase.cpp
${ICU_SOURCE_DIR}/common/propname.cpp
${ICU_SOURCE_DIR}/common/ubidi_props.cpp
${ICU_SOURCE_DIR}/common/characterproperties.cpp
${ICU_SOURCE_DIR}/common/ubidi.cpp
${ICU_SOURCE_DIR}/common/ubidiwrt.cpp
${ICU_SOURCE_DIR}/common/ubidiln.cpp
${ICU_SOURCE_DIR}/common/ushape.cpp
${ICU_SOURCE_DIR}/common/uscript.cpp
${ICU_SOURCE_DIR}/common/uscript_props.cpp
${ICU_SOURCE_DIR}/common/usc_impl.cpp
${ICU_SOURCE_DIR}/common/unames.cpp
${ICU_SOURCE_DIR}/common/utrie.cpp
${ICU_SOURCE_DIR}/common/utrie2.cpp
${ICU_SOURCE_DIR}/common/utrie2_builder.cpp
${ICU_SOURCE_DIR}/common/ucptrie.cpp
${ICU_SOURCE_DIR}/common/umutablecptrie.cpp
${ICU_SOURCE_DIR}/common/bmpset.cpp
${ICU_SOURCE_DIR}/common/unisetspan.cpp
${ICU_SOURCE_DIR}/common/uset_props.cpp
${ICU_SOURCE_DIR}/common/uniset_props.cpp
${ICU_SOURCE_DIR}/common/uniset_closure.cpp
${ICU_SOURCE_DIR}/common/uset.cpp
${ICU_SOURCE_DIR}/common/uniset.cpp
${ICU_SOURCE_DIR}/common/usetiter.cpp
${ICU_SOURCE_DIR}/common/ruleiter.cpp
${ICU_SOURCE_DIR}/common/caniter.cpp
${ICU_SOURCE_DIR}/common/unifilt.cpp
${ICU_SOURCE_DIR}/common/unifunct.cpp
${ICU_SOURCE_DIR}/common/uarrsort.cpp
${ICU_SOURCE_DIR}/common/brkiter.cpp
${ICU_SOURCE_DIR}/common/ubrk.cpp
${ICU_SOURCE_DIR}/common/brkeng.cpp
${ICU_SOURCE_DIR}/common/dictbe.cpp
${ICU_SOURCE_DIR}/common/filteredbrk.cpp
${ICU_SOURCE_DIR}/common/rbbi.cpp
${ICU_SOURCE_DIR}/common/rbbidata.cpp
${ICU_SOURCE_DIR}/common/rbbinode.cpp
${ICU_SOURCE_DIR}/common/rbbirb.cpp
${ICU_SOURCE_DIR}/common/rbbiscan.cpp
${ICU_SOURCE_DIR}/common/rbbisetb.cpp
${ICU_SOURCE_DIR}/common/rbbistbl.cpp
${ICU_SOURCE_DIR}/common/rbbitblb.cpp
${ICU_SOURCE_DIR}/common/rbbi_cache.cpp
${ICU_SOURCE_DIR}/common/serv.cpp
${ICU_SOURCE_DIR}/common/servnotf.cpp
${ICU_SOURCE_DIR}/common/servls.cpp
${ICU_SOURCE_DIR}/common/servlk.cpp
${ICU_SOURCE_DIR}/common/servlkf.cpp
${ICU_SOURCE_DIR}/common/servrbf.cpp
${ICU_SOURCE_DIR}/common/servslkf.cpp
${ICU_SOURCE_DIR}/common/uidna.cpp
${ICU_SOURCE_DIR}/common/usprep.cpp
${ICU_SOURCE_DIR}/common/uts46.cpp
${ICU_SOURCE_DIR}/common/punycode.cpp
${ICU_SOURCE_DIR}/common/util.cpp
${ICU_SOURCE_DIR}/common/util_props.cpp
${ICU_SOURCE_DIR}/common/parsepos.cpp
${ICU_SOURCE_DIR}/common/locbased.cpp
${ICU_SOURCE_DIR}/common/cwchar.cpp
${ICU_SOURCE_DIR}/common/wintz.cpp
${ICU_SOURCE_DIR}/common/dtintrv.cpp
${ICU_SOURCE_DIR}/common/ucnvsel.cpp
${ICU_SOURCE_DIR}/common/propsvec.cpp
${ICU_SOURCE_DIR}/common/ulist.cpp
${ICU_SOURCE_DIR}/common/uloc_tag.cpp
${ICU_SOURCE_DIR}/common/icudataver.cpp
${ICU_SOURCE_DIR}/common/icuplug.cpp
${ICU_SOURCE_DIR}/common/sharedobject.cpp
${ICU_SOURCE_DIR}/common/simpleformatter.cpp
${ICU_SOURCE_DIR}/common/unifiedcache.cpp
${ICU_SOURCE_DIR}/common/uloc_keytype.cpp
${ICU_SOURCE_DIR}/common/ubiditransform.cpp
${ICU_SOURCE_DIR}/common/pluralmap.cpp
${ICU_SOURCE_DIR}/common/static_unicode_sets.cpp
${ICU_SOURCE_DIR}/common/restrace.cpp)
set(ICUI18N_SOURCES
${ICU_SOURCE_DIR}/i18n/ucln_in.cpp
${ICU_SOURCE_DIR}/i18n/fmtable.cpp
${ICU_SOURCE_DIR}/i18n/format.cpp
${ICU_SOURCE_DIR}/i18n/msgfmt.cpp
${ICU_SOURCE_DIR}/i18n/umsg.cpp
${ICU_SOURCE_DIR}/i18n/numfmt.cpp
${ICU_SOURCE_DIR}/i18n/unum.cpp
${ICU_SOURCE_DIR}/i18n/decimfmt.cpp
${ICU_SOURCE_DIR}/i18n/dcfmtsym.cpp
${ICU_SOURCE_DIR}/i18n/fmtable_cnv.cpp
${ICU_SOURCE_DIR}/i18n/choicfmt.cpp
${ICU_SOURCE_DIR}/i18n/datefmt.cpp
${ICU_SOURCE_DIR}/i18n/smpdtfmt.cpp
${ICU_SOURCE_DIR}/i18n/reldtfmt.cpp
${ICU_SOURCE_DIR}/i18n/dtfmtsym.cpp
${ICU_SOURCE_DIR}/i18n/udat.cpp
${ICU_SOURCE_DIR}/i18n/dtptngen.cpp
${ICU_SOURCE_DIR}/i18n/udatpg.cpp
${ICU_SOURCE_DIR}/i18n/nfrs.cpp
${ICU_SOURCE_DIR}/i18n/nfrule.cpp
${ICU_SOURCE_DIR}/i18n/nfsubs.cpp
${ICU_SOURCE_DIR}/i18n/rbnf.cpp
${ICU_SOURCE_DIR}/i18n/numsys.cpp
${ICU_SOURCE_DIR}/i18n/unumsys.cpp
${ICU_SOURCE_DIR}/i18n/ucsdet.cpp
${ICU_SOURCE_DIR}/i18n/ucal.cpp
${ICU_SOURCE_DIR}/i18n/calendar.cpp
${ICU_SOURCE_DIR}/i18n/gregocal.cpp
${ICU_SOURCE_DIR}/i18n/timezone.cpp
${ICU_SOURCE_DIR}/i18n/simpletz.cpp
${ICU_SOURCE_DIR}/i18n/olsontz.cpp
${ICU_SOURCE_DIR}/i18n/astro.cpp
${ICU_SOURCE_DIR}/i18n/taiwncal.cpp
${ICU_SOURCE_DIR}/i18n/buddhcal.cpp
${ICU_SOURCE_DIR}/i18n/persncal.cpp
${ICU_SOURCE_DIR}/i18n/islamcal.cpp
${ICU_SOURCE_DIR}/i18n/japancal.cpp
${ICU_SOURCE_DIR}/i18n/gregoimp.cpp
${ICU_SOURCE_DIR}/i18n/hebrwcal.cpp
${ICU_SOURCE_DIR}/i18n/indiancal.cpp
${ICU_SOURCE_DIR}/i18n/chnsecal.cpp
${ICU_SOURCE_DIR}/i18n/cecal.cpp
${ICU_SOURCE_DIR}/i18n/coptccal.cpp
${ICU_SOURCE_DIR}/i18n/dangical.cpp
${ICU_SOURCE_DIR}/i18n/ethpccal.cpp
${ICU_SOURCE_DIR}/i18n/coleitr.cpp
${ICU_SOURCE_DIR}/i18n/coll.cpp
${ICU_SOURCE_DIR}/i18n/sortkey.cpp
${ICU_SOURCE_DIR}/i18n/bocsu.cpp
${ICU_SOURCE_DIR}/i18n/ucoleitr.cpp
${ICU_SOURCE_DIR}/i18n/ucol.cpp
${ICU_SOURCE_DIR}/i18n/ucol_res.cpp
${ICU_SOURCE_DIR}/i18n/ucol_sit.cpp
${ICU_SOURCE_DIR}/i18n/collation.cpp
${ICU_SOURCE_DIR}/i18n/collationsettings.cpp
${ICU_SOURCE_DIR}/i18n/collationdata.cpp
${ICU_SOURCE_DIR}/i18n/collationtailoring.cpp
${ICU_SOURCE_DIR}/i18n/collationdatareader.cpp
${ICU_SOURCE_DIR}/i18n/collationdatawriter.cpp
${ICU_SOURCE_DIR}/i18n/collationfcd.cpp
${ICU_SOURCE_DIR}/i18n/collationiterator.cpp
${ICU_SOURCE_DIR}/i18n/utf16collationiterator.cpp
${ICU_SOURCE_DIR}/i18n/utf8collationiterator.cpp
${ICU_SOURCE_DIR}/i18n/uitercollationiterator.cpp
${ICU_SOURCE_DIR}/i18n/collationsets.cpp
${ICU_SOURCE_DIR}/i18n/collationcompare.cpp
${ICU_SOURCE_DIR}/i18n/collationfastlatin.cpp
${ICU_SOURCE_DIR}/i18n/collationkeys.cpp
${ICU_SOURCE_DIR}/i18n/rulebasedcollator.cpp
${ICU_SOURCE_DIR}/i18n/collationroot.cpp
${ICU_SOURCE_DIR}/i18n/collationrootelements.cpp
${ICU_SOURCE_DIR}/i18n/collationdatabuilder.cpp
${ICU_SOURCE_DIR}/i18n/collationweights.cpp
${ICU_SOURCE_DIR}/i18n/collationruleparser.cpp
${ICU_SOURCE_DIR}/i18n/collationbuilder.cpp
${ICU_SOURCE_DIR}/i18n/collationfastlatinbuilder.cpp
${ICU_SOURCE_DIR}/i18n/listformatter.cpp
${ICU_SOURCE_DIR}/i18n/ulistformatter.cpp
${ICU_SOURCE_DIR}/i18n/strmatch.cpp
${ICU_SOURCE_DIR}/i18n/usearch.cpp
${ICU_SOURCE_DIR}/i18n/search.cpp
${ICU_SOURCE_DIR}/i18n/stsearch.cpp
${ICU_SOURCE_DIR}/i18n/translit.cpp
${ICU_SOURCE_DIR}/i18n/utrans.cpp
${ICU_SOURCE_DIR}/i18n/esctrn.cpp
${ICU_SOURCE_DIR}/i18n/unesctrn.cpp
${ICU_SOURCE_DIR}/i18n/funcrepl.cpp
${ICU_SOURCE_DIR}/i18n/strrepl.cpp
${ICU_SOURCE_DIR}/i18n/tridpars.cpp
${ICU_SOURCE_DIR}/i18n/cpdtrans.cpp
${ICU_SOURCE_DIR}/i18n/rbt.cpp
${ICU_SOURCE_DIR}/i18n/rbt_data.cpp
${ICU_SOURCE_DIR}/i18n/rbt_pars.cpp
${ICU_SOURCE_DIR}/i18n/rbt_rule.cpp
${ICU_SOURCE_DIR}/i18n/rbt_set.cpp
${ICU_SOURCE_DIR}/i18n/nultrans.cpp
${ICU_SOURCE_DIR}/i18n/remtrans.cpp
${ICU_SOURCE_DIR}/i18n/casetrn.cpp
${ICU_SOURCE_DIR}/i18n/titletrn.cpp
${ICU_SOURCE_DIR}/i18n/tolowtrn.cpp
${ICU_SOURCE_DIR}/i18n/toupptrn.cpp
${ICU_SOURCE_DIR}/i18n/anytrans.cpp
${ICU_SOURCE_DIR}/i18n/name2uni.cpp
${ICU_SOURCE_DIR}/i18n/uni2name.cpp
${ICU_SOURCE_DIR}/i18n/nortrans.cpp
${ICU_SOURCE_DIR}/i18n/quant.cpp
${ICU_SOURCE_DIR}/i18n/transreg.cpp
${ICU_SOURCE_DIR}/i18n/brktrans.cpp
${ICU_SOURCE_DIR}/i18n/regexcmp.cpp
${ICU_SOURCE_DIR}/i18n/rematch.cpp
${ICU_SOURCE_DIR}/i18n/repattrn.cpp
${ICU_SOURCE_DIR}/i18n/regexst.cpp
${ICU_SOURCE_DIR}/i18n/regextxt.cpp
${ICU_SOURCE_DIR}/i18n/regeximp.cpp
${ICU_SOURCE_DIR}/i18n/uregex.cpp
${ICU_SOURCE_DIR}/i18n/uregexc.cpp
${ICU_SOURCE_DIR}/i18n/ulocdata.cpp
${ICU_SOURCE_DIR}/i18n/measfmt.cpp
${ICU_SOURCE_DIR}/i18n/currfmt.cpp
${ICU_SOURCE_DIR}/i18n/curramt.cpp
${ICU_SOURCE_DIR}/i18n/currunit.cpp
${ICU_SOURCE_DIR}/i18n/measure.cpp
${ICU_SOURCE_DIR}/i18n/utmscale.cpp
${ICU_SOURCE_DIR}/i18n/csdetect.cpp
${ICU_SOURCE_DIR}/i18n/csmatch.cpp
${ICU_SOURCE_DIR}/i18n/csr2022.cpp
${ICU_SOURCE_DIR}/i18n/csrecog.cpp
${ICU_SOURCE_DIR}/i18n/csrmbcs.cpp
${ICU_SOURCE_DIR}/i18n/csrsbcs.cpp
${ICU_SOURCE_DIR}/i18n/csrucode.cpp
${ICU_SOURCE_DIR}/i18n/csrutf8.cpp
${ICU_SOURCE_DIR}/i18n/inputext.cpp
${ICU_SOURCE_DIR}/i18n/wintzimpl.cpp
${ICU_SOURCE_DIR}/i18n/windtfmt.cpp
${ICU_SOURCE_DIR}/i18n/winnmfmt.cpp
${ICU_SOURCE_DIR}/i18n/basictz.cpp
${ICU_SOURCE_DIR}/i18n/dtrule.cpp
${ICU_SOURCE_DIR}/i18n/rbtz.cpp
${ICU_SOURCE_DIR}/i18n/tzrule.cpp
${ICU_SOURCE_DIR}/i18n/tztrans.cpp
${ICU_SOURCE_DIR}/i18n/vtzone.cpp
${ICU_SOURCE_DIR}/i18n/zonemeta.cpp
${ICU_SOURCE_DIR}/i18n/standardplural.cpp
${ICU_SOURCE_DIR}/i18n/upluralrules.cpp
${ICU_SOURCE_DIR}/i18n/plurrule.cpp
${ICU_SOURCE_DIR}/i18n/plurfmt.cpp
${ICU_SOURCE_DIR}/i18n/selfmt.cpp
${ICU_SOURCE_DIR}/i18n/dtitvfmt.cpp
${ICU_SOURCE_DIR}/i18n/dtitvinf.cpp
${ICU_SOURCE_DIR}/i18n/udateintervalformat.cpp
${ICU_SOURCE_DIR}/i18n/tmunit.cpp
${ICU_SOURCE_DIR}/i18n/tmutamt.cpp
${ICU_SOURCE_DIR}/i18n/tmutfmt.cpp
${ICU_SOURCE_DIR}/i18n/currpinf.cpp
${ICU_SOURCE_DIR}/i18n/uspoof.cpp
${ICU_SOURCE_DIR}/i18n/uspoof_impl.cpp
${ICU_SOURCE_DIR}/i18n/uspoof_build.cpp
${ICU_SOURCE_DIR}/i18n/uspoof_conf.cpp
${ICU_SOURCE_DIR}/i18n/smpdtfst.cpp
${ICU_SOURCE_DIR}/i18n/ztrans.cpp
${ICU_SOURCE_DIR}/i18n/zrule.cpp
${ICU_SOURCE_DIR}/i18n/vzone.cpp
${ICU_SOURCE_DIR}/i18n/fphdlimp.cpp
${ICU_SOURCE_DIR}/i18n/fpositer.cpp
${ICU_SOURCE_DIR}/i18n/ufieldpositer.cpp
${ICU_SOURCE_DIR}/i18n/decNumber.cpp
${ICU_SOURCE_DIR}/i18n/decContext.cpp
${ICU_SOURCE_DIR}/i18n/alphaindex.cpp
${ICU_SOURCE_DIR}/i18n/tznames.cpp
${ICU_SOURCE_DIR}/i18n/tznames_impl.cpp
${ICU_SOURCE_DIR}/i18n/tzgnames.cpp
${ICU_SOURCE_DIR}/i18n/tzfmt.cpp
${ICU_SOURCE_DIR}/i18n/compactdecimalformat.cpp
${ICU_SOURCE_DIR}/i18n/gender.cpp
${ICU_SOURCE_DIR}/i18n/region.cpp
${ICU_SOURCE_DIR}/i18n/scriptset.cpp
${ICU_SOURCE_DIR}/i18n/uregion.cpp
${ICU_SOURCE_DIR}/i18n/reldatefmt.cpp
${ICU_SOURCE_DIR}/i18n/quantityformatter.cpp
${ICU_SOURCE_DIR}/i18n/measunit.cpp
${ICU_SOURCE_DIR}/i18n/sharedbreakiterator.cpp
${ICU_SOURCE_DIR}/i18n/scientificnumberformatter.cpp
${ICU_SOURCE_DIR}/i18n/dayperiodrules.cpp
${ICU_SOURCE_DIR}/i18n/nounit.cpp
${ICU_SOURCE_DIR}/i18n/number_affixutils.cpp
${ICU_SOURCE_DIR}/i18n/number_compact.cpp
${ICU_SOURCE_DIR}/i18n/number_decimalquantity.cpp
${ICU_SOURCE_DIR}/i18n/number_decimfmtprops.cpp
${ICU_SOURCE_DIR}/i18n/number_fluent.cpp
${ICU_SOURCE_DIR}/i18n/number_formatimpl.cpp
${ICU_SOURCE_DIR}/i18n/number_grouping.cpp
${ICU_SOURCE_DIR}/i18n/number_integerwidth.cpp
${ICU_SOURCE_DIR}/i18n/number_longnames.cpp
${ICU_SOURCE_DIR}/i18n/number_modifiers.cpp
${ICU_SOURCE_DIR}/i18n/number_notation.cpp
${ICU_SOURCE_DIR}/i18n/number_output.cpp
${ICU_SOURCE_DIR}/i18n/number_padding.cpp
${ICU_SOURCE_DIR}/i18n/number_patternmodifier.cpp
${ICU_SOURCE_DIR}/i18n/number_patternstring.cpp
${ICU_SOURCE_DIR}/i18n/number_rounding.cpp
${ICU_SOURCE_DIR}/i18n/number_scientific.cpp
${ICU_SOURCE_DIR}/i18n/number_utils.cpp
${ICU_SOURCE_DIR}/i18n/number_asformat.cpp
${ICU_SOURCE_DIR}/i18n/number_mapper.cpp
${ICU_SOURCE_DIR}/i18n/number_multiplier.cpp
${ICU_SOURCE_DIR}/i18n/number_currencysymbols.cpp
${ICU_SOURCE_DIR}/i18n/number_skeletons.cpp
${ICU_SOURCE_DIR}/i18n/number_capi.cpp
${ICU_SOURCE_DIR}/i18n/double-conversion-string-to-double.cpp
${ICU_SOURCE_DIR}/i18n/double-conversion-double-to-string.cpp
${ICU_SOURCE_DIR}/i18n/double-conversion-bignum-dtoa.cpp
${ICU_SOURCE_DIR}/i18n/double-conversion-bignum.cpp
${ICU_SOURCE_DIR}/i18n/double-conversion-cached-powers.cpp
${ICU_SOURCE_DIR}/i18n/double-conversion-fast-dtoa.cpp
${ICU_SOURCE_DIR}/i18n/double-conversion-strtod.cpp
${ICU_SOURCE_DIR}/i18n/string_segment.cpp
${ICU_SOURCE_DIR}/i18n/numparse_parsednumber.cpp
${ICU_SOURCE_DIR}/i18n/numparse_impl.cpp
${ICU_SOURCE_DIR}/i18n/numparse_symbols.cpp
${ICU_SOURCE_DIR}/i18n/numparse_decimal.cpp
${ICU_SOURCE_DIR}/i18n/numparse_scientific.cpp
${ICU_SOURCE_DIR}/i18n/numparse_currency.cpp
${ICU_SOURCE_DIR}/i18n/numparse_affixes.cpp
${ICU_SOURCE_DIR}/i18n/numparse_compositions.cpp
${ICU_SOURCE_DIR}/i18n/numparse_validators.cpp
${ICU_SOURCE_DIR}/i18n/numrange_fluent.cpp
${ICU_SOURCE_DIR}/i18n/numrange_impl.cpp
${ICU_SOURCE_DIR}/i18n/erarules.cpp
${ICU_SOURCE_DIR}/i18n/formattedvalue.cpp
${ICU_SOURCE_DIR}/i18n/formattedval_iterimpl.cpp
${ICU_SOURCE_DIR}/i18n/formattedval_sbimpl.cpp
${ICU_SOURCE_DIR}/i18n/formatted_string_builder.cpp)
enable_language(ASM)
set(ICUDATA_SOURCES ${ICUDATA_SOURCE_DIR}/icudt66l_dat.S)
# Note that we don't like any kind of binary plugins (because of runtime dependencies, vulnerabilities, ABI incompatibilities).
add_definitions(-D_REENTRANT -DU_HAVE_ELF_H=1 -DU_HAVE_STRTOD_L=1 -DU_HAVE_XLOCALE_H=0 -DDEFAULT_ICU_PLUGINS="/dev/null")
add_library(icuuc ${ICUUC_SOURCES})
add_library(icui18n ${ICUI18N_SOURCES})
add_library(icudata ${ICUDATA_SOURCES})
target_link_libraries(icuuc icudata)
target_link_libraries(icui18n icuuc)
target_include_directories(icuuc SYSTEM PUBLIC ${ICU_SOURCE_DIR}/common/)
target_include_directories(icui18n SYSTEM PUBLIC ${ICU_SOURCE_DIR}/i18n/)
target_compile_definitions(icuuc PRIVATE -DU_COMMON_IMPLEMENTATION)
target_compile_definitions(icui18n PRIVATE -DU_I18N_IMPLEMENTATION)
if (COMPILER_CLANG)
target_compile_options(icudata PRIVATE -Wno-unused-command-line-argument)
endif ()
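As a sanity check of what these targets provide, a minimal consumer might look roughly like the sketch below. It is illustrative only and not part of the build files; it assumes an executable linked against icui18n and icuuc and uses the ICU C API for collation.

#include <unicode/ucol.h>
#include <unicode/ustring.h>
#include <cstdio>

int main()
{
    UErrorCode status = U_ZERO_ERROR;
    UCollator * collator = ucol_open("en_US", &status);   // locale data comes from the embedded icudata blob
    if (U_FAILURE(status))
    {
        std::printf("ucol_open failed: %s\n", u_errorName(status));
        return 1;
    }
    UChar lhs[8];
    UChar rhs[8];
    u_uastrcpy(lhs, "apple");
    u_uastrcpy(rhs, "Banana");
    UCollationResult result = ucol_strcoll(collator, lhs, -1, rhs, -1);   // locale-aware comparison
    std::printf("strcoll: %d\n", static_cast<int>(result));
    ucol_close(collator);
    return 0;
}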

1
contrib/icudata vendored Submodule

@ -0,0 +1 @@
Subproject commit f020820388e3faafb44cc643574a2d563dfde572

View File

@ -297,7 +297,7 @@
* MADV_FREE, though typically with higher
* system overhead.
*/
-#define JEMALLOC_PURGE_MADVISE_FREE
+// #define JEMALLOC_PURGE_MADVISE_FREE
#define JEMALLOC_PURGE_MADVISE_DONTNEED
#define JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS
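For context, the two flags above map onto different madvise() purge strategies; a rough sketch, not code from the repository (purge_range is a hypothetical helper):

#include <sys/mman.h>
#include <cstddef>

// MADV_FREE lets the kernel reclaim the pages lazily; MADV_DONTNEED drops them
// right away, so the next touch faults in zero-filled pages.
void purge_range(void * addr, std::size_t length, bool lazy)
{
#ifdef MADV_FREE
    if (lazy)
    {
        ::madvise(addr, length, MADV_FREE);
        return;
    }
#endif
    ::madvise(addr, length, MADV_DONTNEED);
}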

View File

@ -3,9 +3,4 @@ add_library(btrie
include/btrie.h
)
-target_include_directories (btrie PUBLIC include)
+target_include_directories (btrie SYSTEM PUBLIC include)
-if (ENABLE_TESTS)
-    add_executable(test_btrie test/test_btrie.c)
-    target_link_libraries(test_btrie btrie)
-endif ()

1
contrib/libc-headers vendored Submodule

@ -0,0 +1 @@
Subproject commit cd82fd9d8eefe50a47a0adf7c617c3ea7d558d11

2
contrib/libcxx vendored

@ -1 +1 @@
-Subproject commit 9807685d51db467e097ad5eb8d5c2c16922794b2
+Subproject commit f7c63235238a71b7e0563fab8c7c5ec1b54831f6

View File

@ -1,41 +1,45 @@
+include(CheckCXXCompilerFlag)
set(LIBCXX_SOURCE_DIR ${ClickHouse_SOURCE_DIR}/contrib/libcxx)
set(SRCS
-${LIBCXX_SOURCE_DIR}/src/optional.cpp
-${LIBCXX_SOURCE_DIR}/src/variant.cpp
-${LIBCXX_SOURCE_DIR}/src/chrono.cpp
-${LIBCXX_SOURCE_DIR}/src/thread.cpp
-${LIBCXX_SOURCE_DIR}/src/experimental/memory_resource.cpp
-${LIBCXX_SOURCE_DIR}/src/iostream.cpp
-${LIBCXX_SOURCE_DIR}/src/strstream.cpp
-${LIBCXX_SOURCE_DIR}/src/ios.cpp
-${LIBCXX_SOURCE_DIR}/src/future.cpp
-${LIBCXX_SOURCE_DIR}/src/shared_mutex.cpp
-${LIBCXX_SOURCE_DIR}/src/condition_variable.cpp
-${LIBCXX_SOURCE_DIR}/src/hash.cpp
-${LIBCXX_SOURCE_DIR}/src/string.cpp
-${LIBCXX_SOURCE_DIR}/src/debug.cpp
-${LIBCXX_SOURCE_DIR}/src/stdexcept.cpp
-${LIBCXX_SOURCE_DIR}/src/utility.cpp
-${LIBCXX_SOURCE_DIR}/src/any.cpp
-${LIBCXX_SOURCE_DIR}/src/exception.cpp
-${LIBCXX_SOURCE_DIR}/src/memory.cpp
-${LIBCXX_SOURCE_DIR}/src/new.cpp
-${LIBCXX_SOURCE_DIR}/src/valarray.cpp
-${LIBCXX_SOURCE_DIR}/src/vector.cpp
-${LIBCXX_SOURCE_DIR}/src/algorithm.cpp
-${LIBCXX_SOURCE_DIR}/src/functional.cpp
-${LIBCXX_SOURCE_DIR}/src/regex.cpp
-${LIBCXX_SOURCE_DIR}/src/bind.cpp
-${LIBCXX_SOURCE_DIR}/src/mutex.cpp
-${LIBCXX_SOURCE_DIR}/src/charconv.cpp
-${LIBCXX_SOURCE_DIR}/src/typeinfo.cpp
-${LIBCXX_SOURCE_DIR}/src/locale.cpp
-${LIBCXX_SOURCE_DIR}/src/filesystem/operations.cpp
-${LIBCXX_SOURCE_DIR}/src/filesystem/int128_builtins.cpp
-${LIBCXX_SOURCE_DIR}/src/filesystem/directory_iterator.cpp
-${LIBCXX_SOURCE_DIR}/src/system_error.cpp
-${LIBCXX_SOURCE_DIR}/src/random.cpp
+${LIBCXX_SOURCE_DIR}/src/algorithm.cpp
+${LIBCXX_SOURCE_DIR}/src/any.cpp
+${LIBCXX_SOURCE_DIR}/src/bind.cpp
+${LIBCXX_SOURCE_DIR}/src/charconv.cpp
+${LIBCXX_SOURCE_DIR}/src/chrono.cpp
+${LIBCXX_SOURCE_DIR}/src/condition_variable.cpp
+${LIBCXX_SOURCE_DIR}/src/condition_variable_destructor.cpp
+${LIBCXX_SOURCE_DIR}/src/debug.cpp
+${LIBCXX_SOURCE_DIR}/src/exception.cpp
+${LIBCXX_SOURCE_DIR}/src/experimental/memory_resource.cpp
+${LIBCXX_SOURCE_DIR}/src/filesystem/directory_iterator.cpp
+${LIBCXX_SOURCE_DIR}/src/filesystem/int128_builtins.cpp
+${LIBCXX_SOURCE_DIR}/src/filesystem/operations.cpp
+${LIBCXX_SOURCE_DIR}/src/functional.cpp
+${LIBCXX_SOURCE_DIR}/src/future.cpp
+${LIBCXX_SOURCE_DIR}/src/hash.cpp
+${LIBCXX_SOURCE_DIR}/src/ios.cpp
+${LIBCXX_SOURCE_DIR}/src/iostream.cpp
+${LIBCXX_SOURCE_DIR}/src/locale.cpp
+${LIBCXX_SOURCE_DIR}/src/memory.cpp
+${LIBCXX_SOURCE_DIR}/src/mutex.cpp
+${LIBCXX_SOURCE_DIR}/src/mutex_destructor.cpp
+${LIBCXX_SOURCE_DIR}/src/new.cpp
+${LIBCXX_SOURCE_DIR}/src/optional.cpp
+${LIBCXX_SOURCE_DIR}/src/random.cpp
+${LIBCXX_SOURCE_DIR}/src/regex.cpp
+${LIBCXX_SOURCE_DIR}/src/shared_mutex.cpp
+${LIBCXX_SOURCE_DIR}/src/stdexcept.cpp
+${LIBCXX_SOURCE_DIR}/src/string.cpp
+${LIBCXX_SOURCE_DIR}/src/strstream.cpp
+${LIBCXX_SOURCE_DIR}/src/system_error.cpp
+${LIBCXX_SOURCE_DIR}/src/thread.cpp
+${LIBCXX_SOURCE_DIR}/src/typeinfo.cpp
+${LIBCXX_SOURCE_DIR}/src/utility.cpp
+${LIBCXX_SOURCE_DIR}/src/valarray.cpp
+${LIBCXX_SOURCE_DIR}/src/variant.cpp
+${LIBCXX_SOURCE_DIR}/src/vector.cpp
)
add_library(cxx ${SRCS})
@ -43,8 +47,15 @@ add_library(cxx ${SRCS})
target_include_directories(cxx SYSTEM BEFORE PUBLIC $<BUILD_INTERFACE:${LIBCXX_SOURCE_DIR}/include>)
target_compile_definitions(cxx PRIVATE -D_LIBCPP_BUILDING_LIBRARY -DLIBCXX_BUILDING_LIBCXXABI)
-target_compile_options(cxx PUBLIC -nostdinc++ -Wno-reserved-id-macro)
-if (OS_DARWIN AND NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 9)
+target_compile_options(cxx PUBLIC $<$<COMPILE_LANGUAGE:CXX>:-nostdinc++>)
+check_cxx_compiler_flag(-Wreserved-id-macro HAVE_WARNING_RESERVED_ID_MACRO)
+if (HAVE_WARNING_RESERVED_ID_MACRO)
+    target_compile_options(cxx PUBLIC -Wno-reserved-id-macro)
+endif ()
+check_cxx_compiler_flag(-Wctad-maybe-unsupported HAVE_WARNING_CTAD_MAYBE_UNSUPPORTED)
+if (HAVE_WARNING_CTAD_MAYBE_UNSUPPORTED)
    target_compile_options(cxx PUBLIC -Wno-ctad-maybe-unsupported)
endif ()

2
contrib/libcxxabi vendored

@ -1 +1 @@
-Subproject commit d56efcc7a52739518dbe7df9e743073e00951fa1
+Subproject commit c26cf36f8387c5edf2cabb4a630f0975c35aa9fb

View File

@ -182,6 +182,9 @@ set(SRCS
${HDFS3_SOURCE_DIR}/common/FileWrapper.h
)
+# Old kernels (< 3.17) don't have SYS_getrandom. Always use the POSIX implementation for better compatibility.
+set_source_files_properties(${HDFS3_SOURCE_DIR}/rpc/RpcClient.cpp PROPERTIES COMPILE_FLAGS "-DBOOST_UUID_RANDOM_PROVIDER_FORCE_POSIX=1")
# target
add_library(hdfs3 ${SRCS} ${PROTO_SOURCES} ${PROTO_HEADERS})
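For illustration, forcing the POSIX provider makes Boost.UUID seed itself from /dev/urandom instead of the getrandom() syscall that pre-3.17 kernels lack; it is roughly equivalent to the hypothetical helper below (not repository code):

#include <array>
#include <cstdint>
#include <fstream>

std::array<std::uint8_t, 16> read_posix_entropy()
{
    std::array<std::uint8_t, 16> seed{};
    std::ifstream urandom("/dev/urandom", std::ios::binary);   // available on any Linux kernel
    urandom.read(reinterpret_cast<char *>(seed.data()), seed.size());
    return seed;
}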

View File

@ -1,2 +0,0 @@
google-perftools@googlegroups.com

View File

@ -1,80 +0,0 @@
message (STATUS "Building: tcmalloc_minimal_internal")
add_library (tcmalloc_minimal_internal
./src/malloc_hook.cc
./src/base/spinlock_internal.cc
./src/base/spinlock.cc
./src/base/dynamic_annotations.c
./src/base/linuxthreads.cc
./src/base/elf_mem_image.cc
./src/base/vdso_support.cc
./src/base/sysinfo.cc
./src/base/low_level_alloc.cc
./src/base/thread_lister.c
./src/base/logging.cc
./src/base/atomicops-internals-x86.cc
./src/memfs_malloc.cc
./src/tcmalloc.cc
./src/malloc_extension.cc
./src/thread_cache.cc
./src/symbolize.cc
./src/page_heap.cc
./src/maybe_threads.cc
./src/central_freelist.cc
./src/static_vars.cc
./src/sampler.cc
./src/internal_logging.cc
./src/system-alloc.cc
./src/span.cc
./src/common.cc
./src/stacktrace.cc
./src/stack_trace_table.cc
./src/heap-checker.cc
./src/heap-checker-bcad.cc
./src/heap-profile-table.cc
./src/raw_printer.cc
./src/memory_region_map.cc
)
target_compile_options (tcmalloc_minimal_internal
PRIVATE
-DNO_TCMALLOC_SAMPLES
-DNDEBUG
-DNO_FRAME_POINTER
-Wwrite-strings
-Wno-sign-compare
-Wno-unused-result
-Wno-deprecated-declarations
-Wno-unused-function
-Wno-unused-private-field
PUBLIC
-fno-builtin-malloc
-fno-builtin-free
-fno-builtin-realloc
-fno-builtin-calloc
-fno-builtin-cfree
-fno-builtin-memalign
-fno-builtin-posix_memalign
-fno-builtin-valloc
-fno-builtin-pvalloc
)
if (CMAKE_CXX_COMPILER_ID STREQUAL "Clang" AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 3.9.1)
target_compile_options(tcmalloc_minimal_internal PUBLIC -Wno-dynamic-exception-spec )
endif ()
if (CMAKE_SYSTEM MATCHES "FreeBSD" AND CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
target_compile_options(tcmalloc_minimal_internal PUBLIC -Wno-unused-but-set-variable)
endif ()
if (CMAKE_SYSTEM MATCHES "FreeBSD")
target_compile_definitions(tcmalloc_minimal_internal PUBLIC _GNU_SOURCE)
endif ()
target_include_directories (tcmalloc_minimal_internal PUBLIC include)
target_include_directories (tcmalloc_minimal_internal PRIVATE src)
find_package (Threads)
target_link_libraries (tcmalloc_minimal_internal ${CMAKE_THREAD_LIBS_INIT})

View File

@ -1,28 +0,0 @@
Copyright (c) 2005, Google Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@ -1,8 +0,0 @@
https://github.com/gperftools/gperftools/commit/dde32f8bbc95312379f9f5a651799815bb6327c5
Several modifications:
1. Disabled TCMALLOC_AGGRESSIVE_DECOMMIT by default. It is important.
2. Using only files for tcmalloc_minimal build (./configure --enable-minimal).
3. Using some compiler flags from project.
4. Removed warning about unused variable when build with NDEBUG (by default).
5. Including config.h with relative path.

View File

@ -1,422 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2005, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
// Author: Maxim Lifantsev (with design ideas by Sanjay Ghemawat)
//
//
// Module for detecting heap (memory) leaks.
//
// For full(er) information, see doc/heap_checker.html
//
// This module can be linked into programs with
// no slowdown caused by this unless you activate the leak-checker:
//
// 1. Set the environment variable HEAPCHECK to _type_ before
// running the program.
//
// _type_ is usually "normal" but can also be "minimal", "strict", or
// "draconian". (See the html file for other options, like 'local'.)
//
// After that, just run your binary. If the heap-checker detects
// a memory leak at program-exit, it will print instructions on how
// to track down the leak.
#ifndef BASE_HEAP_CHECKER_H_
#define BASE_HEAP_CHECKER_H_
#include <sys/types.h> // for size_t
// I can't #include config.h in this public API file, but I should
// really use configure (and make malloc_extension.h a .in file) to
// figure out if the system has stdint.h or not. But I'm lazy, so
// for now I'm assuming it's a problem only with MSVC.
#ifndef _MSC_VER
#include <stdint.h> // for uintptr_t
#endif
#include <stdarg.h> // for va_list
#include <vector>
// Annoying stuff for windows -- makes sure clients can import these functions
#ifndef PERFTOOLS_DLL_DECL
# ifdef _WIN32
# define PERFTOOLS_DLL_DECL __declspec(dllimport)
# else
# define PERFTOOLS_DLL_DECL
# endif
#endif
// The class is thread-safe with respect to all the provided static methods,
// as well as HeapLeakChecker objects: they can be accessed by multiple threads.
class PERFTOOLS_DLL_DECL HeapLeakChecker {
public:
// ----------------------------------------------------------------------- //
// Static functions for working with (whole-program) leak checking.
// If heap leak checking is currently active in some mode
// e.g. if leak checking was started (and is still active now)
// due to HEAPCHECK=... defined in the environment.
// The return value reflects iff HeapLeakChecker objects manually
// constructed right now will be doing leak checking or nothing.
// Note that we can go from active to inactive state during InitGoogle()
// if FLAGS_heap_check gets set to "" by some code before/during InitGoogle().
static bool IsActive();
// Return pointer to the whole-program checker if it has been created
// and NULL otherwise.
// Once GlobalChecker() returns non-NULL that object will not disappear and
// will be returned by all later GlobalChecker calls.
// This is mainly to access BytesLeaked() and ObjectsLeaked() (see below)
// for the whole-program checker after one calls NoGlobalLeaks()
// or similar and gets false.
static HeapLeakChecker* GlobalChecker();
// Do whole-program leak check now (if it was activated for this binary);
// return false only if it was activated and has failed.
// The mode of the check is controlled by the command-line flags.
// This method can be called repeatedly.
// Things like GlobalChecker()->SameHeap() can also be called explicitly
// to do the desired flavor of the check.
static bool NoGlobalLeaks();
// If the whole-program checker is active,
// cancel its automatic execution after main() exits.
// This requires that some leak check (e.g. NoGlobalLeaks())
// has been called at least once on the whole-program checker.
static void CancelGlobalCheck();
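// Illustrative sketch (not part of the original header): whole-program checking
// is typically driven by the environment, e.g.
//   HEAPCHECK=normal ./my_binary
// and a program can additionally force a final check itself:
//   int main() {
//     RunApplication();   // hypothetical application entry point
//     if (HeapLeakChecker::IsActive() && !HeapLeakChecker::NoGlobalLeaks())
//       return 1;         // leaks were found and reported
//     return 0;
//   }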
// ----------------------------------------------------------------------- //
// Non-static functions for starting and doing leak checking.
// Start checking and name the leak check performed.
// The name is used in naming dumped profiles
// and needs to be unique only within your binary.
// It must also be a string that can be a part of a file name,
// in particular not contain path expressions.
explicit HeapLeakChecker(const char *name);
// Destructor (verifies that some *NoLeaks or *SameHeap method
// has been called at least once).
~HeapLeakChecker();
// These used to be different but are all the same now: they return
// true iff all memory allocated since this HeapLeakChecker object
// was constructed is still reachable from global state.
//
// Because we fork to convert addresses to symbol-names, and forking
// is not thread-safe, and we may be called in a threaded context,
// we do not try to symbolize addresses when called manually.
bool NoLeaks() { return DoNoLeaks(DO_NOT_SYMBOLIZE); }
// These forms are obsolete; use NoLeaks() instead.
// TODO(csilvers): mark as DEPRECATED.
bool QuickNoLeaks() { return NoLeaks(); }
bool BriefNoLeaks() { return NoLeaks(); }
bool SameHeap() { return NoLeaks(); }
bool QuickSameHeap() { return NoLeaks(); }
bool BriefSameHeap() { return NoLeaks(); }
// Detailed information about the number of leaked bytes and objects
// (both of these can be negative as well).
// These are available only after a *SameHeap or *NoLeaks
// method has been called.
// Note that it's possible for both of these to be zero
// while SameHeap() or NoLeaks() returned false in case
// of a heap state change that is significant
// but preserves the byte and object counts.
ssize_t BytesLeaked() const;
ssize_t ObjectsLeaked() const;
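// Illustrative sketch (not part of the original header): scoped use of the
// non-static interface above; FillCache() is a hypothetical piece of code under test.
//   HeapLeakChecker checker("cache_fill");   // "cache_fill" names the dumped profiles
//   FillCache();
//   if (!checker.NoLeaks())
//     fprintf(stderr, "leaked %zd bytes in %zd objects\n",
//             checker.BytesLeaked(), checker.ObjectsLeaked());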
// ----------------------------------------------------------------------- //
// Static helpers to make us ignore certain leaks.
// Scoped helper class. Should be allocated on the stack inside a
// block of code. Any heap allocations done in the code block
// covered by the scoped object (including in nested function calls
// done by the code block) will not be reported as leaks. This is
// the recommended replacement for the GetDisableChecksStart() and
// DisableChecksToHereFrom() routines below.
//
// Example:
// void Foo() {
// HeapLeakChecker::Disabler disabler;
// ... code that allocates objects whose leaks should be ignored ...
// }
//
// REQUIRES: Destructor runs in same thread as constructor
class Disabler {
public:
Disabler();
~Disabler();
private:
Disabler(const Disabler&); // disallow copy
void operator=(const Disabler&); // and assign
};
// Ignore an object located at 'ptr' (can go at the start or into the object)
// as well as all heap objects (transitively) referenced from it for the
// purposes of heap leak checking. Returns 'ptr' so that one can write
// static T* obj = IgnoreObject(new T(...));
//
// If 'ptr' does not point to an active allocated object at the time of this
// call, it is ignored; but if it does, the object must not get deleted from
// the heap later on.
//
// See also HiddenPointer, below, if you need to prevent a pointer from
// being traversed by the heap checker but do not wish to transitively
// whitelist objects referenced through it.
template <typename T>
static T* IgnoreObject(T* ptr) {
DoIgnoreObject(static_cast<const void*>(const_cast<const T*>(ptr)));
return ptr;
}
// Undo what an earlier IgnoreObject() call promised and asked to do.
// At the time of this call 'ptr' must point at or inside of an active
// allocated object which was previously registered with IgnoreObject().
static void UnIgnoreObject(const void* ptr);
// ----------------------------------------------------------------------- //
// Internal types defined in .cc
class Allocator;
struct RangeValue;
private:
// ----------------------------------------------------------------------- //
// Various helpers
// Create the name of the heap profile file.
// Should be deleted via Allocator::Free().
char* MakeProfileNameLocked();
// Helper for constructors
void Create(const char *name, bool make_start_snapshot);
enum ShouldSymbolize { SYMBOLIZE, DO_NOT_SYMBOLIZE };
// Helper for *NoLeaks and *SameHeap
bool DoNoLeaks(ShouldSymbolize should_symbolize);
// Helper for NoGlobalLeaks, also called by the global destructor.
static bool NoGlobalLeaksMaybeSymbolize(ShouldSymbolize should_symbolize);
// These used to be public, but they are now deprecated.
// Will remove entirely when all internal uses are fixed.
// In the meantime, use friendship so the unittest can still test them.
static void* GetDisableChecksStart();
static void DisableChecksToHereFrom(const void* start_address);
static void DisableChecksIn(const char* pattern);
friend void RangeDisabledLeaks();
friend void NamedTwoDisabledLeaks();
friend void* RunNamedDisabledLeaks(void*);
friend void TestHeapLeakCheckerNamedDisabling();
// Actually implements IgnoreObject().
static void DoIgnoreObject(const void* ptr);
// Disable checks based on stack trace entry at a depth <=
// max_depth. Used to hide allocations done inside some special
// libraries.
static void DisableChecksFromToLocked(const void* start_address,
const void* end_address,
int max_depth);
// Helper for DoNoLeaks to ignore all objects reachable from all live data
static void IgnoreAllLiveObjectsLocked(const void* self_stack_top);
// Callback we pass to TCMalloc_ListAllProcessThreads (see thread_lister.h)
// that is invoked when all threads of our process are found and stopped.
// The call back does the things needed to ignore live data reachable from
// thread stacks and registers for all our threads
// as well as do other global-live-data ignoring
// (via IgnoreNonThreadLiveObjectsLocked)
// during the quiet state of all threads being stopped.
// For the argument meaning see the comment by TCMalloc_ListAllProcessThreads.
// Here we only use num_threads and thread_pids, that TCMalloc_ListAllProcessThreads
// fills for us with the number and pids of all the threads of our process
// it found and attached to.
static int IgnoreLiveThreadsLocked(void* parameter,
int num_threads,
pid_t* thread_pids,
va_list ap);
// Helper for IgnoreAllLiveObjectsLocked and IgnoreLiveThreadsLocked
// that we prefer to execute from IgnoreLiveThreadsLocked
// while all threads are stopped.
// This helper does live object discovery and ignoring
// for all objects that are reachable from everything
// not related to thread stacks and registers.
static void IgnoreNonThreadLiveObjectsLocked();
// Helper for IgnoreNonThreadLiveObjectsLocked and IgnoreLiveThreadsLocked
// to discover and ignore all heap objects
// reachable from currently considered live objects
// (live_objects static global variable in our .cc file).
// "name", "name2" are two strings that we print one after another
// in a debug message to describe what kind of live object sources
// are being used.
static void IgnoreLiveObjectsLocked(const char* name, const char* name2);
// Do the overall whole-program heap leak check if needed;
// returns true when did the leak check.
static bool DoMainHeapCheck();
// Type of task for UseProcMapsLocked
enum ProcMapsTask {
RECORD_GLOBAL_DATA,
DISABLE_LIBRARY_ALLOCS
};
// Success/Error Return codes for UseProcMapsLocked.
enum ProcMapsResult {
PROC_MAPS_USED,
CANT_OPEN_PROC_MAPS,
NO_SHARED_LIBS_IN_PROC_MAPS
};
// Read /proc/self/maps, parse it, and do the 'proc_maps_task' for each line.
static ProcMapsResult UseProcMapsLocked(ProcMapsTask proc_maps_task);
// A ProcMapsTask to disable allocations from 'library'
// that is mapped to [start_address..end_address)
// (only if library is a certain system library).
static void DisableLibraryAllocsLocked(const char* library,
uintptr_t start_address,
uintptr_t end_address);
// Return true iff "*ptr" points to a heap object
// ("*ptr" can point at the start or inside of a heap object
// so that this works e.g. for pointers to C++ arrays, C++ strings,
// multiple-inherited objects, or pointers to members).
// We also fill *object_size for this object then
// and we move "*ptr" to point to the very start of the heap object.
static inline bool HaveOnHeapLocked(const void** ptr, size_t* object_size);
// Helper to shutdown heap leak checker when it's not needed
// or can't function properly.
static void TurnItselfOffLocked();
// Internally-used c-tor to start whole-executable checking.
HeapLeakChecker();
// ----------------------------------------------------------------------- //
// Friends and externally accessed helpers.
// Helper for VerifyHeapProfileTableStackGet in the unittest
// to get the recorded allocation caller for ptr,
// which must be a heap object.
static const void* GetAllocCaller(void* ptr);
friend void VerifyHeapProfileTableStackGet();
// This gets to execute before constructors for all global objects
static void BeforeConstructorsLocked();
friend void HeapLeakChecker_BeforeConstructors();
// This gets to execute after destructors for all global objects
friend void HeapLeakChecker_AfterDestructors();
// Full starting of recommended whole-program checking.
friend void HeapLeakChecker_InternalInitStart();
// Runs REGISTER_HEAPCHECK_CLEANUP cleanups and potentially
// calls DoMainHeapCheck
friend void HeapLeakChecker_RunHeapCleanups();
// ----------------------------------------------------------------------- //
// Member data.
class SpinLock* lock_; // to make HeapLeakChecker objects thread-safe
const char* name_; // our remembered name (we own it)
// NULL means this leak checker is a noop
// Snapshot taken when the checker was created. May be NULL
// for the global heap checker object. We use void* instead of
// HeapProfileTable::Snapshot* to avoid including heap-profile-table.h.
void* start_snapshot_;
bool has_checked_; // if we have done the leak check, so these are ready:
ssize_t inuse_bytes_increase_; // bytes-in-use increase for this checker
ssize_t inuse_allocs_increase_; // allocations-in-use increase
// for this checker
bool keep_profiles_; // iff we should keep the heap profiles we've made
// ----------------------------------------------------------------------- //
// Disallow "evil" constructors.
HeapLeakChecker(const HeapLeakChecker&);
void operator=(const HeapLeakChecker&);
};
// Holds a pointer that will not be traversed by the heap checker.
// Contrast with HeapLeakChecker::IgnoreObject(o), in which o and
// all objects reachable from o are ignored by the heap checker.
template <class T>
class HiddenPointer {
public:
explicit HiddenPointer(T* t)
: masked_t_(reinterpret_cast<uintptr_t>(t) ^ kHideMask) {
}
// Returns unhidden pointer. Be careful where you save the result.
T* get() const { return reinterpret_cast<T*>(masked_t_ ^ kHideMask); }
private:
// Arbitrary value, but not such that xor'ing with it is likely
// to map one valid pointer to another valid pointer:
static const uintptr_t kHideMask =
static_cast<uintptr_t>(0xF03A5F7BF03A5F7Bll);
uintptr_t masked_t_;
};
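// Illustrative sketch (not part of the original header); Cache and key are hypothetical:
//   static HiddenPointer<Cache> g_cache(new Cache);   // the checker will not traverse this pointer
//   g_cache.get()->Lookup(key);                       // unhide it only where it is actually used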
// A class that exists solely to run its destructor. This class should not be
// used directly, but instead by the REGISTER_HEAPCHECK_CLEANUP macro below.
class PERFTOOLS_DLL_DECL HeapCleaner {
public:
typedef void (*void_function)(void);
HeapCleaner(void_function f);
static void RunHeapCleanups();
private:
static std::vector<void_function>* heap_cleanups_;
};
// A macro to declare module heap check cleanup tasks
// (they run only if we are doing heap leak checking.)
// 'body' should be the cleanup code to run. 'name' doesn't matter,
// but must be unique amongst all REGISTER_HEAPCHECK_CLEANUP calls.
#define REGISTER_HEAPCHECK_CLEANUP(name, body) \
namespace { \
void heapcheck_cleanup_##name() { body; } \
static HeapCleaner heapcheck_cleaner_##name(&heapcheck_cleanup_##name); \
}
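// Illustrative sketch (not part of the original header); MyCache is hypothetical:
//   REGISTER_HEAPCHECK_CLEANUP(drop_my_cache, MyCache::Instance()->Clear();)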
#endif // BASE_HEAP_CHECKER_H_

View File

@ -1,105 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2005, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ---
* Author: Sanjay Ghemawat
*
* Module for heap-profiling.
*
* For full(er) information, see doc/heapprofile.html
*
* This module can be linked into your program with
* no slowdown caused by this unless you activate the profiler
* using one of the following methods:
*
* 1. Before starting the program, set the environment variable
* "HEAPPROFILE" to be the name of the file to which the profile
* data should be written.
*
* 2. Programmatically, start and stop the profiler using the
* routines "HeapProfilerStart(filename)" and "HeapProfilerStop()".
*
*/
#ifndef BASE_HEAP_PROFILER_H_
#define BASE_HEAP_PROFILER_H_
#include <stddef.h>
/* Annoying stuff for windows; makes sure clients can import these functions */
#ifndef PERFTOOLS_DLL_DECL
# ifdef _WIN32
# define PERFTOOLS_DLL_DECL __declspec(dllimport)
# else
# define PERFTOOLS_DLL_DECL
# endif
#endif
/* All this code should be usable from within C apps. */
#ifdef __cplusplus
extern "C" {
#endif
/* Start profiling and arrange to write profile data to file names
* of the form: "prefix.0000", "prefix.0001", ...
*/
PERFTOOLS_DLL_DECL void HeapProfilerStart(const char* prefix);
/* Returns non-zero if we are currently profiling the heap. (Returns
* an int rather than a bool so it's usable from C.) This is true
* between calls to HeapProfilerStart() and HeapProfilerStop(), and
* also if the program has been run with HEAPPROFILER, or some other
* way to turn on whole-program profiling.
*/
int IsHeapProfilerRunning();
/* Stop heap profiling. Can be restarted again with HeapProfilerStart(),
* but the currently accumulated profiling information will be cleared.
*/
PERFTOOLS_DLL_DECL void HeapProfilerStop();
/* Dump a profile now - can be used for dumping at a hopefully
* quiescent state in your program, in order to more easily track down
* memory leaks. Will include the reason in the logged message
*/
PERFTOOLS_DLL_DECL void HeapProfilerDump(const char *reason);
/* Generate current heap profiling information.
* Returns an empty string when heap profiling is not active.
* The returned pointer is a '\0'-terminated string allocated using malloc()
* and should be free()-ed as soon as the caller does not need it anymore.
*/
PERFTOOLS_DLL_DECL char* GetHeapProfile();
#ifdef __cplusplus
} // extern "C"
#endif
#endif /* BASE_HEAP_PROFILER_H_ */
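A hedged usage sketch of the profiler API above; the include path and DoLotsOfAllocations() are assumptions rather than repository code:

#include <gperftools/heap-profiler.h>

void DoLotsOfAllocations();   // hypothetical workload

void ProfiledSection()
{
    HeapProfilerStart("/tmp/myapp");          // dumps go to /tmp/myapp.0000, /tmp/myapp.0001, ...
    DoLotsOfAllocations();
    if (IsHeapProfilerRunning())
        HeapProfilerDump("after workload");   // the reason appears in the logged message
    HeapProfilerStop();
}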

View File

@ -1,434 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2005, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
// Author: Sanjay Ghemawat <opensource@google.com>
//
// Extra extensions exported by some malloc implementations. These
// extensions are accessed through a virtual base class so an
// application can link against a malloc that does not implement these
// extensions, and it will get default versions that do nothing.
//
// NOTE FOR C USERS: If you wish to use this functionality from within
// a C program, see malloc_extension_c.h.
#ifndef BASE_MALLOC_EXTENSION_H_
#define BASE_MALLOC_EXTENSION_H_
#include <stddef.h>
// I can't #include config.h in this public API file, but I should
// really use configure (and make malloc_extension.h a .in file) to
// figure out if the system has stdint.h or not. But I'm lazy, so
// for now I'm assuming it's a problem only with MSVC.
#ifndef _MSC_VER
#include <stdint.h>
#endif
#include <string>
#include <vector>
// Annoying stuff for windows -- makes sure clients can import these functions
#ifndef PERFTOOLS_DLL_DECL
# ifdef _WIN32
# define PERFTOOLS_DLL_DECL __declspec(dllimport)
# else
# define PERFTOOLS_DLL_DECL
# endif
#endif
static const int kMallocHistogramSize = 64;
// One day, we could support other types of writers (perhaps for C?)
typedef std::string MallocExtensionWriter;
namespace base {
struct MallocRange;
}
// Interface to a pluggable system allocator.
class PERFTOOLS_DLL_DECL SysAllocator {
public:
SysAllocator() {
}
virtual ~SysAllocator();
// Allocates "size"-byte of memory from system aligned with "alignment".
// Returns NULL if failed. Otherwise, the returned pointer p up to and
// including (p + actual_size -1) have been allocated.
virtual void* Alloc(size_t size, size_t *actual_size, size_t alignment) = 0;
};
// The default implementations of the following routines do nothing.
// All implementations should be thread-safe; the current one
// (TCMallocImplementation) is.
class PERFTOOLS_DLL_DECL MallocExtension {
public:
virtual ~MallocExtension();
// Call this very early in the program execution -- say, in a global
// constructor -- to set up parameters and state needed by all
// instrumented malloc implementations. One example: this routine
// sets environment variables to tell STL to use libc's malloc()
// instead of doing its own memory management. This is safe to call
// multiple times, as long as each time is before threads start up.
static void Initialize();
// See "verify_memory.h" to see what these routines do
virtual bool VerifyAllMemory();
virtual bool VerifyNewMemory(const void* p);
virtual bool VerifyArrayNewMemory(const void* p);
virtual bool VerifyMallocMemory(const void* p);
virtual bool MallocMemoryStats(int* blocks, size_t* total,
int histogram[kMallocHistogramSize]);
// Get a human readable description of the following malloc data structures.
// - Total inuse memory by application.
// - Free memory(thread, central and page heap),
// - Freelist of central cache, each class.
// - Page heap freelist.
// The state is stored as a null-terminated string
// in a prefix of "buffer[0,buffer_length-1]".
// REQUIRES: buffer_length > 0.
virtual void GetStats(char* buffer, int buffer_length);
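// Illustrative sketch (not part of the original header):
//   char buf[4096];
//   MallocExtension::instance()->GetStats(buf, sizeof(buf));
//   fputs(buf, stderr);   // human-readable allocator state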
// Outputs to "writer" a sample of live objects and the stack traces
// that allocated these objects. The format of the returned output
// is equivalent to the output of the heap profiler and can
// therefore be passed to "pprof". This function is equivalent to
// ReadStackTraces. The main difference is that this function returns
// serialized data appropriately formatted for use by the pprof tool.
// NOTE: by default, tcmalloc does not do any heap sampling, and this
// function will always return an empty sample. To get useful
// data from GetHeapSample, you must also set the environment
// variable TCMALLOC_SAMPLE_PARAMETER to a value such as 524288.
virtual void GetHeapSample(MallocExtensionWriter* writer);
// Outputs to "writer" the stack traces that caused growth in the
// address space size. The format of the returned output is
// equivalent to the output of the heap profiler and can therefore
// be passed to "pprof". This function is equivalent to
// ReadHeapGrowthStackTraces. The main difference is that this function
// returns serialized data appropriately formatted for use by the
// pprof tool. (This does not depend on, or require,
// TCMALLOC_SAMPLE_PARAMETER.)
virtual void GetHeapGrowthStacks(MallocExtensionWriter* writer);
// Invokes func(arg, range) for every controlled memory
// range. *range is filled in with information about the range.
//
// This is a best-effort interface useful only for performance
// analysis. The implementation may not call func at all.
typedef void (RangeFunction)(void*, const base::MallocRange*);
virtual void Ranges(void* arg, RangeFunction func);
// -------------------------------------------------------------------
// Control operations for getting and setting malloc implementation
// specific parameters. Some currently useful properties:
//
// generic
// -------
// "generic.current_allocated_bytes"
// Number of bytes currently allocated by application
// This property is not writable.
//
// "generic.heap_size"
// Number of bytes in the heap ==
// current_allocated_bytes +
// fragmentation +
// freed memory regions
// This property is not writable.
//
// tcmalloc
// --------
// "tcmalloc.max_total_thread_cache_bytes"
// Upper limit on total number of bytes stored across all
// per-thread caches. Default: 16MB.
//
// "tcmalloc.current_total_thread_cache_bytes"
// Number of bytes used across all thread caches.
// This property is not writable.
//
// "tcmalloc.central_cache_free_bytes"
// Number of free bytes in the central cache that have been
// assigned to size classes. They always count towards virtual
// memory usage, and unless the underlying memory is swapped out
// by the OS, they also count towards physical memory usage.
// This property is not writable.
//
// "tcmalloc.transfer_cache_free_bytes"
// Number of free bytes that are waiting to be transferred between
// the central cache and a thread cache. They always count
// towards virtual memory usage, and unless the underlying memory
// is swapped out by the OS, they also count towards physical
// memory usage. This property is not writable.
//
// "tcmalloc.thread_cache_free_bytes"
// Number of free bytes in thread caches. They always count
// towards virtual memory usage, and unless the underlying memory
// is swapped out by the OS, they also count towards physical
// memory usage. This property is not writable.
//
// "tcmalloc.pageheap_free_bytes"
// Number of bytes in free, mapped pages in page heap. These
// bytes can be used to fulfill allocation requests. They
// always count towards virtual memory usage, and unless the
// underlying memory is swapped out by the OS, they also count
// towards physical memory usage. This property is not writable.
//
// "tcmalloc.pageheap_unmapped_bytes"
// Number of bytes in free, unmapped pages in page heap.
// These are bytes that have been released back to the OS,
// possibly by one of the MallocExtension "Release" calls.
// They can be used to fulfill allocation requests, but
// typically incur a page fault. They always count towards
// virtual memory usage, and depending on the OS, typically
// do not count towards physical memory usage. This property
// is not writable.
// -------------------------------------------------------------------
// Get the named "property"'s value. Returns true if the property
// is known. Returns false if the property is not a valid property
// name for the current malloc implementation.
// REQUIRES: property != NULL; value != NULL
virtual bool GetNumericProperty(const char* property, size_t* value);
// Set the named "property"'s value. Returns true if the property
// is known and writable. Returns false if the property is not a
// valid property name for the current malloc implementation, or
// is not writable.
// REQUIRES: property != NULL
virtual bool SetNumericProperty(const char* property, size_t value);
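// Illustrative sketch (not part of the original header): reading and writing
// the properties listed above.
//   size_t allocated = 0;
//   if (MallocExtension::instance()->GetNumericProperty(
//           "generic.current_allocated_bytes", &allocated))
//     printf("in use by application: %zu bytes\n", allocated);
//   MallocExtension::instance()->SetNumericProperty(
//       "tcmalloc.max_total_thread_cache_bytes", 32 << 20);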
// Mark the current thread as "idle". This routine may optionally
// be called by threads as a hint to the malloc implementation that
// any thread-specific resources should be released. Note: this may
// be an expensive routine, so it should not be called too often.
//
// Also, if the code that calls this routine will go to sleep for
// a while, it should take care to not allocate anything between
// the call to this routine and the beginning of the sleep.
//
// Most malloc implementations ignore this routine.
virtual void MarkThreadIdle();
// Mark the current thread as "busy". This routine should be
// called after MarkThreadIdle() if the thread will now do more
// work. If this method is not called, performance may suffer.
//
// Most malloc implementations ignore this routine.
virtual void MarkThreadBusy();
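// Illustrative sketch (not part of the original header); WaitForNextRequest() is hypothetical:
//   MallocExtension::instance()->MarkThreadIdle();
//   WaitForNextRequest();   // long blocking wait, no allocations in between
//   MallocExtension::instance()->MarkThreadBusy();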
// Gets the system allocator used by the malloc extension instance. Returns
// NULL for malloc implementations that do not support pluggable system
// allocators.
virtual SysAllocator* GetSystemAllocator();
// Sets the system allocator to the specified.
//
// Users could register their own system allocators for malloc implementation
// that supports pluggable system allocators, such as TCMalloc, by doing:
// alloc = new MyOwnSysAllocator();
// MallocExtension::instance()->SetSystemAllocator(alloc);
// It's up to users whether to fall back (recommended) to the default
// system allocator (use GetSystemAllocator() above) or not. The caller is
// responsible for any necessary locking.
// See tcmalloc/system-alloc.h for the interface and
// tcmalloc/memfs_malloc.cc for the examples.
//
// It's a no-op for malloc implementations that do not support pluggable
// system allocators.
virtual void SetSystemAllocator(SysAllocator *a);
// Try to release num_bytes of free memory back to the operating
// system for reuse. Use this extension with caution -- to get this
// memory back may require faulting pages back in by the OS, and
// that may be slow. (Currently only implemented in tcmalloc.)
virtual void ReleaseToSystem(size_t num_bytes);
// Same as ReleaseToSystem() but release as much memory as possible.
virtual void ReleaseFreeMemory();
// Sets the rate at which we release unused memory to the system.
// Zero means we never release memory back to the system. Increase
// this flag to return memory faster; decrease it to return memory
// slower. Reasonable rates are in the range [0,10]. (Currently
// only implemented in tcmalloc).
virtual void SetMemoryReleaseRate(double rate);
// Gets the release rate. Returns a value < 0 if unknown.
virtual double GetMemoryReleaseRate();
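// Illustrative sketch (not part of the original header):
//   MallocExtension::instance()->SetMemoryReleaseRate(5.0);   // return memory faster
//   MallocExtension::instance()->ReleaseFreeMemory();         // give back as much as possible now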
// Returns the estimated number of bytes that will be allocated for
// a request of "size" bytes. This is an estimate: an allocation of
// SIZE bytes may reserve more bytes, but will never reserve less.
// (Currently only implemented in tcmalloc, other implementations
// always return SIZE.)
// This is equivalent to malloc_good_size() in OS X.
virtual size_t GetEstimatedAllocatedSize(size_t size);
// Returns the actual number N of bytes reserved by tcmalloc for the
// pointer p. The client is allowed to use the range of bytes
// [p, p+N) in any way it wishes (i.e. N is the "usable size" of this
// allocation). This number may be equal to or greater than the number
// of bytes requested when p was allocated.
// p must have been allocated by this malloc implementation,
// must not be an interior pointer -- that is, must be exactly
// the pointer returned by malloc() et al., not some offset
// from that -- and should not have been freed yet. p may be NULL.
// (Currently only implemented in tcmalloc; other implementations
// will return 0.)
// This is equivalent to malloc_size() in OS X, malloc_usable_size()
// in glibc, and _msize() for windows.
virtual size_t GetAllocatedSize(const void* p);
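// Illustrative sketch (not part of the original header):
//   void* p = malloc(100);
//   size_t usable = MallocExtension::instance()->GetAllocatedSize(p);
//   // usable >= 100; the caller may use the whole [p, p + usable) range
//   free(p);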
// Returns kOwned if this malloc implementation allocated the memory
// pointed to by p, or kNotOwned if some other malloc implementation
// allocated it or p is NULL. May also return kUnknownOwnership if
// the malloc implementation does not keep track of ownership.
// REQUIRES: p must be a value returned from a previous call to
// malloc(), calloc(), realloc(), memalign(), posix_memalign(),
// valloc(), pvalloc(), new, or new[], and must refer to memory that
// is currently allocated (so, for instance, you should not pass in
// a pointer after having called free() on it).
enum Ownership {
// NOTE: Enum values MUST be kept in sync with the version in
// malloc_extension_c.h
kUnknownOwnership = 0,
kOwned,
kNotOwned
};
virtual Ownership GetOwnership(const void* p);
// The current malloc implementation. Always non-NULL.
static MallocExtension* instance();
// Change the malloc implementation. Typically called by the
// malloc implementation during initialization.
static void Register(MallocExtension* implementation);
// Returns detailed information about malloc's freelists. For each list,
// return a FreeListInfo:
struct FreeListInfo {
size_t min_object_size;
size_t max_object_size;
size_t total_bytes_free;
const char* type;
};
// Each item in the vector refers to a different freelist. The lists
// are identified by the range of allocations that objects in the
// list can satisfy ([min_object_size, max_object_size]) and the
// type of freelist (see below). The current size of the list is
// returned in total_bytes_free (which counts against a process's
// resident and virtual size).
//
// Currently supported types are:
//
// "tcmalloc.page{_unmapped}" - tcmalloc's page heap. An entry for each size
// class in the page heap is returned. Bytes in "page_unmapped"
// are no longer backed by physical memory and do not count against
// the resident size of a process.
//
// "tcmalloc.large{_unmapped}" - tcmalloc's list of objects larger
// than the largest page heap size class. Only one "large"
// entry is returned. There is no upper-bound on the size
// of objects in the large free list; this call returns
// kint64max for max_object_size. Bytes in
// "large_unmapped" are no longer backed by physical memory
// and do not count against the resident size of a process.
//
// "tcmalloc.central" - tcmalloc's central free-list. One entry per
// size-class is returned. Never unmapped.
//
// "debug.free_queue" - free objects queued by the debug allocator
// and not returned to tcmalloc.
//
// "tcmalloc.thread" - tcmalloc's per-thread caches. Never unmapped.
virtual void GetFreeListSizes(std::vector<FreeListInfo>* v);
// Get a list of stack traces of sampled allocation points. Returns
// a pointer to a "new[]-ed" result array, and stores the sample
// period in "sample_period".
//
// The state is stored as a sequence of adjacent entries
// in the returned array. Each entry has the following form:
// uintptr_t count; // Number of objects with following trace
// uintptr_t size; // Total size of objects with following trace
// uintptr_t depth; // Number of PC values in stack trace
// void* stack[depth]; // PC values that form the stack trace
//
// The list of entries is terminated by a "count" of 0.
//
// It is the responsibility of the caller to "delete[]" the returned array.
//
// May return NULL to indicate no results.
//
// This is an internal extension. Callers should use the more
// convenient "GetHeapSample(string*)" method defined above.
virtual void** ReadStackTraces(int* sample_period);
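// Illustrative sketch (not part of the original header): walking the encoding
// documented above.
//   int period = 0;
//   void** entries = MallocExtension::instance()->ReadStackTraces(&period);
//   for (void** e = entries; e != NULL && reinterpret_cast<uintptr_t>(e[0]) != 0; ) {
//     uintptr_t count = reinterpret_cast<uintptr_t>(e[0]);
//     uintptr_t size  = reinterpret_cast<uintptr_t>(e[1]);
//     uintptr_t depth = reinterpret_cast<uintptr_t>(e[2]);
//     printf("%lu objects, %lu bytes total\n", (unsigned long)count, (unsigned long)size);
//     e += 3 + depth;   // skip the "depth" PC values that follow
//   }
//   delete[] entries;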
// Like ReadStackTraces(), but returns stack traces that caused growth
// in the address space size.
virtual void** ReadHeapGrowthStackTraces();
// Returns the size in bytes of the calling thread's cache.
virtual size_t GetThreadCacheSize();
// Like MarkThreadIdle, but does not destroy the internal data
// structures of the thread cache. When the thread resumes, it will
// have an empty cache but will not need to pay to reconstruct the
// cache data structures.
virtual void MarkThreadTemporarilyIdle();
};
namespace base {
// Information passed per range. More fields may be added later.
struct MallocRange {
enum Type {
INUSE, // Application is using this range
FREE, // Range is currently free
UNMAPPED, // Backing physical memory has been returned to the OS
UNKNOWN
// More enum values may be added in the future
};
uintptr_t address; // Address of range
size_t length; // Byte length of range
Type type; // Type of this range
double fraction; // Fraction of range that is being used (0 if !INUSE)
// Perhaps add the following:
// - stack trace if this range was sampled
// - heap growth stack trace if applicable to this range
// - age when allocated (for inuse) or freed (if not in use)
};
} // namespace base
#endif // BASE_MALLOC_EXTENSION_H_
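A minimal sketch of how the interface above could be consumed, assuming the header installs as <gperftools/malloc_extension.h> (the include path is an assumption, not stated in this diff). It prints the freelist summary and walks the sampled-allocation array in the (count, size, depth, stack[depth]) layout documented above, reading each slot as a uintptr_t-sized value and stopping at the zero-count terminator.

#include <stdint.h>
#include <stdio.h>
#include <vector>
#include <gperftools/malloc_extension.h>   // assumed install path

int main() {
  MallocExtension* ext = MallocExtension::instance();   // always non-NULL

  // Freelist summary: one FreeListInfo per list.
  std::vector<MallocExtension::FreeListInfo> lists;
  ext->GetFreeListSizes(&lists);
  for (size_t i = 0; i < lists.size(); ++i) {
    printf("%s [%zu..%zu]: %zu bytes free\n", lists[i].type,
           lists[i].min_object_size, lists[i].max_object_size,
           lists[i].total_bytes_free);
  }

  // Sampled allocation traces: entries of (count, size, depth, stack[depth]),
  // terminated by count == 0; the caller owns the "new[]-ed" array.
  int sample_period = 0;
  void** entries = ext->ReadStackTraces(&sample_period);
  if (entries != NULL) {
    for (size_t i = 0; /* until count == 0 */; ) {
      uintptr_t count = reinterpret_cast<uintptr_t>(entries[i]);
      if (count == 0) break;
      uintptr_t size  = reinterpret_cast<uintptr_t>(entries[i + 1]);
      uintptr_t depth = reinterpret_cast<uintptr_t>(entries[i + 2]);
      printf("%lu objects, %lu bytes, %lu frames\n",
             (unsigned long)count, (unsigned long)size, (unsigned long)depth);
      i += 3 + depth;                       // skip over the PC values of this trace
    }
    delete[] entries;
  }
  return 0;
}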

View File

@ -1,101 +0,0 @@
/* Copyright (c) 2008, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* --
* Author: Craig Silverstein
*
* C shims for the C++ malloc_extension.h. See malloc_extension.h for
* details. Note these C shims always work on
* MallocExtension::instance(); it is not possible to have more than
* one MallocExtension object in C applications.
*/
#ifndef _MALLOC_EXTENSION_C_H_
#define _MALLOC_EXTENSION_C_H_
#include <stddef.h>
#include <sys/types.h>
/* Annoying stuff for windows -- makes sure clients can import these fns */
#ifndef PERFTOOLS_DLL_DECL
# ifdef _WIN32
# define PERFTOOLS_DLL_DECL __declspec(dllimport)
# else
# define PERFTOOLS_DLL_DECL
# endif
#endif
#ifdef __cplusplus
extern "C" {
#endif
#define kMallocExtensionHistogramSize 64
PERFTOOLS_DLL_DECL int MallocExtension_VerifyAllMemory(void);
PERFTOOLS_DLL_DECL int MallocExtension_VerifyNewMemory(const void* p);
PERFTOOLS_DLL_DECL int MallocExtension_VerifyArrayNewMemory(const void* p);
PERFTOOLS_DLL_DECL int MallocExtension_VerifyMallocMemory(const void* p);
PERFTOOLS_DLL_DECL int MallocExtension_MallocMemoryStats(int* blocks, size_t* total,
int histogram[kMallocExtensionHistogramSize]);
PERFTOOLS_DLL_DECL void MallocExtension_GetStats(char* buffer, int buffer_length);
/* TODO(csilvers): write a C version of these routines, that perhaps
* takes a function ptr and a void *.
*/
/* void MallocExtension_GetHeapSample(string* result); */
/* void MallocExtension_GetHeapGrowthStacks(string* result); */
PERFTOOLS_DLL_DECL int MallocExtension_GetNumericProperty(const char* property, size_t* value);
PERFTOOLS_DLL_DECL int MallocExtension_SetNumericProperty(const char* property, size_t value);
PERFTOOLS_DLL_DECL void MallocExtension_MarkThreadIdle(void);
PERFTOOLS_DLL_DECL void MallocExtension_MarkThreadBusy(void);
PERFTOOLS_DLL_DECL void MallocExtension_ReleaseToSystem(size_t num_bytes);
PERFTOOLS_DLL_DECL void MallocExtension_ReleaseFreeMemory(void);
PERFTOOLS_DLL_DECL size_t MallocExtension_GetEstimatedAllocatedSize(size_t size);
PERFTOOLS_DLL_DECL size_t MallocExtension_GetAllocatedSize(const void* p);
PERFTOOLS_DLL_DECL size_t MallocExtension_GetThreadCacheSize(void);
PERFTOOLS_DLL_DECL void MallocExtension_MarkThreadTemporarilyIdle(void);
/*
* NOTE: These enum values MUST be kept in sync with the version in
* malloc_extension.h
*/
typedef enum {
MallocExtension_kUnknownOwnership = 0,
MallocExtension_kOwned,
MallocExtension_kNotOwned
} MallocExtension_Ownership;
PERFTOOLS_DLL_DECL MallocExtension_Ownership MallocExtension_GetOwnership(const void* p);
#ifdef __cplusplus
} /* extern "C" */
#endif
#endif /* _MALLOC_EXTENSION_C_H_ */
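For completeness, the same kind of query through these C shims, sketched below; the <gperftools/malloc_extension_c.h> include path and the "generic.current_allocated_bytes" property name are assumptions based on common gperftools usage, not part of this diff.

#include <stdio.h>
#include <stdlib.h>
#include <gperftools/malloc_extension_c.h>   /* assumed install path */

int main(void) {
  size_t allocated = 0;
  /* Property name is an assumption; any property supported by the build works here. */
  if (MallocExtension_GetNumericProperty("generic.current_allocated_bytes", &allocated))
    printf("currently allocated: %zu bytes\n", allocated);

  void* p = malloc(100);
  printf("ownership: %d, allocated size: %zu\n",
         (int)MallocExtension_GetOwnership(p),
         MallocExtension_GetAllocatedSize(p));
  free(p);
  return 0;
}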

View File

@ -1,359 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2005, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
// Author: Sanjay Ghemawat
//
// Some of our malloc implementations can invoke the following hooks whenever
// memory is allocated or deallocated. MallocHook is thread-safe, and things
// you do before calling AddFooHook(MyHook) are visible to any resulting calls
// to MyHook. Hooks must be thread-safe. If you write:
//
// CHECK(MallocHook::AddNewHook(&MyNewHook));
//
// MyNewHook will be invoked in subsequent calls in the current thread, but
// there are no guarantees on when it might be invoked in other threads.
//
// There are a limited number of slots available for each hook type. Add*Hook
// will return false if there are no slots available. Remove*Hook will return
// false if the given hook was not already installed.
//
// The order in which individual hooks are called in Invoke*Hook is undefined.
//
// It is safe for a hook to remove itself within Invoke*Hook and add other
// hooks. Any hooks added inside a hook invocation (for the same hook type)
// will not be invoked for the current invocation.
//
// One important user of these hooks is the heap profiler.
//
// CAVEAT: If you add new MallocHook::Invoke* calls then those calls must be
// directly in the code of the (de)allocation function that is provided to the
// user and that function must have an ATTRIBUTE_SECTION(malloc_hook) attribute.
//
// Note: the Invoke*Hook() functions are defined in malloc_hook-inl.h. If you
// need to invoke a hook (which you shouldn't unless you're part of tcmalloc),
// be sure to #include malloc_hook-inl.h in addition to malloc_hook.h.
//
// NOTE FOR C USERS: If you want to use malloc_hook functionality from
// a C program, #include malloc_hook_c.h instead of this file.
#ifndef _MALLOC_HOOK_H_
#define _MALLOC_HOOK_H_
#include <stddef.h>
#include <sys/types.h>
extern "C" {
#include "malloc_hook_c.h" // a C version of the malloc_hook interface
}
// Annoying stuff for windows -- makes sure clients can import these functions
#ifndef PERFTOOLS_DLL_DECL
# ifdef _WIN32
# define PERFTOOLS_DLL_DECL __declspec(dllimport)
# else
# define PERFTOOLS_DLL_DECL
# endif
#endif
// The C++ methods below call the C version (MallocHook_*), and thus
// convert between an int and a bool. Windows complains about this
// (a "performance warning") which we don't care about, so we suppress.
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable:4800)
#endif
// Note: malloc_hook_c.h defines MallocHook_*Hook and
// MallocHook_{Add,Remove}*Hook. The version of these inside the MallocHook
// class are defined in terms of the malloc_hook_c version. See malloc_hook_c.h
// for details of these types/functions.
class PERFTOOLS_DLL_DECL MallocHook {
public:
// The NewHook is invoked whenever an object is allocated.
// It may be passed NULL if the allocator returned NULL.
typedef MallocHook_NewHook NewHook;
inline static bool AddNewHook(NewHook hook) {
return MallocHook_AddNewHook(hook);
}
inline static bool RemoveNewHook(NewHook hook) {
return MallocHook_RemoveNewHook(hook);
}
inline static void InvokeNewHook(const void* p, size_t s);
// The DeleteHook is invoked whenever an object is deallocated.
// It may be passed NULL if the caller is trying to delete NULL.
typedef MallocHook_DeleteHook DeleteHook;
inline static bool AddDeleteHook(DeleteHook hook) {
return MallocHook_AddDeleteHook(hook);
}
inline static bool RemoveDeleteHook(DeleteHook hook) {
return MallocHook_RemoveDeleteHook(hook);
}
inline static void InvokeDeleteHook(const void* p);
// The PreMmapHook is invoked with mmap or mmap64 arguments just
// before the call is actually made. Such a hook may be useful
// in memory limited contexts, to catch allocations that will exceed
// a memory limit, and take outside actions to increase that limit.
typedef MallocHook_PreMmapHook PreMmapHook;
inline static bool AddPreMmapHook(PreMmapHook hook) {
return MallocHook_AddPreMmapHook(hook);
}
inline static bool RemovePreMmapHook(PreMmapHook hook) {
return MallocHook_RemovePreMmapHook(hook);
}
inline static void InvokePreMmapHook(const void* start,
size_t size,
int protection,
int flags,
int fd,
off_t offset);
// The MmapReplacement is invoked after the PreMmapHook but before
// the call is actually made. The MmapReplacement should return true
// if it handled the call, or false if it is still necessary to
// call mmap/mmap64.
// This should be used only by experts, and users must be
// extremely careful to avoid recursive calls to mmap. The replacement
// should be async signal safe.
// Only one MmapReplacement is supported. After setting an MmapReplacement
// you must call RemoveMmapReplacement before calling SetMmapReplacement
// again.
typedef MallocHook_MmapReplacement MmapReplacement;
inline static bool SetMmapReplacement(MmapReplacement hook) {
return MallocHook_SetMmapReplacement(hook);
}
inline static bool RemoveMmapReplacement(MmapReplacement hook) {
return MallocHook_RemoveMmapReplacement(hook);
}
inline static bool InvokeMmapReplacement(const void* start,
size_t size,
int protection,
int flags,
int fd,
off_t offset,
void** result);
// The MmapHook is invoked whenever a region of memory is mapped.
// It may be passed MAP_FAILED if the mmap failed.
typedef MallocHook_MmapHook MmapHook;
inline static bool AddMmapHook(MmapHook hook) {
return MallocHook_AddMmapHook(hook);
}
inline static bool RemoveMmapHook(MmapHook hook) {
return MallocHook_RemoveMmapHook(hook);
}
inline static void InvokeMmapHook(const void* result,
const void* start,
size_t size,
int protection,
int flags,
int fd,
off_t offset);
// The MunmapReplacement is invoked with munmap arguments just before
// the call is actually made. The MunmapReplacement should return true
// if it handled the call, or false if it is still necessary to
// call munmap.
// This should be used only by experts. The replacement should be
// async signal safe.
// Only one MunmapReplacement is supported. After setting an
// MunmapReplacement you must call RemoveMunmapReplacement before
// calling SetMunmapReplacement again.
typedef MallocHook_MunmapReplacement MunmapReplacement;
inline static bool SetMunmapReplacement(MunmapReplacement hook) {
return MallocHook_SetMunmapReplacement(hook);
}
inline static bool RemoveMunmapReplacement(MunmapReplacement hook) {
return MallocHook_RemoveMunmapReplacement(hook);
}
inline static bool InvokeMunmapReplacement(const void* p,
size_t size,
int* result);
// The MunmapHook is invoked whenever a region of memory is unmapped.
typedef MallocHook_MunmapHook MunmapHook;
inline static bool AddMunmapHook(MunmapHook hook) {
return MallocHook_AddMunmapHook(hook);
}
inline static bool RemoveMunmapHook(MunmapHook hook) {
return MallocHook_RemoveMunmapHook(hook);
}
inline static void InvokeMunmapHook(const void* p, size_t size);
// The MremapHook is invoked whenever a region of memory is remapped.
typedef MallocHook_MremapHook MremapHook;
inline static bool AddMremapHook(MremapHook hook) {
return MallocHook_AddMremapHook(hook);
}
inline static bool RemoveMremapHook(MremapHook hook) {
return MallocHook_RemoveMremapHook(hook);
}
inline static void InvokeMremapHook(const void* result,
const void* old_addr,
size_t old_size,
size_t new_size,
int flags,
const void* new_addr);
// The PreSbrkHook is invoked just before sbrk is called -- except when
// the increment is 0. This is because sbrk(0) is often called
// to get the top of the memory stack, and is not actually a
// memory-allocation call. It may be useful in memory-limited contexts,
// to catch allocations that will exceed the limit and take outside
// actions to increase such a limit.
typedef MallocHook_PreSbrkHook PreSbrkHook;
inline static bool AddPreSbrkHook(PreSbrkHook hook) {
return MallocHook_AddPreSbrkHook(hook);
}
inline static bool RemovePreSbrkHook(PreSbrkHook hook) {
return MallocHook_RemovePreSbrkHook(hook);
}
inline static void InvokePreSbrkHook(ptrdiff_t increment);
// The SbrkHook is invoked whenever sbrk is called -- except when
// the increment is 0. This is because sbrk(0) is often called
// to get the top of the memory stack, and is not actually a
// memory-allocation call.
typedef MallocHook_SbrkHook SbrkHook;
inline static bool AddSbrkHook(SbrkHook hook) {
return MallocHook_AddSbrkHook(hook);
}
inline static bool RemoveSbrkHook(SbrkHook hook) {
return MallocHook_RemoveSbrkHook(hook);
}
inline static void InvokeSbrkHook(const void* result, ptrdiff_t increment);
// Get the current stack trace. Try to skip all routines up to and
// including the caller of MallocHook::Invoke*.
// Use "skip_count" (similarly to GetStackTrace from stacktrace.h)
// as a hint about how many routines to skip if better information
// is not available.
inline static int GetCallerStackTrace(void** result, int max_depth,
int skip_count) {
return MallocHook_GetCallerStackTrace(result, max_depth, skip_count);
}
// Unhooked versions of mmap() and munmap(). These should be used
// only by experts, since they bypass heapchecking, etc.
// Note: These do not run hooks, but they still use the MmapReplacement
// and MunmapReplacement.
static void* UnhookedMMap(void *start, size_t length, int prot, int flags,
int fd, off_t offset);
static int UnhookedMUnmap(void *start, size_t length);
// The following are DEPRECATED.
inline static NewHook GetNewHook();
inline static NewHook SetNewHook(NewHook hook) {
return MallocHook_SetNewHook(hook);
}
inline static DeleteHook GetDeleteHook();
inline static DeleteHook SetDeleteHook(DeleteHook hook) {
return MallocHook_SetDeleteHook(hook);
}
inline static PreMmapHook GetPreMmapHook();
inline static PreMmapHook SetPreMmapHook(PreMmapHook hook) {
return MallocHook_SetPreMmapHook(hook);
}
inline static MmapHook GetMmapHook();
inline static MmapHook SetMmapHook(MmapHook hook) {
return MallocHook_SetMmapHook(hook);
}
inline static MunmapHook GetMunmapHook();
inline static MunmapHook SetMunmapHook(MunmapHook hook) {
return MallocHook_SetMunmapHook(hook);
}
inline static MremapHook GetMremapHook();
inline static MremapHook SetMremapHook(MremapHook hook) {
return MallocHook_SetMremapHook(hook);
}
inline static PreSbrkHook GetPreSbrkHook();
inline static PreSbrkHook SetPreSbrkHook(PreSbrkHook hook) {
return MallocHook_SetPreSbrkHook(hook);
}
inline static SbrkHook GetSbrkHook();
inline static SbrkHook SetSbrkHook(SbrkHook hook) {
return MallocHook_SetSbrkHook(hook);
}
// End of DEPRECATED methods.
private:
// Slow path versions of Invoke*Hook.
static void InvokeNewHookSlow(const void* p, size_t s);
static void InvokeDeleteHookSlow(const void* p);
static void InvokePreMmapHookSlow(const void* start,
size_t size,
int protection,
int flags,
int fd,
off_t offset);
static void InvokeMmapHookSlow(const void* result,
const void* start,
size_t size,
int protection,
int flags,
int fd,
off_t offset);
static bool InvokeMmapReplacementSlow(const void* start,
size_t size,
int protection,
int flags,
int fd,
off_t offset,
void** result);
static void InvokeMunmapHookSlow(const void* p, size_t size);
static bool InvokeMunmapReplacementSlow(const void* p,
size_t size,
int* result);
static void InvokeMremapHookSlow(const void* result,
const void* old_addr,
size_t old_size,
size_t new_size,
int flags,
const void* new_addr);
static void InvokePreSbrkHookSlow(ptrdiff_t increment);
static void InvokeSbrkHookSlow(const void* result, ptrdiff_t increment);
};
#ifdef _MSC_VER
#pragma warning(pop)
#endif
#endif /* _MALLOC_HOOK_H_ */
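A minimal sketch of registering allocation hooks through the class above, assuming the header installs as <gperftools/malloc_hook.h>. Hooks run on every allocation and must be thread-safe; writing to stderr from inside a hook is done here only for illustration, since it can itself allocate.

#include <stddef.h>
#include <stdio.h>
#include <gperftools/malloc_hook.h>   // assumed install path

static void MyNewHook(const void* ptr, size_t size) {
  fprintf(stderr, "allocated %zu bytes at %p\n", size, ptr);
}

static void MyDeleteHook(const void* ptr) {
  fprintf(stderr, "freed %p\n", ptr);
}

int main() {
  // Add*Hook returns false if no hook slot is available.
  if (!MallocHook::AddNewHook(&MyNewHook)) return 1;
  if (!MallocHook::AddDeleteHook(&MyDeleteHook)) return 1;

  void* p = ::operator new(64);
  ::operator delete(p);

  MallocHook::RemoveDeleteHook(&MyDeleteHook);
  MallocHook::RemoveNewHook(&MyNewHook);
  return 0;
}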

View File

@ -1,173 +0,0 @@
/* Copyright (c) 2008, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* --
* Author: Craig Silverstein
*
* C shims for the C++ malloc_hook.h. See malloc_hook.h for details
* on how to use these.
*/
#ifndef _MALLOC_HOOK_C_H_
#define _MALLOC_HOOK_C_H_
#include <stddef.h>
#include <sys/types.h>
/* Annoying stuff for windows; makes sure clients can import these functions */
#ifndef PERFTOOLS_DLL_DECL
# ifdef _WIN32
# define PERFTOOLS_DLL_DECL __declspec(dllimport)
# else
# define PERFTOOLS_DLL_DECL
# endif
#endif
#ifdef __cplusplus
extern "C" {
#endif
/* Get the current stack trace. Try to skip all routines up to and
* including the caller of MallocHook::Invoke*.
* Use "skip_count" (similarly to GetStackTrace from stacktrace.h)
* as a hint about how many routines to skip if better information
* is not available.
*/
PERFTOOLS_DLL_DECL
int MallocHook_GetCallerStackTrace(void** result, int max_depth,
int skip_count);
/* The MallocHook_{Add,Remove}*Hook functions return 1 on success and 0 on
* failure.
*/
typedef void (*MallocHook_NewHook)(const void* ptr, size_t size);
PERFTOOLS_DLL_DECL
int MallocHook_AddNewHook(MallocHook_NewHook hook);
PERFTOOLS_DLL_DECL
int MallocHook_RemoveNewHook(MallocHook_NewHook hook);
typedef void (*MallocHook_DeleteHook)(const void* ptr);
PERFTOOLS_DLL_DECL
int MallocHook_AddDeleteHook(MallocHook_DeleteHook hook);
PERFTOOLS_DLL_DECL
int MallocHook_RemoveDeleteHook(MallocHook_DeleteHook hook);
typedef void (*MallocHook_PreMmapHook)(const void *start,
size_t size,
int protection,
int flags,
int fd,
off_t offset);
PERFTOOLS_DLL_DECL
int MallocHook_AddPreMmapHook(MallocHook_PreMmapHook hook);
PERFTOOLS_DLL_DECL
int MallocHook_RemovePreMmapHook(MallocHook_PreMmapHook hook);
typedef void (*MallocHook_MmapHook)(const void* result,
const void* start,
size_t size,
int protection,
int flags,
int fd,
off_t offset);
PERFTOOLS_DLL_DECL
int MallocHook_AddMmapHook(MallocHook_MmapHook hook);
PERFTOOLS_DLL_DECL
int MallocHook_RemoveMmapHook(MallocHook_MmapHook hook);
typedef int (*MallocHook_MmapReplacement)(const void* start,
size_t size,
int protection,
int flags,
int fd,
off_t offset,
void** result);
int MallocHook_SetMmapReplacement(MallocHook_MmapReplacement hook);
int MallocHook_RemoveMmapReplacement(MallocHook_MmapReplacement hook);
typedef void (*MallocHook_MunmapHook)(const void* ptr, size_t size);
PERFTOOLS_DLL_DECL
int MallocHook_AddMunmapHook(MallocHook_MunmapHook hook);
PERFTOOLS_DLL_DECL
int MallocHook_RemoveMunmapHook(MallocHook_MunmapHook hook);
typedef int (*MallocHook_MunmapReplacement)(const void* ptr,
size_t size,
int* result);
int MallocHook_SetMunmapReplacement(MallocHook_MunmapReplacement hook);
int MallocHook_RemoveMunmapReplacement(MallocHook_MunmapReplacement hook);
typedef void (*MallocHook_MremapHook)(const void* result,
const void* old_addr,
size_t old_size,
size_t new_size,
int flags,
const void* new_addr);
PERFTOOLS_DLL_DECL
int MallocHook_AddMremapHook(MallocHook_MremapHook hook);
PERFTOOLS_DLL_DECL
int MallocHook_RemoveMremapHook(MallocHook_MremapHook hook);
typedef void (*MallocHook_PreSbrkHook)(ptrdiff_t increment);
PERFTOOLS_DLL_DECL
int MallocHook_AddPreSbrkHook(MallocHook_PreSbrkHook hook);
PERFTOOLS_DLL_DECL
int MallocHook_RemovePreSbrkHook(MallocHook_PreSbrkHook hook);
typedef void (*MallocHook_SbrkHook)(const void* result, ptrdiff_t increment);
PERFTOOLS_DLL_DECL
int MallocHook_AddSbrkHook(MallocHook_SbrkHook hook);
PERFTOOLS_DLL_DECL
int MallocHook_RemoveSbrkHook(MallocHook_SbrkHook hook);
/* The following are DEPRECATED. */
PERFTOOLS_DLL_DECL
MallocHook_NewHook MallocHook_SetNewHook(MallocHook_NewHook hook);
PERFTOOLS_DLL_DECL
MallocHook_DeleteHook MallocHook_SetDeleteHook(MallocHook_DeleteHook hook);
PERFTOOLS_DLL_DECL
MallocHook_PreMmapHook MallocHook_SetPreMmapHook(MallocHook_PreMmapHook hook);
PERFTOOLS_DLL_DECL
MallocHook_MmapHook MallocHook_SetMmapHook(MallocHook_MmapHook hook);
PERFTOOLS_DLL_DECL
MallocHook_MunmapHook MallocHook_SetMunmapHook(MallocHook_MunmapHook hook);
PERFTOOLS_DLL_DECL
MallocHook_MremapHook MallocHook_SetMremapHook(MallocHook_MremapHook hook);
PERFTOOLS_DLL_DECL
MallocHook_PreSbrkHook MallocHook_SetPreSbrkHook(MallocHook_PreSbrkHook hook);
PERFTOOLS_DLL_DECL
MallocHook_SbrkHook MallocHook_SetSbrkHook(MallocHook_SbrkHook hook);
/* End of DEPRECATED functions. */
#ifdef __cplusplus
} // extern "C"
#endif
#endif /* _MALLOC_HOOK_C_H_ */
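The same mechanism from the C side, sketched with an mmap hook; the include path is again an assumption.

#include <stdio.h>
#include <sys/types.h>
#include <gperftools/malloc_hook_c.h>   /* assumed install path */

static void MyMmapHook(const void* result, const void* start, size_t size,
                       int protection, int flags, int fd, off_t offset) {
  fprintf(stderr, "mmap of %zu bytes -> %p\n", size, result);
}

int main(void) {
  /* The Add/Remove functions return 1 on success and 0 on failure. */
  if (!MallocHook_AddMmapHook(&MyMmapHook))
    return 1;
  /* ... workload that maps memory ... */
  MallocHook_RemoveMmapHook(&MyMmapHook);
  return 0;
}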

View File

@ -1,169 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2005, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ---
* Author: Sanjay Ghemawat
*
* Module for CPU profiling based on periodic pc-sampling.
*
* For full(er) information, see doc/cpuprofile.html
*
* This module is linked into your program with
* no slowdown caused by this unless you activate the profiler
* using one of the following methods:
*
* 1. Before starting the program, set the environment variable
* "CPUPROFILE" to be the name of the file to which the profile
* data should be written.
*
* 2. Programmatically, start and stop the profiler using the
* routines "ProfilerStart(filename)" and "ProfilerStop()".
*
*
* (Note: if using linux 2.4 or earlier, only the main thread may be
* profiled.)
*
* Use pprof to view the resulting profile output.
* % pprof <path_to_executable> <profile_file_name>
* % pprof --gv <path_to_executable> <profile_file_name>
*
* These functions are thread-safe.
*/
#ifndef BASE_PROFILER_H_
#define BASE_PROFILER_H_
#include <time.h> /* For time_t */
/* Annoying stuff for windows; makes sure clients can import these functions */
#ifndef PERFTOOLS_DLL_DECL
# ifdef _WIN32
# define PERFTOOLS_DLL_DECL __declspec(dllimport)
# else
# define PERFTOOLS_DLL_DECL
# endif
#endif
/* All this code should be usable from within C apps. */
#ifdef __cplusplus
extern "C" {
#endif
/* Profiler options, for use with ProfilerStartWithOptions. To use:
*
* struct ProfilerOptions options;
* memset(&options, 0, sizeof options);
*
* then fill in fields as needed.
*
* This structure is intended to be usable from C code, so no constructor
* is provided to initialize it. (Use memset as described above).
*/
struct ProfilerOptions {
/* Filter function and argument.
*
* If filter_in_thread is not NULL, when a profiling tick is delivered
* the profiler will call:
*
* (*filter_in_thread)(filter_in_thread_arg)
*
* If it returns nonzero, the sample will be included in the profile.
* Note that filter_in_thread runs in a signal handler, so must be
* async-signal-safe.
*
* A typical use would be to set up filter results for each thread
* in the system before starting the profiler, then to make
* filter_in_thread be a very simple function which retrieves those
* results in an async-signal-safe way. Retrieval could be done
* using thread-specific data, or using a shared data structure that
* supports async-signal-safe lookups.
*/
int (*filter_in_thread)(void *arg);
void *filter_in_thread_arg;
};
/* Start profiling and write profile info into fname, discarding any
* existing profiling data in that file.
*
* This is equivalent to calling ProfilerStartWithOptions(fname, NULL).
*/
PERFTOOLS_DLL_DECL int ProfilerStart(const char* fname);
/* Start profiling and write profile into fname, discarding any
* existing profiling data in that file.
*
* The profiler is configured using the options given by 'options'.
* Options which are not specified are given default values.
*
* 'options' may be NULL, in which case all are given default values.
*
* Returns nonzero if profiling was started successfully, zero otherwise.
*/
PERFTOOLS_DLL_DECL int ProfilerStartWithOptions(
const char *fname, const struct ProfilerOptions *options);
/* Stop profiling. Can be started again with ProfilerStart(), but
* the currently accumulated profiling data will be cleared.
*/
PERFTOOLS_DLL_DECL void ProfilerStop(void);
/* Flush any currently buffered profiling state to the profile file.
* Has no effect if the profiler has not been started.
*/
PERFTOOLS_DLL_DECL void ProfilerFlush(void);
/* DEPRECATED: these functions were used to enable/disable profiling
* in the current thread, but no longer do anything.
*/
PERFTOOLS_DLL_DECL void ProfilerEnable(void);
PERFTOOLS_DLL_DECL void ProfilerDisable(void);
/* Returns nonzero if profile is currently enabled, zero if it's not. */
PERFTOOLS_DLL_DECL int ProfilingIsEnabledForAllThreads(void);
/* Routine for registering new threads with the profiler.
*/
PERFTOOLS_DLL_DECL void ProfilerRegisterThread(void);
/* Stores state about profiler's current status into "*state". */
struct ProfilerState {
int enabled; /* Is profiling currently enabled? */
time_t start_time; /* If enabled, when was profiling started? */
char profile_name[1024]; /* Name of profile file being written, or '\0' */
int samples_gathered; /* Number of samples gathered so far (or 0) */
};
PERFTOOLS_DLL_DECL void ProfilerGetCurrentState(struct ProfilerState* state);
#ifdef __cplusplus
} // extern "C"
#endif
#endif /* BASE_PROFILER_H_ */
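A sketch of starting the profiler with a filter, following the memset-based initialization described above; the output path and include path are placeholders.

#include <string.h>
#include <gperftools/profiler.h>   // assumed install path

// Runs in a signal handler on every profiling tick, so it must stay
// async-signal-safe; here it just reads a flag passed via the arg pointer.
static int FilterInThread(void* arg) {
  return *static_cast<int*>(arg) != 0;
}

int main() {
  static int profile_this_thread = 1;

  struct ProfilerOptions options;
  memset(&options, 0, sizeof options);          // no constructor: zero-fill as documented
  options.filter_in_thread = &FilterInThread;
  options.filter_in_thread_arg = &profile_this_thread;

  if (ProfilerStartWithOptions("/tmp/myprog.prof", &options)) {
    // ... workload to be profiled ...
    ProfilerFlush();
    ProfilerStop();
  }
  return 0;
}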

View File

@ -1,117 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2005, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
// Author: Sanjay Ghemawat
//
// Routines to extract the current stack trace. These functions are
// thread-safe.
#ifndef GOOGLE_STACKTRACE_H_
#define GOOGLE_STACKTRACE_H_
// Annoying stuff for windows -- makes sure clients can import these functions
#ifndef PERFTOOLS_DLL_DECL
# ifdef _WIN32
# define PERFTOOLS_DLL_DECL __declspec(dllimport)
# else
# define PERFTOOLS_DLL_DECL
# endif
#endif
// Skips the most recent "skip_count" stack frames (also skips the
// frame generated for the "GetStackFrames" routine itself), and then
// records the pc values for up to the next "max_depth" frames in
// "result", and the corresponding stack frame sizes in "sizes".
// Returns the number of values recorded in "result"/"sizes".
//
// Example:
// main() { foo(); }
// foo() { bar(); }
// bar() {
// void* result[10];
// int sizes[10];
// int depth = GetStackFrames(result, sizes, 10, 1);
// }
//
// The GetStackFrames call will skip the frame for "bar". It will
// return 2 and will produce pc values that map to the following
// procedures:
// result[0] foo
// result[1] main
// (Actually, there may be a few more entries after "main" to account for
// startup procedures.)
// And corresponding stack frame sizes will also be recorded:
// sizes[0] 16
// sizes[1] 16
// (Stack frame sizes of 16 above are just for illustration purposes.)
// Stack frame sizes of 0 or less indicate that those frame sizes couldn't
// be identified.
//
// This routine may return fewer stack frame entries than are
// available. Also note that "result" and "sizes" must both be non-NULL.
extern PERFTOOLS_DLL_DECL int GetStackFrames(void** result, int* sizes, int max_depth,
int skip_count);
// Same as above, but to be used from a signal handler. The "uc" parameter
// should be the pointer to ucontext_t which was passed as the 3rd parameter
// to sa_sigaction signal handler. It may help the unwinder to get a
// better stack trace under certain conditions. The "uc" may safely be NULL.
extern PERFTOOLS_DLL_DECL int GetStackFramesWithContext(void** result, int* sizes, int max_depth,
int skip_count, const void *uc);
// This is similar to the GetStackFrames routine, except that it returns
// the stack trace only, and not the stack frame sizes as well.
// Example:
// main() { foo(); }
// foo() { bar(); }
// bar() {
// void* result[10];
// int depth = GetStackTrace(result, 10, 1);
// }
//
// This produces:
// result[0] foo
// result[1] main
// .... ...
//
// "result" must not be NULL.
extern PERFTOOLS_DLL_DECL int GetStackTrace(void** result, int max_depth,
int skip_count);
// Same as above, but to be used from a signal handler. The "uc" parameter
// should be the pointer to ucontext_t which was passed as the 3rd parameter
// to sa_sigaction signal handler. It may help the unwinder to get a
// better stack trace under certain conditions. The "uc" may safely be NULL.
extern PERFTOOLS_DLL_DECL int GetStackTraceWithContext(void** result, int max_depth,
int skip_count, const void *uc);
#endif /* GOOGLE_STACKTRACE_H_ */
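A runnable version of the GetStackFrames example sketched in the comments above (include path assumed):

#include <stdio.h>
#include <gperftools/stacktrace.h>   // assumed install path

void Bar() {
  void* pcs[10];
  int sizes[10];
  // skip_count == 1 skips Bar itself, so the trace starts at its caller.
  int depth = GetStackFrames(pcs, sizes, 10, 1);
  for (int i = 0; i < depth; ++i)
    printf("frame %d: pc=%p frame_size=%d\n", i, pcs[i], sizes[i]);
}

void Foo() { Bar(); }

int main() { Foo(); return 0; }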

View File

@ -1,160 +0,0 @@
// -*- Mode: C; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2003, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ---
* Author: Sanjay Ghemawat <opensource@google.com>
* .h file by Craig Silverstein <opensource@google.com>
*/
#ifndef TCMALLOC_TCMALLOC_H_
#define TCMALLOC_TCMALLOC_H_
#include <stddef.h> /* for size_t */
/* Define the version number so folks can check against it */
#define TC_VERSION_MAJOR 2
#define TC_VERSION_MINOR 5
#define TC_VERSION_PATCH ""
#define TC_VERSION_STRING "gperftools 2.5"
/* For struct mallinfo, if it's defined. */
#if !defined(__APPLE__) && !defined(__FreeBSD__)
# include <malloc.h>
#else
struct mallinfo {
size_t arena; /* non-mmapped space allocated from system */
size_t ordblks; /* number of free chunks */
size_t smblks; /* always 0 */
size_t hblks; /* always 0 */
size_t hblkhd; /* space in mmapped regions */
size_t usmblks; /* maximum total allocated space */
size_t fsmblks; /* always 0 */
size_t uordblks; /* total allocated space */
size_t fordblks; /* total free space */
size_t keepcost; /* releasable (via malloc_trim) space */
};
#endif
#ifdef __cplusplus
#define PERFTOOLS_THROW throw()
#else
# ifdef __GNUC__
# define PERFTOOLS_THROW __attribute__((__nothrow__))
# else
# define PERFTOOLS_THROW
# endif
#endif
#ifndef PERFTOOLS_DLL_DECL
#define PERFTOOLS_DLL_DECL_DEFINED
# ifdef _WIN32
# define PERFTOOLS_DLL_DECL __declspec(dllimport)
# else
# define PERFTOOLS_DLL_DECL
# endif
#endif
#ifdef __cplusplus
namespace std {
struct nothrow_t;
}
extern "C" {
#endif
/*
* Returns a human-readable version string. If major, minor,
* and/or patch are not NULL, they are set to the major version,
* minor version, and patch-code (a string, usually "").
*/
PERFTOOLS_DLL_DECL const char* tc_version(int* major, int* minor,
const char** patch) PERFTOOLS_THROW;
PERFTOOLS_DLL_DECL void* tc_malloc(size_t size) PERFTOOLS_THROW;
PERFTOOLS_DLL_DECL void* tc_malloc_skip_new_handler(size_t size) PERFTOOLS_THROW;
PERFTOOLS_DLL_DECL void tc_free(void* ptr) PERFTOOLS_THROW;
PERFTOOLS_DLL_DECL void tc_free_sized(void *ptr, size_t size) PERFTOOLS_THROW;
PERFTOOLS_DLL_DECL void* tc_realloc(void* ptr, size_t size) PERFTOOLS_THROW;
PERFTOOLS_DLL_DECL void* tc_calloc(size_t nmemb, size_t size) PERFTOOLS_THROW;
PERFTOOLS_DLL_DECL void tc_cfree(void* ptr) PERFTOOLS_THROW;
PERFTOOLS_DLL_DECL void* tc_memalign(size_t __alignment,
size_t __size) PERFTOOLS_THROW;
PERFTOOLS_DLL_DECL int tc_posix_memalign(void** ptr,
size_t align, size_t size) PERFTOOLS_THROW;
PERFTOOLS_DLL_DECL void* tc_valloc(size_t __size) PERFTOOLS_THROW;
PERFTOOLS_DLL_DECL void* tc_pvalloc(size_t __size) PERFTOOLS_THROW;
PERFTOOLS_DLL_DECL void tc_malloc_stats(void) PERFTOOLS_THROW;
PERFTOOLS_DLL_DECL int tc_mallopt(int cmd, int value) PERFTOOLS_THROW;
#if 1
PERFTOOLS_DLL_DECL struct mallinfo tc_mallinfo(void) PERFTOOLS_THROW;
#endif
/*
* This is an alias for MallocExtension::instance()->GetAllocatedSize().
* It is equivalent to
* OS X: malloc_size()
* glibc: malloc_usable_size()
* Windows: _msize()
*/
PERFTOOLS_DLL_DECL size_t tc_malloc_size(void* ptr) PERFTOOLS_THROW;
#ifdef __cplusplus
PERFTOOLS_DLL_DECL int tc_set_new_mode(int flag) PERFTOOLS_THROW;
PERFTOOLS_DLL_DECL void* tc_new(size_t size);
PERFTOOLS_DLL_DECL void* tc_new_nothrow(size_t size,
const std::nothrow_t&) PERFTOOLS_THROW;
PERFTOOLS_DLL_DECL void tc_delete(void* p) PERFTOOLS_THROW;
PERFTOOLS_DLL_DECL void tc_delete_sized(void* p, size_t size) throw();
PERFTOOLS_DLL_DECL void tc_delete_nothrow(void* p,
const std::nothrow_t&) PERFTOOLS_THROW;
PERFTOOLS_DLL_DECL void* tc_newarray(size_t size);
PERFTOOLS_DLL_DECL void* tc_newarray_nothrow(size_t size,
const std::nothrow_t&) PERFTOOLS_THROW;
PERFTOOLS_DLL_DECL void tc_deletearray(void* p) PERFTOOLS_THROW;
PERFTOOLS_DLL_DECL void tc_deletearray_sized(void* p, size_t size) throw();
PERFTOOLS_DLL_DECL void tc_deletearray_nothrow(void* p,
const std::nothrow_t&) PERFTOOLS_THROW;
}
#endif
/* We un-define these only for public use (i.e. when not building gperftools itself) */
#if !defined(GPERFTOOLS_CONFIG_H_)
#undef PERFTOOLS_THROW
#ifdef PERFTOOLS_DLL_DECL_DEFINED
#undef PERFTOOLS_DLL_DECL
#undef PERFTOOLS_DLL_DECL_DEFINED
#endif
#endif /* GPERFTOOLS_CONFIG_H_ */
#endif /* #ifndef TCMALLOC_TCMALLOC_H_ */
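A small sketch exercising the C entry points above, assuming the header installs as <gperftools/tcmalloc.h>:

#include <stdio.h>
#include <gperftools/tcmalloc.h>   // assumed install path

int main() {
  int major = 0, minor = 0;
  const char* patch = NULL;
  printf("linked against %s (%d.%d%s)\n",
         tc_version(&major, &minor, &patch), major, minor, patch);

  void* p = tc_malloc(1000);
  // tc_malloc_size() is the alias for MallocExtension's GetAllocatedSize().
  printf("usable size: %zu bytes\n", tc_malloc_size(p));
  tc_free(p);
  return 0;
}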

View File

@ -1,422 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2005, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
// Author: Sanjay Ghemawat
//
// A fast map from addresses to values. Assumes that addresses are
// clustered. The main use is intended to be for heap-profiling.
// May be too memory-hungry for other uses.
//
// We use a user-defined allocator/de-allocator so that we can use
// this data structure during heap-profiling.
//
// IMPLEMENTATION DETAIL:
//
// Some default definitions/parameters:
// * Block -- aligned 128-byte region of the address space
// * Cluster -- aligned 1-MB region of the address space
// * Block-ID -- block-number within a cluster
// * Cluster-ID -- Starting address of cluster divided by cluster size
//
// We use a three-level map to represent the state:
// 1. A hash-table maps from a cluster-ID to the data for that cluster.
// 2. For each non-empty cluster we keep an array indexed by
// block-ID that points to the first entry in the linked-list
// for the block.
// 3. At the bottom, we keep a singly-linked list of all
// entries in a block (for non-empty blocks).
//
// hash table
// +-------------+
// | id->cluster |---> ...
// | ... |
// | id->cluster |---> Cluster
// +-------------+ +-------+ Data for one block
// | nil | +------------------------------------+
// | ----+---|->[addr/value]-->[addr/value]-->... |
// | nil | +------------------------------------+
// | ----+--> ...
// | nil |
// | ... |
// +-------+
//
// Note that we require zero-bytes of overhead for completely empty
// clusters. The minimum space requirement for a cluster is the size
// of the hash-table entry plus a pointer value for each block in
// the cluster. Empty blocks impose no extra space requirement.
//
// The cost of a lookup is:
// a. A hash-table lookup to find the cluster
// b. An array access in the cluster structure
// c. A traversal over the linked-list for a block
#ifndef BASE_ADDRESSMAP_INL_H_
#define BASE_ADDRESSMAP_INL_H_
#include "config.h"
#include <stddef.h>
#include <string.h>
#if defined HAVE_STDINT_H
#include <stdint.h> // to get uint16_t (ISO naming madness)
#elif defined HAVE_INTTYPES_H
#include <inttypes.h> // another place uint16_t might be defined
#else
#include <sys/types.h> // our last best hope
#endif
// This class is thread-unsafe -- that is, instances of this class can
// not be accessed concurrently by multiple threads -- because the
// callback function for Iterate() may mutate contained values. If the
// callback functions you pass do not mutate their Value* argument,
// AddressMap can be treated as thread-compatible -- that is, it's
// safe for multiple threads to call "const" methods on this class,
// but not safe for one thread to call const methods on this class
// while another thread is calling non-const methods on the class.
template <class Value>
class AddressMap {
public:
typedef void* (*Allocator)(size_t size);
typedef void (*DeAllocator)(void* ptr);
typedef const void* Key;
// Create an AddressMap that uses the specified allocator/deallocator.
// The allocator/deallocator should behave like malloc/free.
// For instance, the allocator does not need to return initialized memory.
AddressMap(Allocator alloc, DeAllocator dealloc);
~AddressMap();
// If the map contains an entry for "key", return it. Else return NULL.
inline const Value* Find(Key key) const;
inline Value* FindMutable(Key key);
// Insert <key,value> into the map. Any old value associated
// with key is forgotten.
void Insert(Key key, Value value);
// Remove any entry for key in the map. If an entry was found
// and removed, stores the associated value in "*removed_value"
// and returns true. Else returns false.
bool FindAndRemove(Key key, Value* removed_value);
// Similar to Find but we assume that keys are addresses of non-overlapping
// memory ranges whose sizes are given by size_func.
// If the map contains a range into which "key" points
// (at its start or inside of it, but not at the end),
// return the address of the associated value
// and store its key in "*res_key".
// Else return NULL.
// max_size specifies largest range size possibly in existence now.
typedef size_t (*ValueSizeFunc)(const Value& v);
const Value* FindInside(ValueSizeFunc size_func, size_t max_size,
Key key, Key* res_key);
// Iterate over the address map calling 'callback'
// for all stored key-value pairs and passing 'arg' to it.
// We don't use the full Closure/Callback machinery so as not to add
// unnecessary dependencies to this class, which has low-level uses.
template<class Type>
inline void Iterate(void (*callback)(Key, Value*, Type), Type arg) const;
private:
typedef uintptr_t Number;
// The implementation assumes that addresses inserted into the map
// will be clustered. We take advantage of this fact by splitting
// up the address-space into blocks and using a linked-list entry
// for each block.
// Size of each block. There is one linked-list for each block, so
// do not make the block-size too big. Otherwise, a lot of time
// will be spent traversing linked lists.
static const int kBlockBits = 7;
static const int kBlockSize = 1 << kBlockBits;
// Entry kept in per-block linked-list
struct Entry {
Entry* next;
Key key;
Value value;
};
// We further group a sequence of consecutive blocks into a cluster.
// The data for a cluster is represented as a dense array of
// linked-lists, one list per contained block.
static const int kClusterBits = 13;
static const Number kClusterSize = 1 << (kBlockBits + kClusterBits);
static const int kClusterBlocks = 1 << kClusterBits;
// We use a simple chaining hash-table to represent the clusters.
struct Cluster {
Cluster* next; // Next cluster in hash table chain
Number id; // Cluster ID
Entry* blocks[kClusterBlocks]; // Per-block linked-lists
};
// Number of hash-table entries. With the block-size/cluster-size
// defined above, each cluster covers 1 MB, so a 4K-entry
// hash-table will give an average hash-chain length of 1 for 4GB of
// in-use memory.
static const int kHashBits = 12;
static const int kHashSize = 1 << 12;
// Number of entry objects allocated at a time
static const int ALLOC_COUNT = 64;
Cluster** hashtable_; // The hash-table
Entry* free_; // Free list of unused Entry objects
// Multiplicative hash function:
// The value "kHashMultiplier" is the bottom 32 bits of
// int((sqrt(5)-1)/2 * 2^32)
// This is a good multiplier as suggested in CLR, Knuth. The hash
// value is taken to be the top "k" bits of the bottom 32 bits
// of the multiplied value.
static const uint32_t kHashMultiplier = 2654435769u;
static int HashInt(Number x) {
// Multiply by a constant and take the top bits of the result.
const uint32_t m = static_cast<uint32_t>(x) * kHashMultiplier;
return static_cast<int>(m >> (32 - kHashBits));
}
// Find cluster object for specified address. If not found
// and "create" is true, create the object. If not found
// and "create" is false, return NULL.
//
// This method is bitwise-const if create is false.
Cluster* FindCluster(Number address, bool create) {
// Look in hashtable
const Number cluster_id = address >> (kBlockBits + kClusterBits);
const int h = HashInt(cluster_id);
for (Cluster* c = hashtable_[h]; c != NULL; c = c->next) {
if (c->id == cluster_id) {
return c;
}
}
// Create cluster if necessary
if (create) {
Cluster* c = New<Cluster>(1);
c->id = cluster_id;
c->next = hashtable_[h];
hashtable_[h] = c;
return c;
}
return NULL;
}
// Return the block ID for an address within its cluster
static int BlockID(Number address) {
return (address >> kBlockBits) & (kClusterBlocks - 1);
}
//--------------------------------------------------------------
// Memory management -- we keep all objects we allocate linked
// together in a singly linked list so we can get rid of them
// when we are all done. Furthermore, we allow the client to
// pass in custom memory allocator/deallocator routines.
//--------------------------------------------------------------
struct Object {
Object* next;
// The real data starts here
};
Allocator alloc_; // The allocator
DeAllocator dealloc_; // The deallocator
Object* allocated_; // List of allocated objects
// Allocates a zeroed array of T with length "num". Also inserts
// the allocated block into a linked list so it can be deallocated
// when we are all done.
template <class T> T* New(int num) {
void* ptr = (*alloc_)(sizeof(Object) + num*sizeof(T));
memset(ptr, 0, sizeof(Object) + num*sizeof(T));
Object* obj = reinterpret_cast<Object*>(ptr);
obj->next = allocated_;
allocated_ = obj;
return reinterpret_cast<T*>(reinterpret_cast<Object*>(ptr) + 1);
}
};
// More implementation details follow:
template <class Value>
AddressMap<Value>::AddressMap(Allocator alloc, DeAllocator dealloc)
: free_(NULL),
alloc_(alloc),
dealloc_(dealloc),
allocated_(NULL) {
hashtable_ = New<Cluster*>(kHashSize);
}
template <class Value>
AddressMap<Value>::~AddressMap() {
// De-allocate all of the objects we allocated
for (Object* obj = allocated_; obj != NULL; /**/) {
Object* next = obj->next;
(*dealloc_)(obj);
obj = next;
}
}
template <class Value>
inline const Value* AddressMap<Value>::Find(Key key) const {
return const_cast<AddressMap*>(this)->FindMutable(key);
}
template <class Value>
inline Value* AddressMap<Value>::FindMutable(Key key) {
const Number num = reinterpret_cast<Number>(key);
const Cluster* const c = FindCluster(num, false/*do not create*/);
if (c != NULL) {
for (Entry* e = c->blocks[BlockID(num)]; e != NULL; e = e->next) {
if (e->key == key) {
return &e->value;
}
}
}
return NULL;
}
template <class Value>
void AddressMap<Value>::Insert(Key key, Value value) {
const Number num = reinterpret_cast<Number>(key);
Cluster* const c = FindCluster(num, true/*create*/);
// Look in linked-list for this block
const int block = BlockID(num);
for (Entry* e = c->blocks[block]; e != NULL; e = e->next) {
if (e->key == key) {
e->value = value;
return;
}
}
// Create entry
if (free_ == NULL) {
// Allocate a new batch of entries and add to free-list
Entry* array = New<Entry>(ALLOC_COUNT);
for (int i = 0; i < ALLOC_COUNT-1; i++) {
array[i].next = &array[i+1];
}
array[ALLOC_COUNT-1].next = free_;
free_ = &array[0];
}
Entry* e = free_;
free_ = e->next;
e->key = key;
e->value = value;
e->next = c->blocks[block];
c->blocks[block] = e;
}
template <class Value>
bool AddressMap<Value>::FindAndRemove(Key key, Value* removed_value) {
const Number num = reinterpret_cast<Number>(key);
Cluster* const c = FindCluster(num, false/*do not create*/);
if (c != NULL) {
for (Entry** p = &c->blocks[BlockID(num)]; *p != NULL; p = &(*p)->next) {
Entry* e = *p;
if (e->key == key) {
*removed_value = e->value;
*p = e->next; // Remove e from linked-list
e->next = free_; // Add e to free-list
free_ = e;
return true;
}
}
}
return false;
}
template <class Value>
const Value* AddressMap<Value>::FindInside(ValueSizeFunc size_func,
size_t max_size,
Key key,
Key* res_key) {
const Number key_num = reinterpret_cast<Number>(key);
Number num = key_num; // we'll decrement this to walk back through the clusters
while (1) {
const Cluster* c = FindCluster(num, false/*do not create*/);
if (c != NULL) {
while (1) {
const int block = BlockID(num);
bool had_smaller_key = false;
for (const Entry* e = c->blocks[block]; e != NULL; e = e->next) {
const Number e_num = reinterpret_cast<Number>(e->key);
if (e_num <= key_num) {
if (e_num == key_num || // to handle 0-sized ranges
key_num < e_num + (*size_func)(e->value)) {
*res_key = e->key;
return &e->value;
}
had_smaller_key = true;
}
}
if (had_smaller_key) return NULL; // got a range before 'key'
// and it did not contain 'key'
if (block == 0) break;
// try address-wise previous block
num |= kBlockSize - 1; // start at the last addr of prev block
num -= kBlockSize;
if (key_num - num > max_size) return NULL;
}
}
if (num < kClusterSize) return NULL; // first cluster
// go to address-wise previous cluster to try
num |= kClusterSize - 1; // start at the last block of previous cluster
num -= kClusterSize;
if (key_num - num > max_size) return NULL;
// Having max_size to limit the search is crucial: else
// we have to traverse a lot of empty clusters (or blocks).
// We can avoid needing max_size if we put clusters into
// a search tree, but performance suffers considerably
// if we implement this approach with std::set.
}
}
template <class Value>
template <class Type>
inline void AddressMap<Value>::Iterate(void (*callback)(Key, Value*, Type),
Type arg) const {
// We could optimize this by traversing only non-empty clusters and/or blocks
// but it does not speed up heap-checker noticeably.
for (int h = 0; h < kHashSize; ++h) {
for (const Cluster* c = hashtable_[h]; c != NULL; c = c->next) {
for (int b = 0; b < kClusterBlocks; ++b) {
for (Entry* e = c->blocks[b]; e != NULL; e = e->next) {
callback(e->key, &e->value, arg);
}
}
}
}
}
#endif // BASE_ADDRESSMAP_INL_H_
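A minimal sketch of the AddressMap interface above, using plain malloc/free as the allocator pair (the relative include path is an assumption; inside gperftools the header lives in the src/ tree and also pulls in config.h):

#include <stdio.h>
#include <stdlib.h>
#include "addressmap-inl.h"   // assumed relative include path

int main() {
  // Any malloc/free-like pair works; the allocator need not return initialized memory.
  AddressMap<int> map(&malloc, &free);

  int a = 0, b = 0;                 // two distinct addresses to use as keys
  map.Insert(&a, 1);
  map.Insert(&b, 2);

  const int* v = map.Find(&a);
  if (v != NULL) printf("value stored for &a: %d\n", *v);

  int removed = 0;
  if (map.FindAndRemove(&b, &removed))
    printf("removed value: %d\n", removed);

  return 0;
}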

View File

@ -1,84 +0,0 @@
// Copyright (c) 2011, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Author: Alexander Levitskiy
//
// Generalizes the plethora of ARM flavors available to an easier to manage set
// Defs reference is at https://wiki.edubuntu.org/ARM/Thumb2PortingHowto
#ifndef ARM_INSTRUCTION_SET_SELECT_H_
#define ARM_INSTRUCTION_SET_SELECT_H_
#if defined(__ARM_ARCH_8A__)
# define ARMV8 1
#endif
#if defined(ARMV8) || \
defined(__ARM_ARCH_7__) || \
defined(__ARM_ARCH_7R__) || \
defined(__ARM_ARCH_7A__)
# define ARMV7 1
#endif
#if defined(ARMV7) || \
defined(__ARM_ARCH_6__) || \
defined(__ARM_ARCH_6J__) || \
defined(__ARM_ARCH_6K__) || \
defined(__ARM_ARCH_6Z__) || \
defined(__ARM_ARCH_6T2__) || \
defined(__ARM_ARCH_6ZK__)
# define ARMV6 1
#endif
#if defined(ARMV6) || \
defined(__ARM_ARCH_5T__) || \
defined(__ARM_ARCH_5E__) || \
defined(__ARM_ARCH_5TE__) || \
defined(__ARM_ARCH_5TEJ__)
# define ARMV5 1
#endif
#if defined(ARMV5) || \
defined(__ARM_ARCH_4__) || \
defined(__ARM_ARCH_4T__)
# define ARMV4 1
#endif
#if defined(ARMV4) || \
defined(__ARM_ARCH_3__) || \
defined(__ARM_ARCH_3M__)
# define ARMV3 1
#endif
#if defined(ARMV3) || \
defined(__ARM_ARCH_2__)
# define ARMV2 1
#endif
#endif // ARM_INSTRUCTION_SET_SELECT_H_

View File

@ -1,228 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2003, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
//
// Author: Lei Zhang, Sasha Levitskiy
//
// This file is an internal atomic implementation, use base/atomicops.h instead.
//
// LinuxKernelCmpxchg is from Google Gears.
#ifndef BASE_ATOMICOPS_INTERNALS_ARM_GENERIC_H_
#define BASE_ATOMICOPS_INTERNALS_ARM_GENERIC_H_
#include <stdio.h>
#include <stdlib.h>
#include "base/basictypes.h"
typedef int32_t Atomic32;
namespace base {
namespace subtle {
typedef int64_t Atomic64;
// 0xffff0fc0 is the hard coded address of a function provided by
// the kernel which implements an atomic compare-exchange. On older
// ARM architecture revisions (pre-v6) this may be implemented using
// a syscall. This address is stable, and in active use (hard coded)
// by at least glibc-2.7 and the Android C library.
// pLinuxKernelCmpxchg has both acquire and release barrier semantics.
typedef Atomic32 (*LinuxKernelCmpxchgFunc)(Atomic32 old_value,
Atomic32 new_value,
volatile Atomic32* ptr);
LinuxKernelCmpxchgFunc pLinuxKernelCmpxchg ATTRIBUTE_WEAK =
(LinuxKernelCmpxchgFunc) 0xffff0fc0;
typedef void (*LinuxKernelMemoryBarrierFunc)(void);
LinuxKernelMemoryBarrierFunc pLinuxKernelMemoryBarrier ATTRIBUTE_WEAK =
(LinuxKernelMemoryBarrierFunc) 0xffff0fa0;
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 prev_value = *ptr;
do {
if (!pLinuxKernelCmpxchg(old_value, new_value,
const_cast<Atomic32*>(ptr))) {
return old_value;
}
prev_value = *ptr;
} while (prev_value == old_value);
return prev_value;
}
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
Atomic32 old_value;
do {
old_value = *ptr;
} while (pLinuxKernelCmpxchg(old_value, new_value,
const_cast<Atomic32*>(ptr)));
return old_value;
}
inline Atomic32 Acquire_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
// pLinuxKernelCmpxchg already has acquire and release barrier semantics.
return NoBarrier_AtomicExchange(ptr, new_value);
}
inline Atomic32 Release_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
// pLinuxKernelCmpxchg already has acquire and release barrier semantics.
return NoBarrier_AtomicExchange(ptr, new_value);
}
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
}
inline void MemoryBarrier() {
pLinuxKernelMemoryBarrier();
}
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
MemoryBarrier();
}
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
MemoryBarrier();
*ptr = value;
}
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
return *ptr;
}
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
Atomic32 value = *ptr;
MemoryBarrier();
return value;
}
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
MemoryBarrier();
return *ptr;
}
// 64-bit versions are not implemented yet.
inline void NotImplementedFatalError(const char *function_name) {
fprintf(stderr, "64-bit %s() not implemented on this platform\n",
function_name);
abort();
}
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
NotImplementedFatalError("NoBarrier_CompareAndSwap");
return 0;
}
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
NotImplementedFatalError("NoBarrier_AtomicExchange");
return 0;
}
inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
// pLinuxKernelCmpxchg already has acquire and release barrier semantics.
return NoBarrier_AtomicExchange(ptr, new_value);
}
inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
// pLinuxKernelCmpxchg already has acquire and release barrier semantics.
return NoBarrier_AtomicExchange(ptr, new_value);
}
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
NotImplementedFatalError("NoBarrier_Store");
}
inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
NotImplementedFatalError("Acquire_Store64");
}
inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
NotImplementedFatalError("Release_Store");
}
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
NotImplementedFatalError("NoBarrier_Load");
return 0;
}
inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
NotImplementedFatalError("Atomic64 Acquire_Load");
return 0;
}
inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
NotImplementedFatalError("Atomic64 Release_Load");
return 0;
}
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
NotImplementedFatalError("Atomic64 Acquire_CompareAndSwap");
return 0;
}
inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
NotImplementedFatalError("Atomic64 Release_CompareAndSwap");
return 0;
}
} // namespace base::subtle
} // namespace base
#endif // BASE_ATOMICOPS_INTERNALS_ARM_GENERIC_H_
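Every primitive in this removed header is layered on a single compare-and-swap. As a hedged illustration (not part of the original file), here is how a higher-level atomic increment is typically built on a CAS of that shape, using the GCC __sync builtin as a stand-in for the kernel helper:

#include <cstdint>

typedef int32_t Atomic32;

// Stand-in for NoBarrier_CompareAndSwap: returns the value observed at *ptr.
static Atomic32 CompareAndSwap(volatile Atomic32* ptr,
                               Atomic32 old_value, Atomic32 new_value) {
  return __sync_val_compare_and_swap(ptr, old_value, new_value);
}

static Atomic32 AtomicIncrement(volatile Atomic32* ptr, Atomic32 increment) {
  for (;;) {
    Atomic32 old_value = *ptr;
    // Retry until no other thread modified *ptr between the load and the CAS.
    if (CompareAndSwap(ptr, old_value, old_value + increment) == old_value)
      return old_value + increment;
  }
}

int main() {
  Atomic32 counter = 0;
  AtomicIncrement(&counter, 5);
  return counter == 5 ? 0 : 1;
}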

View File

@ -1,330 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2011, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
//
// Author: Sasha Levitskiy
// based on atomicops-internals by Sanjay Ghemawat
//
// This file is an internal atomic implementation, use base/atomicops.h instead.
//
// This code implements ARM atomics for architectures V6 and newer.
#ifndef BASE_ATOMICOPS_INTERNALS_ARM_V6PLUS_H_
#define BASE_ATOMICOPS_INTERNALS_ARM_V6PLUS_H_
#include <stdio.h>
#include <stdlib.h>
#include "base/basictypes.h" // For COMPILE_ASSERT
// The LDREXD and STREXD instructions are available in all ARM v7 variants and above.
// In v6, only some variants support them. For simplicity, we only use exclusive
// 64-bit load/store in V7 or above.
#if defined(ARMV7)
# define BASE_ATOMICOPS_HAS_LDREXD_AND_STREXD
#endif
typedef int32_t Atomic32;
namespace base {
namespace subtle {
typedef int64_t Atomic64;
// 32-bit low-level ops
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 oldval, res;
do {
__asm__ __volatile__(
"ldrex %1, [%3]\n"
"mov %0, #0\n"
"teq %1, %4\n"
// The following IT (if-then) instruction is needed for the subsequent
// conditional instruction STREXEQ when compiling in THUMB mode.
// In ARM mode, the compiler/assembler will not generate any code for it.
"it eq\n"
"strexeq %0, %5, [%3]\n"
: "=&r" (res), "=&r" (oldval), "+Qo" (*ptr)
: "r" (ptr), "Ir" (old_value), "r" (new_value)
: "cc");
} while (res);
return oldval;
}
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
Atomic32 tmp, old;
__asm__ __volatile__(
"1:\n"
"ldrex %1, [%2]\n"
"strex %0, %3, [%2]\n"
"teq %0, #0\n"
"bne 1b"
: "=&r" (tmp), "=&r" (old)
: "r" (ptr), "r" (new_value)
: "cc", "memory");
return old;
}
inline void MemoryBarrier() {
#if !defined(ARMV7)
uint32_t dest = 0;
__asm__ __volatile__("mcr p15,0,%0,c7,c10,5" :"=&r"(dest) : : "memory");
#else
__asm__ __volatile__("dmb" : : : "memory");
#endif
}
inline Atomic32 Acquire_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
Atomic32 old_value = NoBarrier_AtomicExchange(ptr, new_value);
MemoryBarrier();
return old_value;
}
inline Atomic32 Release_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
MemoryBarrier();
return NoBarrier_AtomicExchange(ptr, new_value);
}
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 value = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
MemoryBarrier();
return value;
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
MemoryBarrier();
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
}
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
MemoryBarrier();
}
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
MemoryBarrier();
*ptr = value;
}
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
return *ptr;
}
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
Atomic32 value = *ptr;
MemoryBarrier();
return value;
}
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
MemoryBarrier();
return *ptr;
}
// 64-bit versions are only available if LDREXD and STREXD instructions
// are available.
#ifdef BASE_ATOMICOPS_HAS_LDREXD_AND_STREXD
#define BASE_HAS_ATOMIC64 1
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 oldval, res;
do {
__asm__ __volatile__(
"ldrexd %1, [%3]\n"
"mov %0, #0\n"
"teq %Q1, %Q4\n"
// The following IT (if-then) instructions are needed for the subsequent
// conditional instructions when compiling in THUMB mode.
// In ARM mode, the compiler/assembler will not generate any code for it.
"it eq\n"
"teqeq %R1, %R4\n"
"it eq\n"
"strexdeq %0, %5, [%3]\n"
: "=&r" (res), "=&r" (oldval), "+Q" (*ptr)
: "r" (ptr), "Ir" (old_value), "r" (new_value)
: "cc");
} while (res);
return oldval;
}
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
int store_failed;
Atomic64 old;
__asm__ __volatile__(
"1:\n"
"ldrexd %1, [%2]\n"
"strexd %0, %3, [%2]\n"
"teq %0, #0\n"
"bne 1b"
: "=&r" (store_failed), "=&r" (old)
: "r" (ptr), "r" (new_value)
: "cc", "memory");
return old;
}
inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
Atomic64 old_value = NoBarrier_AtomicExchange(ptr, new_value);
MemoryBarrier();
return old_value;
}
inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
MemoryBarrier();
return NoBarrier_AtomicExchange(ptr, new_value);
}
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
int store_failed;
Atomic64 dummy;
__asm__ __volatile__(
"1:\n"
// Dummy load to lock cache line.
"ldrexd %1, [%3]\n"
"strexd %0, %2, [%3]\n"
"teq %0, #0\n"
"bne 1b"
: "=&r" (store_failed), "=&r"(dummy)
: "r"(value), "r" (ptr)
: "cc", "memory");
}
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
Atomic64 res;
__asm__ __volatile__(
"ldrexd %0, [%1]\n"
"clrex\n"
: "=r" (res)
: "r"(ptr), "Q"(*ptr));
return res;
}
#else // BASE_ATOMICOPS_HAS_LDREXD_AND_STREXD
inline void NotImplementedFatalError(const char *function_name) {
fprintf(stderr, "64-bit %s() not implemented on this platform\n",
function_name);
abort();
}
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
NotImplementedFatalError("NoBarrier_CompareAndSwap");
return 0;
}
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
NotImplementedFatalError("NoBarrier_AtomicExchange");
return 0;
}
inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
NotImplementedFatalError("Acquire_AtomicExchange");
return 0;
}
inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
NotImplementedFatalError("Release_AtomicExchange");
return 0;
}
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
NotImplementedFatalError("NoBarrier_Store");
}
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
NotImplementedFatalError("NoBarrier_Load");
return 0;
}
#endif // BASE_ATOMICOPS_HAS_LDREXD_AND_STREXD
inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
NoBarrier_Store(ptr, value);
MemoryBarrier();
}
inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
MemoryBarrier();
NoBarrier_Store(ptr, value);
}
inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
Atomic64 value = NoBarrier_Load(ptr);
MemoryBarrier();
return value;
}
inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
MemoryBarrier();
return NoBarrier_Load(ptr);
}
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 value = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
MemoryBarrier();
return value;
}
inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
MemoryBarrier();
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
} // namespace subtle ends
} // namespace base ends
#endif // BASE_ATOMICOPS_INTERNALS_ARM_V6PLUS_H_

View File

@ -1,203 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2014, Linaro
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
//
// Author: Riku Voipio, riku.voipio@linaro.org
//
// atomic primitives implemented with gcc atomic intrinsics:
// http://gcc.gnu.org/onlinedocs/gcc/_005f_005fatomic-Builtins.html
//
#ifndef BASE_ATOMICOPS_INTERNALS_GCC_GENERIC_H_
#define BASE_ATOMICOPS_INTERNALS_GCC_GENERIC_H_
#include <stdio.h>
#include <stdlib.h>
#include "base/basictypes.h"
typedef int32_t Atomic32;
namespace base {
namespace subtle {
typedef int64_t Atomic64;
inline void MemoryBarrier() {
__sync_synchronize();
}
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 prev_value = old_value;
__atomic_compare_exchange_n(ptr, &prev_value, new_value,
0, __ATOMIC_RELAXED, __ATOMIC_RELAXED);
return prev_value;
}
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
return __atomic_exchange_n(const_cast<Atomic32*>(ptr), new_value, __ATOMIC_RELAXED);
}
inline Atomic32 Acquire_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
return __atomic_exchange_n(const_cast<Atomic32*>(ptr), new_value, __ATOMIC_ACQUIRE);
}
inline Atomic32 Release_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
return __atomic_exchange_n(const_cast<Atomic32*>(ptr), new_value, __ATOMIC_RELEASE);
}
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 prev_value = old_value;
__atomic_compare_exchange_n(ptr, &prev_value, new_value,
0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
return prev_value;
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 prev_value = old_value;
__atomic_compare_exchange_n(ptr, &prev_value, new_value,
0, __ATOMIC_RELEASE, __ATOMIC_RELAXED);
return prev_value;
}
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
}
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
MemoryBarrier();
}
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
MemoryBarrier();
*ptr = value;
}
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
return *ptr;
}
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
Atomic32 value = *ptr;
MemoryBarrier();
return value;
}
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
MemoryBarrier();
return *ptr;
}
// 64-bit versions
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 prev_value = old_value;
__atomic_compare_exchange_n(ptr, &prev_value, new_value,
0, __ATOMIC_RELAXED, __ATOMIC_RELAXED);
return prev_value;
}
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
return __atomic_exchange_n(const_cast<Atomic64*>(ptr), new_value, __ATOMIC_RELAXED);
}
inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
return __atomic_exchange_n(const_cast<Atomic64*>(ptr), new_value, __ATOMIC_ACQUIRE);
}
inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
return __atomic_exchange_n(const_cast<Atomic64*>(ptr), new_value, __ATOMIC_RELEASE);
}
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 prev_value = old_value;
__atomic_compare_exchange_n(ptr, &prev_value, new_value,
0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
return prev_value;
}
inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 prev_value = old_value;
__atomic_compare_exchange_n(ptr, &prev_value, new_value,
0, __ATOMIC_RELEASE, __ATOMIC_RELAXED);
return prev_value;
}
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value;
}
inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value;
MemoryBarrier();
}
inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
MemoryBarrier();
*ptr = value;
}
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
return *ptr;
}
inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
Atomic64 value = *ptr;
MemoryBarrier();
return value;
}
inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
MemoryBarrier();
return *ptr;
}
} // namespace base::subtle
} // namespace base
#endif // BASE_ATOMICOPS_INTERNALS_GCC_GENERIC_H_
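The header above maps every primitive onto the GCC __atomic builtins. A short, hedged usage sketch of the same builtins (standalone, not part of the removed file) shows the release-store / acquire-load hand-off that the memory-order arguments express:

#include <cstdint>
#include <cstdio>
#include <pthread.h>

static int32_t payload = 0;
static int32_t ready = 0;

static void* producer(void*) {
  payload = 42;                                   // plain store
  __atomic_store_n(&ready, 1, __ATOMIC_RELEASE);  // publish
  return nullptr;
}

static void* consumer(void*) {
  while (__atomic_load_n(&ready, __ATOMIC_ACQUIRE) == 0) {}  // wait for publish
  std::printf("payload=%d\n", payload);           // guaranteed to observe 42
  return nullptr;
}

int main() {
  pthread_t a, b;
  pthread_create(&a, nullptr, producer, nullptr);
  pthread_create(&b, nullptr, consumer, nullptr);
  pthread_join(a, nullptr);
  pthread_join(b, nullptr);
  return 0;
}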

View File

@ -1,437 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2008, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ---
*/
// Implementation of atomic operations for ppc-linux. This file should not
// be included directly. Clients should instead include
// "base/atomicops.h".
#ifndef BASE_ATOMICOPS_INTERNALS_LINUXPPC_H_
#define BASE_ATOMICOPS_INTERNALS_LINUXPPC_H_
typedef int32_t Atomic32;
#ifdef __PPC64__
#define BASE_HAS_ATOMIC64 1
#endif
namespace base {
namespace subtle {
static inline void _sync(void) {
__asm__ __volatile__("sync": : : "memory");
}
static inline void _lwsync(void) {
// gcc defines __NO_LWSYNC__ when appropriate; see
// http://gcc.gnu.org/ml/gcc-patches/2006-11/msg01238.html
#ifdef __NO_LWSYNC__
__asm__ __volatile__("msync": : : "memory");
#else
__asm__ __volatile__("lwsync": : : "memory");
#endif
}
static inline void _isync(void) {
__asm__ __volatile__("isync": : : "memory");
}
static inline Atomic32 OSAtomicAdd32(Atomic32 amount, Atomic32 *value) {
Atomic32 t;
__asm__ __volatile__(
"1: lwarx %0,0,%3\n\
add %0,%2,%0\n\
stwcx. %0,0,%3 \n\
bne- 1b"
: "=&r" (t), "+m" (*value)
: "r" (amount), "r" (value)
: "cc");
return t;
}
static inline Atomic32 OSAtomicAdd32Barrier(Atomic32 amount, Atomic32 *value) {
Atomic32 t;
_lwsync();
t = OSAtomicAdd32(amount, value);
// This is based on the code snippet in the architecture manual (Vol
// 2, Appendix B). It's a little tricky: correctness depends on the
// fact that the code right before this (in OSAtomicAdd32) has a
// conditional branch with a data dependency on the update.
// Otherwise, we'd have to use sync.
_isync();
return t;
}
static inline bool OSAtomicCompareAndSwap32(Atomic32 old_value,
Atomic32 new_value,
Atomic32 *value) {
Atomic32 prev;
__asm__ __volatile__(
"1: lwarx %0,0,%2\n\
cmpw 0,%0,%3\n\
bne- 2f\n\
stwcx. %4,0,%2\n\
bne- 1b\n\
2:"
: "=&r" (prev), "+m" (*value)
: "r" (value), "r" (old_value), "r" (new_value)
: "cc");
return prev == old_value;
}
static inline Atomic32 OSAtomicCompareAndSwap32Acquire(Atomic32 old_value,
Atomic32 new_value,
Atomic32 *value) {
Atomic32 t;
t = OSAtomicCompareAndSwap32(old_value, new_value, value);
// This is based on the code snippet in the architecture manual (Vol
// 2, Appendix B). It's a little tricky: correctness depends on the
// fact that the code right before this (in
// OSAtomicCompareAndSwap32) has a conditional branch with a data
// dependency on the update. Otherwise, we'd have to use sync.
_isync();
return t;
}
static inline Atomic32 OSAtomicCompareAndSwap32Release(Atomic32 old_value,
Atomic32 new_value,
Atomic32 *value) {
_lwsync();
return OSAtomicCompareAndSwap32(old_value, new_value, value);
}
typedef int64_t Atomic64;
inline void MemoryBarrier() {
// This can't be _lwsync(); we need to order the immediately
// preceding stores against any load that may follow, but lwsync
// doesn't guarantee that.
_sync();
}
// 32-bit Versions.
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32 *ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 prev_value;
do {
if (OSAtomicCompareAndSwap32(old_value, new_value,
const_cast<Atomic32*>(ptr))) {
return old_value;
}
prev_value = *ptr;
} while (prev_value == old_value);
return prev_value;
}
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32 *ptr,
Atomic32 new_value) {
Atomic32 old_value;
do {
old_value = *ptr;
} while (!OSAtomicCompareAndSwap32(old_value, new_value,
const_cast<Atomic32*>(ptr)));
return old_value;
}
inline Atomic32 Acquire_AtomicExchange(volatile Atomic32 *ptr,
Atomic32 new_value) {
Atomic32 old_value;
do {
old_value = *ptr;
} while (!OSAtomicCompareAndSwap32Acquire(old_value, new_value,
const_cast<Atomic32*>(ptr)));
return old_value;
}
inline Atomic32 Release_AtomicExchange(volatile Atomic32 *ptr,
Atomic32 new_value) {
Atomic32 old_value;
do {
old_value = *ptr;
} while (!OSAtomicCompareAndSwap32Release(old_value, new_value,
const_cast<Atomic32*>(ptr)));
return old_value;
}
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32 *ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 prev_value;
do {
if (OSAtomicCompareAndSwap32Acquire(old_value, new_value,
const_cast<Atomic32*>(ptr))) {
return old_value;
}
prev_value = *ptr;
} while (prev_value == old_value);
return prev_value;
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32 *ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 prev_value;
do {
if (OSAtomicCompareAndSwap32Release(old_value, new_value,
const_cast<Atomic32*>(ptr))) {
return old_value;
}
prev_value = *ptr;
} while (prev_value == old_value);
return prev_value;
}
#ifdef __PPC64__
// 64-bit Versions.
static inline Atomic64 OSAtomicAdd64(Atomic64 amount, Atomic64 *value) {
Atomic64 t;
__asm__ __volatile__(
"1: ldarx %0,0,%3\n\
add %0,%2,%0\n\
stdcx. %0,0,%3 \n\
bne- 1b"
: "=&r" (t), "+m" (*value)
: "r" (amount), "r" (value)
: "cc");
return t;
}
static inline Atomic64 OSAtomicAdd64Barrier(Atomic64 amount, Atomic64 *value) {
Atomic64 t;
_lwsync();
t = OSAtomicAdd64(amount, value);
// This is based on the code snippet in the architecture manual (Vol
// 2, Appendix B). It's a little tricky: correctness depends on the
// fact that the code right before this (in OSAtomicAdd64) has a
// conditional branch with a data dependency on the update.
// Otherwise, we'd have to use sync.
_isync();
return t;
}
static inline bool OSAtomicCompareAndSwap64(Atomic64 old_value,
Atomic64 new_value,
Atomic64 *value) {
Atomic64 prev;
__asm__ __volatile__(
"1: ldarx %0,0,%2\n\
cmpd 0,%0,%3\n\
bne- 2f\n\
stdcx. %4,0,%2\n\
bne- 1b\n\
2:"
: "=&r" (prev), "+m" (*value)
: "r" (value), "r" (old_value), "r" (new_value)
: "cc");
return prev == old_value;
}
static inline Atomic64 OSAtomicCompareAndSwap64Acquire(Atomic64 old_value,
Atomic64 new_value,
Atomic64 *value) {
Atomic64 t;
t = OSAtomicCompareAndSwap64(old_value, new_value, value);
// This is based on the code snippet in the architecture manual (Vol
// 2, Appendix B). It's a little tricky: correctness depends on the
// fact that the code right before this (in
// OSAtomicCompareAndSwap64) has a conditional branch with a data
// dependency on the update. Otherwise, we'd have to use sync.
_isync();
return t;
}
static inline Atomic64 OSAtomicCompareAndSwap64Release(Atomic64 old_value,
Atomic64 new_value,
Atomic64 *value) {
_lwsync();
return OSAtomicCompareAndSwap64(old_value, new_value, value);
}
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64 *ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 prev_value;
do {
if (OSAtomicCompareAndSwap64(old_value, new_value,
const_cast<Atomic64*>(ptr))) {
return old_value;
}
prev_value = *ptr;
} while (prev_value == old_value);
return prev_value;
}
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64 *ptr,
Atomic64 new_value) {
Atomic64 old_value;
do {
old_value = *ptr;
} while (!OSAtomicCompareAndSwap64(old_value, new_value,
const_cast<Atomic64*>(ptr)));
return old_value;
}
inline Atomic64 Acquire_AtomicExchange(volatile Atomic64 *ptr,
Atomic64 new_value) {
Atomic64 old_value;
do {
old_value = *ptr;
} while (!OSAtomicCompareAndSwap64Acquire(old_value, new_value,
const_cast<Atomic64*>(ptr)));
return old_value;
}
inline Atomic64 Release_AtomicExchange(volatile Atomic64 *ptr,
Atomic64 new_value) {
Atomic64 old_value;
do {
old_value = *ptr;
} while (!OSAtomicCompareAndSwap64Release(old_value, new_value,
const_cast<Atomic64*>(ptr)));
return old_value;
}
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64 *ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 prev_value;
do {
if (OSAtomicCompareAndSwap64Acquire(old_value, new_value,
const_cast<Atomic64*>(ptr))) {
return old_value;
}
prev_value = *ptr;
} while (prev_value == old_value);
return prev_value;
}
inline Atomic64 Release_CompareAndSwap(volatile Atomic64 *ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 prev_value;
do {
if (OSAtomicCompareAndSwap64Release(old_value, new_value,
const_cast<Atomic64*>(ptr))) {
return old_value;
}
prev_value = *ptr;
} while (prev_value == old_value);
return prev_value;
}
#endif
inline void NoBarrier_Store(volatile Atomic32 *ptr, Atomic32 value) {
*ptr = value;
}
inline void Acquire_Store(volatile Atomic32 *ptr, Atomic32 value) {
*ptr = value;
// This can't be _lwsync(); we need to order the immediately
// preceding stores against any load that may follow, but lwsync
// doesn't guarantee that.
_sync();
}
inline void Release_Store(volatile Atomic32 *ptr, Atomic32 value) {
_lwsync();
*ptr = value;
}
inline Atomic32 NoBarrier_Load(volatile const Atomic32 *ptr) {
return *ptr;
}
inline Atomic32 Acquire_Load(volatile const Atomic32 *ptr) {
Atomic32 value = *ptr;
_lwsync();
return value;
}
inline Atomic32 Release_Load(volatile const Atomic32 *ptr) {
// This can't be _lwsync(); we need to order the immediately
// preceding stores against any load that may follow, but lwsync
// doesn't guarantee that.
_sync();
return *ptr;
}
#ifdef __PPC64__
// 64-bit Versions.
inline void NoBarrier_Store(volatile Atomic64 *ptr, Atomic64 value) {
*ptr = value;
}
inline void Acquire_Store(volatile Atomic64 *ptr, Atomic64 value) {
*ptr = value;
// This can't be _lwsync(); we need to order the immediately
// preceding stores against any load that may follow, but lwsync
// doesn't guarantee that.
_sync();
}
inline void Release_Store(volatile Atomic64 *ptr, Atomic64 value) {
_lwsync();
*ptr = value;
}
inline Atomic64 NoBarrier_Load(volatile const Atomic64 *ptr) {
return *ptr;
}
inline Atomic64 Acquire_Load(volatile const Atomic64 *ptr) {
Atomic64 value = *ptr;
_lwsync();
return value;
}
inline Atomic64 Release_Load(volatile const Atomic64 *ptr) {
// This can't be _lwsync(); we need to order the immediately
// preceding stores against any load that may follow, but lwsync
// doesn't guarantee that.
_sync();
return *ptr;
}
#endif
} // namespace base::subtle
} // namespace base
#endif // BASE_ATOMICOPS_INTERNALS_LINUXPPC_H_
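The comments in this file point out that Release_Store can use lwsync while Acquire_Store and Release_Load need a full sync; in portable C++11 terms this is the store-store versus store-load ordering distinction, sketched below as a standalone illustration (not part of the removed file):

#include <atomic>

std::atomic<int> x(0), y(0);

void release_store_path() {
  x.store(1, std::memory_order_relaxed);
  // lwsync-like: orders the prior store before the following store.
  std::atomic_thread_fence(std::memory_order_release);
  y.store(1, std::memory_order_relaxed);
}

int store_then_load_path() {
  x.store(1, std::memory_order_relaxed);
  // Only a full (seq_cst) fence orders a prior store before a later load,
  // which is why the header falls back to _sync() in those paths.
  std::atomic_thread_fence(std::memory_order_seq_cst);
  return y.load(std::memory_order_relaxed);
}

int main() {
  release_store_path();
  return store_then_load_path();
}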

View File

@ -1,370 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2006, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// Implementation of atomic operations for Mac OS X. This file should not
// be included directly. Clients should instead include
// "base/atomicops.h".
#ifndef BASE_ATOMICOPS_INTERNALS_MACOSX_H_
#define BASE_ATOMICOPS_INTERNALS_MACOSX_H_
typedef int32_t Atomic32;
// MacOS uses long for intptr_t, so AtomicWord and Atomic32 are always different
// types on the Mac, even when they are the same size. Similarly, on __ppc64__,
// AtomicWord and Atomic64 are always different types. Thus, we need explicit
// casting.
#ifdef __LP64__
#define AtomicWordCastType base::subtle::Atomic64
#else
#define AtomicWordCastType Atomic32
#endif
#if defined(__LP64__) || defined(__i386__)
#define BASE_HAS_ATOMIC64 1 // Use only in tests and base/atomic*
#endif
#include <libkern/OSAtomic.h>
namespace base {
namespace subtle {
#if !defined(__LP64__) && defined(__ppc__)
// The Mac 64-bit OSAtomic implementations are not available for 32-bit PowerPC,
// while the underlying assembly instructions are available on only some
// implementations of PowerPC.
// The following inline functions will fail with an error message at compile
// time ONLY IF they are called. So it is safe to use this header if user
// code only calls AtomicWord and Atomic32 operations.
//
// NOTE(vchen): Notes on implementing the atomic ops below may
// be found in "PowerPC Virtual Environment Architecture, Book II,
// Version 2.02", January 28, 2005, Appendix B, page 46. Unfortunately,
// extra care must be taken to ensure data are properly 8-byte aligned, and
// that data are returned correctly according to Mac OS X ABI specs.
inline int64_t OSAtomicCompareAndSwap64(
int64_t oldValue, int64_t newValue, int64_t *theValue) {
__asm__ __volatile__(
"_OSAtomicCompareAndSwap64_not_supported_for_32_bit_ppc\n\t");
return 0;
}
inline int64_t OSAtomicAdd64(int64_t theAmount, int64_t *theValue) {
__asm__ __volatile__(
"_OSAtomicAdd64_not_supported_for_32_bit_ppc\n\t");
return 0;
}
inline int64_t OSAtomicCompareAndSwap64Barrier(
int64_t oldValue, int64_t newValue, int64_t *theValue) {
int64_t prev = OSAtomicCompareAndSwap64(oldValue, newValue, theValue);
OSMemoryBarrier();
return prev;
}
inline int64_t OSAtomicAdd64Barrier(
int64_t theAmount, int64_t *theValue) {
int64_t new_val = OSAtomicAdd64(theAmount, theValue);
OSMemoryBarrier();
return new_val;
}
#endif
typedef int64_t Atomic64;
inline void MemoryBarrier() {
OSMemoryBarrier();
}
// 32-bit Versions.
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32 *ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 prev_value;
do {
if (OSAtomicCompareAndSwap32(old_value, new_value,
const_cast<Atomic32*>(ptr))) {
return old_value;
}
prev_value = *ptr;
} while (prev_value == old_value);
return prev_value;
}
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32 *ptr,
Atomic32 new_value) {
Atomic32 old_value;
do {
old_value = *ptr;
} while (!OSAtomicCompareAndSwap32(old_value, new_value,
const_cast<Atomic32*>(ptr)));
return old_value;
}
inline Atomic32 Acquire_AtomicExchange(volatile Atomic32 *ptr,
Atomic32 new_value) {
Atomic32 old_value;
do {
old_value = *ptr;
} while (!OSAtomicCompareAndSwap32Barrier(old_value, new_value,
const_cast<Atomic32*>(ptr)));
return old_value;
}
inline Atomic32 Release_AtomicExchange(volatile Atomic32 *ptr,
Atomic32 new_value) {
return Acquire_AtomicExchange(ptr, new_value);
}
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32 *ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 prev_value;
do {
if (OSAtomicCompareAndSwap32Barrier(old_value, new_value,
const_cast<Atomic32*>(ptr))) {
return old_value;
}
prev_value = *ptr;
} while (prev_value == old_value);
return prev_value;
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32 *ptr,
Atomic32 old_value,
Atomic32 new_value) {
return Acquire_CompareAndSwap(ptr, old_value, new_value);
}
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
}
inline void Acquire_Store(volatile Atomic32 *ptr, Atomic32 value) {
*ptr = value;
MemoryBarrier();
}
inline void Release_Store(volatile Atomic32 *ptr, Atomic32 value) {
MemoryBarrier();
*ptr = value;
}
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
return *ptr;
}
inline Atomic32 Acquire_Load(volatile const Atomic32 *ptr) {
Atomic32 value = *ptr;
MemoryBarrier();
return value;
}
inline Atomic32 Release_Load(volatile const Atomic32 *ptr) {
MemoryBarrier();
return *ptr;
}
// 64-bit version
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64 *ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 prev_value;
do {
if (OSAtomicCompareAndSwap64(old_value, new_value,
const_cast<Atomic64*>(ptr))) {
return old_value;
}
prev_value = *ptr;
} while (prev_value == old_value);
return prev_value;
}
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64 *ptr,
Atomic64 new_value) {
Atomic64 old_value;
do {
old_value = *ptr;
} while (!OSAtomicCompareAndSwap64(old_value, new_value,
const_cast<Atomic64*>(ptr)));
return old_value;
}
inline Atomic64 Acquire_AtomicExchange(volatile Atomic64 *ptr,
Atomic64 new_value) {
Atomic64 old_value;
do {
old_value = *ptr;
} while (!OSAtomicCompareAndSwap64Barrier(old_value, new_value,
const_cast<Atomic64*>(ptr)));
return old_value;
}
inline Atomic64 Release_AtomicExchange(volatile Atomic64 *ptr,
Atomic64 new_value) {
return Acquire_AtomicExchange(ptr, new_value);
}
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64 *ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 prev_value;
do {
if (OSAtomicCompareAndSwap64Barrier(old_value, new_value,
const_cast<Atomic64*>(ptr))) {
return old_value;
}
prev_value = *ptr;
} while (prev_value == old_value);
return prev_value;
}
inline Atomic64 Release_CompareAndSwap(volatile Atomic64 *ptr,
Atomic64 old_value,
Atomic64 new_value) {
// The lib kern interface does not distinguish between
// Acquire and Release memory barriers; they are equivalent.
return Acquire_CompareAndSwap(ptr, old_value, new_value);
}
#ifdef __LP64__
// 64-bit implementation on 64-bit platform
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value;
}
inline void Acquire_Store(volatile Atomic64 *ptr, Atomic64 value) {
*ptr = value;
MemoryBarrier();
}
inline void Release_Store(volatile Atomic64 *ptr, Atomic64 value) {
MemoryBarrier();
*ptr = value;
}
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
return *ptr;
}
inline Atomic64 Acquire_Load(volatile const Atomic64 *ptr) {
Atomic64 value = *ptr;
MemoryBarrier();
return value;
}
inline Atomic64 Release_Load(volatile const Atomic64 *ptr) {
MemoryBarrier();
return *ptr;
}
#else
// 64-bit implementation on 32-bit platform
#if defined(__ppc__)
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
__asm__ __volatile__(
"_NoBarrier_Store_not_supported_for_32_bit_ppc\n\t");
}
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
__asm__ __volatile__(
"_NoBarrier_Load_not_supported_for_32_bit_ppc\n\t");
return 0;
}
#elif defined(__i386__)
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
__asm__ __volatile__("movq %1, %%mm0\n\t" // Use mmx reg for 64-bit atomic
"movq %%mm0, %0\n\t" // moves (ptr could be read-only)
"emms\n\t" // Reset FP registers
: "=m" (*ptr)
: "m" (value)
: // mark the FP stack and mmx registers as clobbered
"st", "st(1)", "st(2)", "st(3)", "st(4)",
"st(5)", "st(6)", "st(7)", "mm0", "mm1",
"mm2", "mm3", "mm4", "mm5", "mm6", "mm7");
}
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
Atomic64 value;
__asm__ __volatile__("movq %1, %%mm0\n\t" // Use mmx reg for 64-bit atomic
"movq %%mm0, %0\n\t" // moves (ptr could be read-only)
"emms\n\t" // Reset FP registers
: "=m" (value)
: "m" (*ptr)
: // mark the FP stack and mmx registers as clobbered
"st", "st(1)", "st(2)", "st(3)", "st(4)",
"st(5)", "st(6)", "st(7)", "mm0", "mm1",
"mm2", "mm3", "mm4", "mm5", "mm6", "mm7");
return value;
}
#endif
inline void Acquire_Store(volatile Atomic64 *ptr, Atomic64 value) {
NoBarrier_Store(ptr, value);
MemoryBarrier();
}
inline void Release_Store(volatile Atomic64 *ptr, Atomic64 value) {
MemoryBarrier();
NoBarrier_Store(ptr, value);
}
inline Atomic64 Acquire_Load(volatile const Atomic64 *ptr) {
Atomic64 value = NoBarrier_Load(ptr);
MemoryBarrier();
return value;
}
inline Atomic64 Release_Load(volatile const Atomic64 *ptr) {
MemoryBarrier();
return NoBarrier_Load(ptr);
}
#endif // __LP64__
} // namespace base::subtle
} // namespace base
#endif // BASE_ATOMICOPS_INTERNALS_MACOSX_H_

View File

@ -1,323 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2013, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// Author: Jovan Zelincevic <jovan.zelincevic@imgtec.com>
// based on atomicops-internals by Sanjay Ghemawat
// This file is an internal atomic implementation, use base/atomicops.h instead.
//
// This code implements MIPS atomics.
#ifndef BASE_ATOMICOPS_INTERNALS_MIPS_H_
#define BASE_ATOMICOPS_INTERNALS_MIPS_H_
#if (_MIPS_ISA == _MIPS_ISA_MIPS64)
#define BASE_HAS_ATOMIC64 1
#endif
typedef int32_t Atomic32;
namespace base {
namespace subtle {
// Atomically execute:
// result = *ptr;
// if (*ptr == old_value)
// *ptr = new_value;
// return result;
//
// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
// Always return the old value of "*ptr"
//
// This routine implies no memory barriers.
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value)
{
Atomic32 prev, tmp;
__asm__ volatile(
".set push \n"
".set noreorder \n"
"1: \n"
"ll %0, %5 \n" // prev = *ptr
"bne %0, %3, 2f \n" // if (prev != old_value) goto 2
" move %2, %4 \n" // tmp = new_value
"sc %2, %1 \n" // *ptr = tmp (with atomic check)
"beqz %2, 1b \n" // start again on atomic error
" nop \n" // delay slot nop
"2: \n"
".set pop \n"
: "=&r" (prev), "=m" (*ptr),
"=&r" (tmp)
: "Ir" (old_value), "r" (new_value),
"m" (*ptr)
: "memory"
);
return prev;
}
// Atomically store new_value into *ptr, returning the previous value held in
// *ptr. This routine implies no memory barriers.
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value)
{
Atomic32 temp, old;
__asm__ volatile(
".set push \n"
".set noreorder \n"
"1: \n"
"ll %1, %2 \n" // old = *ptr
"move %0, %3 \n" // temp = new_value
"sc %0, %2 \n" // *ptr = temp (with atomic check)
"beqz %0, 1b \n" // start again on atomic error
" nop \n" // delay slot nop
".set pop \n"
: "=&r" (temp), "=&r" (old),
"=m" (*ptr)
: "r" (new_value), "m" (*ptr)
: "memory"
);
return old;
}
inline void MemoryBarrier()
{
__asm__ volatile("sync" : : : "memory");
}
// "Acquire" operations
// ensure that no later memory access can be reordered ahead of the operation.
// "Release" operations ensure that no previous memory access can be reordered
// after the operation. "Barrier" operations have both "Acquire" and "Release"
// semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory
// access.
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value)
{
Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
MemoryBarrier();
return res;
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value)
{
MemoryBarrier();
Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
return res;
}
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value)
{
*ptr = value;
}
inline Atomic32 Acquire_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value)
{
Atomic32 old_value = NoBarrier_AtomicExchange(ptr, new_value);
MemoryBarrier();
return old_value;
}
inline Atomic32 Release_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value)
{
MemoryBarrier();
return NoBarrier_AtomicExchange(ptr, new_value);
}
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value)
{
*ptr = value;
MemoryBarrier();
}
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value)
{
MemoryBarrier();
*ptr = value;
}
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr)
{
return *ptr;
}
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr)
{
Atomic32 value = *ptr;
MemoryBarrier();
return value;
}
inline Atomic32 Release_Load(volatile const Atomic32* ptr)
{
MemoryBarrier();
return *ptr;
}
#if (_MIPS_ISA == _MIPS_ISA_MIPS64) || (_MIPS_SIM == _MIPS_SIM_ABI64)
typedef int64_t Atomic64;
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value)
{
Atomic64 prev, tmp;
__asm__ volatile(
".set push \n"
".set noreorder \n"
"1: \n"
"lld %0, %5 \n" // prev = *ptr
"bne %0, %3, 2f \n" // if (prev != old_value) goto 2
" move %2, %4 \n" // tmp = new_value
"scd %2, %1 \n" // *ptr = tmp (with atomic check)
"beqz %2, 1b \n" // start again on atomic error
" nop \n" // delay slot nop
"2: \n"
".set pop \n"
: "=&r" (prev), "=m" (*ptr),
"=&r" (tmp)
: "Ir" (old_value), "r" (new_value),
"m" (*ptr)
: "memory"
);
return prev;
}
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value)
{
Atomic64 temp, old;
__asm__ volatile(
".set push \n"
".set noreorder \n"
"1: \n"
"lld %1, %2 \n" // old = *ptr
"move %0, %3 \n" // temp = new_value
"scd %0, %2 \n" // *ptr = temp (with atomic check)
"beqz %0, 1b \n" // start again on atomic error
" nop \n" // delay slot nop
".set pop \n"
: "=&r" (temp), "=&r" (old),
"=m" (*ptr)
: "r" (new_value), "m" (*ptr)
: "memory"
);
return old;
}
inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value)
{
Atomic64 old_value = NoBarrier_AtomicExchange(ptr, new_value);
MemoryBarrier();
return old_value;
}
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value)
{
Atomic64 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
MemoryBarrier();
return res;
}
inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value)
{
MemoryBarrier();
Atomic64 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
return res;
}
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value)
{
*ptr = value;
}
inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value)
{
MemoryBarrier();
return NoBarrier_AtomicExchange(ptr, new_value);
}
inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value)
{
*ptr = value;
MemoryBarrier();
}
inline void Release_Store(volatile Atomic64* ptr, Atomic64 value)
{
MemoryBarrier();
*ptr = value;
}
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr)
{
return *ptr;
}
inline Atomic64 Acquire_Load(volatile const Atomic64* ptr)
{
Atomic64 value = *ptr;
MemoryBarrier();
return value;
}
inline Atomic64 Release_Load(volatile const Atomic64* ptr)
{
MemoryBarrier();
return *ptr;
}
#endif
} // namespace base::subtle
} // namespace base
#endif // BASE_ATOMICOPS_INTERNALS_MIPS_H_
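For reference, the whole family of hand-written CompareAndSwap / AtomicExchange variants that these removed headers provide per platform has a direct C++11 equivalent. A hedged sketch of that mapping (illustrative only, keeping the return-the-old-value convention used above):

#include <atomic>
#include <cstdint>

typedef int32_t Atomic32;

inline Atomic32 Acquire_CompareAndSwap(std::atomic<Atomic32>* ptr,
                                       Atomic32 old_value, Atomic32 new_value) {
  Atomic32 expected = old_value;
  // compare_exchange_strong leaves the previously stored value in `expected`.
  ptr->compare_exchange_strong(expected, new_value,
                               std::memory_order_acquire,
                               std::memory_order_acquire);
  return expected;
}

inline Atomic32 Release_AtomicExchange(std::atomic<Atomic32>* ptr,
                                       Atomic32 new_value) {
  return ptr->exchange(new_value, std::memory_order_release);
}

int main() {
  std::atomic<Atomic32> v(7);
  Atomic32 seen = Acquire_CompareAndSwap(&v, 7, 9);   // seen == 7, v becomes 9
  Atomic32 old  = Release_AtomicExchange(&v, 1);      // old == 9
  return (seen == 7 && old == 9 && v.load() == 1) ? 0 : 1;
}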

View File

@ -1,457 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2006, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ---
* Author: Sanjay Ghemawat
*/
// Implementation of atomic operations using Windows API
// functions. This file should not be included directly. Clients
// should instead include "base/atomicops.h".
#ifndef BASE_ATOMICOPS_INTERNALS_WINDOWS_H_
#define BASE_ATOMICOPS_INTERNALS_WINDOWS_H_
#include <stdio.h>
#include <stdlib.h>
#include "base/basictypes.h" // For COMPILE_ASSERT
typedef int32 Atomic32;
#if defined(_WIN64)
#define BASE_HAS_ATOMIC64 1 // Use only in tests and base/atomic*
#endif
namespace base {
namespace subtle {
typedef int64 Atomic64;
// 32-bit low-level operations on any platform
extern "C" {
// We use windows intrinsics when we can (they seem to be supported
// well on MSVC 8.0 and above). Unfortunately, in some
// environments, <windows.h> and <intrin.h> have conflicting
// declarations of some other intrinsics, breaking compilation:
// http://connect.microsoft.com/VisualStudio/feedback/details/262047
// Therefore, we simply declare the relevant intrinsics ourself.
// MinGW has a bug in the header files where it doesn't indicate the
// first argument is volatile -- they're not up to date. See
// http://readlist.com/lists/lists.sourceforge.net/mingw-users/0/3861.html
// We have to const_cast away the volatile to avoid compiler warnings.
// TODO(csilvers): remove this once MinGW has updated MinGW/include/winbase.h
#if defined(__MINGW32__)
inline LONG FastInterlockedCompareExchange(volatile LONG* ptr,
LONG newval, LONG oldval) {
return ::InterlockedCompareExchange(const_cast<LONG*>(ptr), newval, oldval);
}
inline LONG FastInterlockedExchange(volatile LONG* ptr, LONG newval) {
return ::InterlockedExchange(const_cast<LONG*>(ptr), newval);
}
inline LONG FastInterlockedExchangeAdd(volatile LONG* ptr, LONG increment) {
return ::InterlockedExchangeAdd(const_cast<LONG*>(ptr), increment);
}
#elif _MSC_VER >= 1400 // intrinsics didn't work so well before MSVC 8.0
// Unfortunately, in some environments, <windows.h> and <intrin.h>
// have conflicting declarations of some intrinsics, breaking
// compilation. So we declare the intrinsics we need ourselves. See
// http://connect.microsoft.com/VisualStudio/feedback/details/262047
LONG _InterlockedCompareExchange(volatile LONG* ptr, LONG newval, LONG oldval);
#pragma intrinsic(_InterlockedCompareExchange)
inline LONG FastInterlockedCompareExchange(volatile LONG* ptr,
LONG newval, LONG oldval) {
return _InterlockedCompareExchange(ptr, newval, oldval);
}
LONG _InterlockedExchange(volatile LONG* ptr, LONG newval);
#pragma intrinsic(_InterlockedExchange)
inline LONG FastInterlockedExchange(volatile LONG* ptr, LONG newval) {
return _InterlockedExchange(ptr, newval);
}
LONG _InterlockedExchangeAdd(volatile LONG* ptr, LONG increment);
#pragma intrinsic(_InterlockedExchangeAdd)
inline LONG FastInterlockedExchangeAdd(volatile LONG* ptr, LONG increment) {
return _InterlockedExchangeAdd(ptr, increment);
}
#else
inline LONG FastInterlockedCompareExchange(volatile LONG* ptr,
LONG newval, LONG oldval) {
return ::InterlockedCompareExchange(ptr, newval, oldval);
}
inline LONG FastInterlockedExchange(volatile LONG* ptr, LONG newval) {
return ::InterlockedExchange(ptr, newval);
}
inline LONG FastInterlockedExchangeAdd(volatile LONG* ptr, LONG increment) {
return ::InterlockedExchangeAdd(ptr, increment);
}
#endif // ifdef __MINGW32__
} // extern "C"
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
LONG result = FastInterlockedCompareExchange(
reinterpret_cast<volatile LONG*>(ptr),
static_cast<LONG>(new_value),
static_cast<LONG>(old_value));
return static_cast<Atomic32>(result);
}
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
LONG result = FastInterlockedExchange(
reinterpret_cast<volatile LONG*>(ptr),
static_cast<LONG>(new_value));
return static_cast<Atomic32>(result);
}
inline Atomic32 Acquire_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
// FastInterlockedExchange has both acquire and release memory barriers.
return NoBarrier_AtomicExchange(ptr, new_value);
}
inline Atomic32 Release_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
// FastInterlockedExchange has both acquire and release memory barriers.
return NoBarrier_AtomicExchange(ptr, new_value);
}
} // namespace base::subtle
} // namespace base
// In msvc8/vs2005, winnt.h already contains a definition for
// MemoryBarrier in the global namespace. Add it there for earlier
// versions and forward to it from within the namespace.
#if !(defined(_MSC_VER) && _MSC_VER >= 1400)
inline void MemoryBarrier() {
Atomic32 value = 0;
base::subtle::NoBarrier_AtomicExchange(&value, 0);
// actually acts as a barrier in this implementation
}
#endif
namespace base {
namespace subtle {
inline void MemoryBarrier() {
::MemoryBarrier();
}
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
}
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
Acquire_AtomicExchange(ptr, value);
}
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value; // works w/o barrier for current Intel chips as of June 2005
// See comments in Atomic64 version of Release_Store() below.
}
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
return *ptr;
}
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
Atomic32 value = *ptr;
return value;
}
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
MemoryBarrier();
return *ptr;
}
// 64-bit operations
#if defined(_WIN64) || defined(__MINGW64__)
// 64-bit low-level operations on 64-bit platform.
COMPILE_ASSERT(sizeof(Atomic64) == sizeof(PVOID), atomic_word_is_atomic);
// These are the intrinsics needed for 64-bit operations. Similar to the
// 32-bit case above.
extern "C" {
#if defined(__MINGW64__)
inline PVOID FastInterlockedCompareExchangePointer(volatile PVOID* ptr,
PVOID newval, PVOID oldval) {
return ::InterlockedCompareExchangePointer(const_cast<PVOID*>(ptr),
newval, oldval);
}
inline PVOID FastInterlockedExchangePointer(volatile PVOID* ptr, PVOID newval) {
return ::InterlockedExchangePointer(const_cast<PVOID*>(ptr), newval);
}
inline LONGLONG FastInterlockedExchangeAdd64(volatile LONGLONG* ptr,
LONGLONG increment) {
return ::InterlockedExchangeAdd64(const_cast<LONGLONG*>(ptr), increment);
}
#elif _MSC_VER >= 1400 // intrinsics didn't work so well before MSVC 8.0
// Like above, we need to declare the intrinsics ourselves.
PVOID _InterlockedCompareExchangePointer(volatile PVOID* ptr,
PVOID newval, PVOID oldval);
#pragma intrinsic(_InterlockedCompareExchangePointer)
inline PVOID FastInterlockedCompareExchangePointer(volatile PVOID* ptr,
PVOID newval, PVOID oldval) {
return _InterlockedCompareExchangePointer(const_cast<PVOID*>(ptr),
newval, oldval);
}
PVOID _InterlockedExchangePointer(volatile PVOID* ptr, PVOID newval);
#pragma intrinsic(_InterlockedExchangePointer)
inline PVOID FastInterlockedExchangePointer(volatile PVOID* ptr, PVOID newval) {
return _InterlockedExchangePointer(const_cast<PVOID*>(ptr), newval);
}
LONGLONG _InterlockedExchangeAdd64(volatile LONGLONG* ptr, LONGLONG increment);
#pragma intrinsic(_InterlockedExchangeAdd64)
inline LONGLONG FastInterlockedExchangeAdd64(volatile LONGLONG* ptr,
LONGLONG increment) {
return _InterlockedExchangeAdd64(const_cast<LONGLONG*>(ptr), increment);
}
#else
inline PVOID FastInterlockedCompareExchangePointer(volatile PVOID* ptr,
PVOID newval, PVOID oldval) {
return ::InterlockedCompareExchangePointer(ptr, newval, oldval);
}
inline PVOID FastInterlockedExchangePointer(volatile PVOID* ptr, PVOID newval) {
return ::InterlockedExchangePointer(ptr, newval);
}
inline LONGLONG FastInterlockedExchangeAdd64(volatile LONGLONG* ptr,
LONGLONG increment) {
return ::InterlockedExchangeAdd64(ptr, increment);
}
#endif // ifdef __MINGW64__
} // extern "C"
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
PVOID result = FastInterlockedCompareExchangePointer(
reinterpret_cast<volatile PVOID*>(ptr),
reinterpret_cast<PVOID>(new_value), reinterpret_cast<PVOID>(old_value));
return reinterpret_cast<Atomic64>(result);
}
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
PVOID result = FastInterlockedExchangePointer(
reinterpret_cast<volatile PVOID*>(ptr),
reinterpret_cast<PVOID>(new_value));
return reinterpret_cast<Atomic64>(result);
}
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value;
}
inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
NoBarrier_AtomicExchange(ptr, value);
// acts as a barrier in this implementation
}
inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value; // works w/o barrier for current Intel chips as of June 2005
// When new chips come out, check:
// IA-32 Intel Architecture Software Developer's Manual, Volume 3:
// System Programming Guide, Chapter 7: Multiple-processor management,
// Section 7.2, Memory Ordering.
// Last seen at:
// http://developer.intel.com/design/pentium4/manuals/index_new.htm
}
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
return *ptr;
}
inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
Atomic64 value = *ptr;
return value;
}
inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
MemoryBarrier();
return *ptr;
}
#else // defined(_WIN64) || defined(__MINGW64__)
// 64-bit low-level operations on 32-bit platform
// TODO(vchen): The GNU assembly below must be converted to MSVC inline
// assembly. Then the file should be renamed to ...-x86-msvc.h, probably.
inline void NotImplementedFatalError(const char *function_name) {
fprintf(stderr, "64-bit %s() not implemented on this platform\n",
function_name);
abort();
}
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
#if 0 // Not implemented
Atomic64 prev;
__asm__ __volatile__("movl (%3), %%ebx\n\t" // Move 64-bit new_value into
"movl 4(%3), %%ecx\n\t" // ecx:ebx
"lock; cmpxchg8b %1\n\t" // If edx:eax (old_value) same
: "=A" (prev) // as contents of ptr:
: "m" (*ptr), // ecx:ebx => ptr
"0" (old_value), // else:
"r" (&new_value) // old *ptr => edx:eax
: "memory", "%ebx", "%ecx");
return prev;
#else
NotImplementedFatalError("NoBarrier_CompareAndSwap");
return 0;
#endif
}
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
#if 0 // Not implemented
__asm__ __volatile__(
"movl (%2), %%ebx\n\t" // Move 64-bit new_value into
"movl 4(%2), %%ecx\n\t" // ecx:ebx
"0:\n\t"
"movl %1, %%eax\n\t" // Read contents of ptr into
"movl 4%1, %%edx\n\t" // edx:eax
"lock; cmpxchg8b %1\n\t" // Attempt cmpxchg; if *ptr
"jnz 0b\n\t" // is no longer edx:eax, loop
: "=A" (new_value)
: "m" (*ptr),
"r" (&new_value)
: "memory", "%ebx", "%ecx");
return new_value; // Now it's the previous value.
#else
NotImplementedFatalError("NoBarrier_AtomicExchange");
return 0;
#endif
}
inline void NoBarrier_Store(volatile Atomic64* ptrValue, Atomic64 value)
{
__asm {
movq mm0, value; // Use mmx reg for 64-bit atomic moves
mov eax, ptrValue;
movq [eax], mm0;
emms; // Empty mmx state to enable FP registers
}
}
inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
NoBarrier_AtomicExchange(ptr, value);
// acts as a barrier in this implementation
}
inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
NoBarrier_Store(ptr, value);
}
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptrValue)
{
Atomic64 value;
__asm {
mov eax, ptrValue;
movq mm0, [eax]; // Use mmx reg for 64-bit atomic moves
movq value, mm0;
emms; // Empty mmx state to enable FP registers
}
return value;
}
inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
Atomic64 value = NoBarrier_Load(ptr);
return value;
}
inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
MemoryBarrier();
return NoBarrier_Load(ptr);
}
#endif // defined(_WIN64) || defined(__MINGW64__)
inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
// FastInterlockedExchange has both acquire and release memory barriers.
return NoBarrier_AtomicExchange(ptr, new_value);
}
inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
// FastInterlockedExchange has both acquire and release memory barriers.
return NoBarrier_AtomicExchange(ptr, new_value);
}
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
} // namespace base::subtle
} // namespace base
#endif // BASE_ATOMICOPS_INTERNALS_WINDOWS_H_


@ -1,112 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2007, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ---
* This module gets enough CPU information to optimize the
* atomicops module on x86.
*/
#include "base/atomicops.h"
#include "base/basictypes.h"
#include "base/googleinit.h"
#include "base/logging.h"
#include <string.h>
// This file only makes sense with atomicops-internals-x86.h -- it
// depends on structs that are defined in that file. If atomicops.h
// doesn't sub-include that file, then we aren't needed, and shouldn't
// try to do anything.
#ifdef BASE_ATOMICOPS_INTERNALS_X86_H_
// Inline cpuid instruction. In PIC compilations, %ebx contains the address
// of the global offset table. To avoid breaking such executables, this code
// must preserve that register's value across cpuid instructions.
#if defined(__i386__)
#define cpuid(a, b, c, d, inp) \
asm ("mov %%ebx, %%edi\n" \
"cpuid\n" \
"xchg %%edi, %%ebx\n" \
: "=a" (a), "=D" (b), "=c" (c), "=d" (d) : "a" (inp))
#elif defined (__x86_64__)
#define cpuid(a, b, c, d, inp) \
asm ("mov %%rbx, %%rdi\n" \
"cpuid\n" \
"xchg %%rdi, %%rbx\n" \
: "=a" (a), "=D" (b), "=c" (c), "=d" (d) : "a" (inp))
#endif
#if defined(cpuid) // initialize the struct only on x86
// Set the flags so that code will run correctly and conservatively
// until InitGoogle() is called.
struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures = {
false, // no SSE2
false // no cmpxchg16b
};
// Initialize the AtomicOps_Internalx86CPUFeatures struct.
static void AtomicOps_Internalx86CPUFeaturesInit() {
uint32 eax;
uint32 ebx;
uint32 ecx;
uint32 edx;
// Get vendor string (issue CPUID with eax = 0)
cpuid(eax, ebx, ecx, edx, 0);
char vendor[13];
memcpy(vendor, &ebx, 4);
memcpy(vendor + 4, &edx, 4);
memcpy(vendor + 8, &ecx, 4);
vendor[12] = 0;
// get feature flags in ecx/edx, and family/model in eax
cpuid(eax, ebx, ecx, edx, 1);
int family = (eax >> 8) & 0xf; // family and model fields
int model = (eax >> 4) & 0xf;
if (family == 0xf) { // use extended family and model fields
family += (eax >> 20) & 0xff;
model += ((eax >> 16) & 0xf) << 4;
}
// edx bit 26 is SSE2, which we use to tell us whether we can use mfence
AtomicOps_Internalx86CPUFeatures.has_sse2 = ((edx >> 26) & 1);
// ecx bit 13 indicates whether the cmpxchg16b instruction is supported
AtomicOps_Internalx86CPUFeatures.has_cmpxchg16b = ((ecx >> 13) & 1);
}
REGISTER_MODULE_INITIALIZER(atomicops_x86, {
AtomicOps_Internalx86CPUFeaturesInit();
});
#endif
#endif /* ifdef BASE_ATOMICOPS_INTERNALS_X86_H_ */


@ -1,391 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2006, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ---
* Author: Sanjay Ghemawat
*/
// Implementation of atomic operations for x86. This file should not
// be included directly. Clients should instead include
// "base/atomicops.h".
#ifndef BASE_ATOMICOPS_INTERNALS_X86_H_
#define BASE_ATOMICOPS_INTERNALS_X86_H_
#include "base/basictypes.h"
typedef int32_t Atomic32;
#define BASE_HAS_ATOMIC64 1 // Use only in tests and base/atomic*
// NOTE(vchen): x86 does not need to define AtomicWordCastType, because it
// already matches Atomic32 or Atomic64, depending on the platform.
// This struct is not part of the public API of this module; clients may not
// use it.
// Features of this x86. Values may not be correct before main() is run,
// but are set conservatively.
struct AtomicOps_x86CPUFeatureStruct {
bool has_sse2; // Processor has SSE2.
bool has_cmpxchg16b; // Processor supports cmpxchg16b instruction.
};
ATTRIBUTE_VISIBILITY_HIDDEN
extern struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures;
#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
namespace base {
namespace subtle {
typedef int64_t Atomic64;
// 32-bit low-level operations on any platform.
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 prev;
__asm__ __volatile__("lock; cmpxchgl %1,%2"
: "=a" (prev)
: "q" (new_value), "m" (*ptr), "0" (old_value)
: "memory");
return prev;
}
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
__asm__ __volatile__("xchgl %1,%0" // The lock prefix is implicit for xchg.
: "=r" (new_value)
: "m" (*ptr), "0" (new_value)
: "memory");
return new_value; // Now it's the previous value.
}
inline Atomic32 Acquire_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
Atomic32 old_val = NoBarrier_AtomicExchange(ptr, new_value);
return old_val;
}
inline Atomic32 Release_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
// xchgl already has release memory barrier semantics.
return NoBarrier_AtomicExchange(ptr, new_value);
}
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
return x;
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
}
#if defined(__x86_64__)
// 64-bit implementations of memory barrier can be simpler, because the
// "mfence" instruction is guaranteed to exist.
inline void MemoryBarrier() {
__asm__ __volatile__("mfence" : : : "memory");
}
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
MemoryBarrier();
}
#else
inline void MemoryBarrier() {
if (AtomicOps_Internalx86CPUFeatures.has_sse2) {
__asm__ __volatile__("mfence" : : : "memory");
} else { // mfence is faster but not present on PIII
Atomic32 x = 0;
Acquire_AtomicExchange(&x, 0);
}
}
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
if (AtomicOps_Internalx86CPUFeatures.has_sse2) {
*ptr = value;
__asm__ __volatile__("mfence" : : : "memory");
} else {
Acquire_AtomicExchange(ptr, value);
}
}
#endif
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
ATOMICOPS_COMPILER_BARRIER();
*ptr = value; // An x86 store acts as a release barrier.
// See comments in Atomic64 version of Release_Store(), below.
}
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
return *ptr;
}
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
Atomic32 value = *ptr; // An x86 load acts as an acquire barrier.
// See comments in Atomic64 version of Release_Store(), below.
ATOMICOPS_COMPILER_BARRIER();
return value;
}
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
MemoryBarrier();
return *ptr;
}
#if defined(__x86_64__)
// 64-bit low-level operations on 64-bit platform.
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 prev;
__asm__ __volatile__("lock; cmpxchgq %1,%2"
: "=a" (prev)
: "q" (new_value), "m" (*ptr), "0" (old_value)
: "memory");
return prev;
}
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
__asm__ __volatile__("xchgq %1,%0" // The lock prefix is implicit for xchg.
: "=r" (new_value)
: "m" (*ptr), "0" (new_value)
: "memory");
return new_value; // Now it's the previous value.
}
inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
Atomic64 old_val = NoBarrier_AtomicExchange(ptr, new_value);
return old_val;
}
inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
// xchgq already has release memory barrier semantics.
return NoBarrier_AtomicExchange(ptr, new_value);
}
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value;
}
inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value;
MemoryBarrier();
}
inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
ATOMICOPS_COMPILER_BARRIER();
*ptr = value; // An x86 store acts as a release barrier
// for current AMD/Intel chips as of Jan 2008.
// See also Acquire_Load(), below.
// When new chips come out, check:
// IA-32 Intel Architecture Software Developer's Manual, Volume 3:
// System Programming Guide, Chapter 7: Multiple-processor management,
// Section 7.2, Memory Ordering.
// Last seen at:
// http://developer.intel.com/design/pentium4/manuals/index_new.htm
//
// x86 stores/loads fail to act as barriers for a few instructions (clflush
// maskmovdqu maskmovq movntdq movnti movntpd movntps movntq) but these are
// not generated by the compiler, and are rare. Users of these instructions
// need to know about cache behaviour in any case since all of these involve
// either flushing cache lines or non-temporal cache hints.
}
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
return *ptr;
}
inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
Atomic64 value = *ptr; // An x86 load acts as an acquire barrier,
// for current AMD/Intel chips as of Jan 2008.
// See also Release_Store(), above.
ATOMICOPS_COMPILER_BARRIER();
return value;
}
inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
MemoryBarrier();
return *ptr;
}
#else // defined(__x86_64__)
// 64-bit low-level operations on 32-bit platform.
#if !((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1))
// For compilers older than gcc 4.1, we use inline asm.
//
// Potential pitfalls:
//
// 1. %ebx points to Global offset table (GOT) with -fPIC.
// We need to preserve this register.
// 2. When explicit registers are used in inline asm, the
// compiler may not be aware of it and might try to reuse
// the same register for another argument which has constraints
// that allow it ("r" for example).
inline Atomic64 __sync_val_compare_and_swap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 prev;
__asm__ __volatile__("push %%ebx\n\t"
"movl (%3), %%ebx\n\t" // Move 64-bit new_value into
"movl 4(%3), %%ecx\n\t" // ecx:ebx
"lock; cmpxchg8b (%1)\n\t"// If edx:eax (old_value) same
"pop %%ebx\n\t"
: "=A" (prev) // as contents of ptr:
: "D" (ptr), // ecx:ebx => ptr
"0" (old_value), // else:
"S" (&new_value) // old *ptr => edx:eax
: "memory", "%ecx");
return prev;
}
#endif // Compiler < gcc-4.1
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_val,
Atomic64 new_val) {
return __sync_val_compare_and_swap(ptr, old_val, new_val);
}
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_val) {
Atomic64 old_val;
do {
old_val = *ptr;
} while (__sync_val_compare_and_swap(ptr, old_val, new_val) != old_val);
return old_val;
}
inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_val) {
Atomic64 old_val = NoBarrier_AtomicExchange(ptr, new_val);
return old_val;
}
inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_val) {
return NoBarrier_AtomicExchange(ptr, new_val);
}
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
__asm__ __volatile__("movq %1, %%mm0\n\t" // Use mmx reg for 64-bit atomic
"movq %%mm0, %0\n\t" // moves (ptr could be read-only)
"emms\n\t" // Empty mmx state/Reset FP regs
: "=m" (*ptr)
: "m" (value)
: // mark the FP stack and mmx registers as clobbered
"st", "st(1)", "st(2)", "st(3)", "st(4)",
"st(5)", "st(6)", "st(7)", "mm0", "mm1",
"mm2", "mm3", "mm4", "mm5", "mm6", "mm7");
}
inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
NoBarrier_Store(ptr, value);
MemoryBarrier();
}
inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
ATOMICOPS_COMPILER_BARRIER();
NoBarrier_Store(ptr, value);
}
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
Atomic64 value;
__asm__ __volatile__("movq %1, %%mm0\n\t" // Use mmx reg for 64-bit atomic
"movq %%mm0, %0\n\t" // moves (ptr could be read-only)
"emms\n\t" // Empty mmx state/Reset FP regs
: "=m" (value)
: "m" (*ptr)
: // mark the FP stack and mmx registers as clobbered
"st", "st(1)", "st(2)", "st(3)", "st(4)",
"st(5)", "st(6)", "st(7)", "mm0", "mm1",
"mm2", "mm3", "mm4", "mm5", "mm6", "mm7");
return value;
}
inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
Atomic64 value = NoBarrier_Load(ptr);
ATOMICOPS_COMPILER_BARRIER();
return value;
}
inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
MemoryBarrier();
return NoBarrier_Load(ptr);
}
#endif // defined(__x86_64__)
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
return x;
}
inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
} // namespace base::subtle
} // namespace base
#undef ATOMICOPS_COMPILER_BARRIER
#endif // BASE_ATOMICOPS_INTERNALS_X86_H_


@ -1,399 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2006, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ---
* Author: Sanjay Ghemawat
*/
// For atomic operations on statistics counters, see atomic_stats_counter.h.
// For atomic operations on sequence numbers, see atomic_sequence_num.h.
// For atomic operations on reference counts, see atomic_refcount.h.
// Some fast atomic operations -- typically with machine-dependent
// implementations. This file may need editing as Google code is
// ported to different architectures.
// The routines exported by this module are subtle. If you use them, even if
// you get the code right, it will depend on careful reasoning about atomicity
// and memory ordering; it will be less readable, and harder to maintain. If
// you plan to use these routines, you should have a good reason, such as solid
// evidence that performance would otherwise suffer, or there being no
// alternative. You should assume only properties explicitly guaranteed by the
// specifications in this file. You are almost certainly _not_ writing code
// just for the x86; if you assume x86 semantics, x86 hardware bugs and
// implementations on other architectures will cause your code to break. If you
// do not know what you are doing, avoid these routines, and use a Mutex.
//
// These following lower-level operations are typically useful only to people
// implementing higher-level synchronization operations like spinlocks,
// mutexes, and condition-variables. They combine CompareAndSwap(), a load, or
// a store with appropriate memory-ordering instructions. "Acquire" operations
// ensure that no later memory access can be reordered ahead of the operation.
// "Release" operations ensure that no previous memory access can be reordered
// after the operation. "Barrier" operations have both "Acquire" and "Release"
// semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory
// access.
//
// It is incorrect to make direct assignments to/from an atomic variable.
// You should use one of the Load or Store routines. The NoBarrier
// versions are provided when no barriers are needed:
// NoBarrier_Store()
// NoBarrier_Load()
// Although there is currently no compiler enforcement, you are encouraged
// to use these. Moreover, if you choose to use the base::subtle::Atomic64 type,
// you MUST use one of the Load or Store routines to get correct behavior
// on 32-bit platforms.
//
// The intent is eventually to put all of these routines in namespace
// base::subtle
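
A minimal usage sketch of the Acquire/Release pairing described above; this is an illustration, not part of the header, and the flag and payload names are invented:

// Publish a value from one thread and consume it in another. Release_Store
// ensures no earlier write is reordered after it; Acquire_Load ensures no
// later read is reordered before it.
#include "base/atomicops.h"

static Atomic32 g_ready = 0;   // handoff flag
static int g_payload = 0;      // data published before the flag

void Producer() {
  g_payload = 42;                            // ordinary write
  base::subtle::Release_Store(&g_ready, 1);  // publish: the payload is visible first
}

int Consumer() {
  while (base::subtle::Acquire_Load(&g_ready) == 0) {
    // spin until the producer sets the flag
  }
  return g_payload;                          // observes 42
}
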
#ifndef THREAD_ATOMICOPS_H_
#define THREAD_ATOMICOPS_H_
#include "../config.h"
#ifdef HAVE_STDINT_H
#include <stdint.h>
#endif
// ------------------------------------------------------------------------
// Include the platform specific implementations of the types
// and operations listed below. Implementations are to provide Atomic32
// and Atomic64 operations. If there is a mismatch between intptr_t and
// the Atomic32 or Atomic64 types for a platform, the platform-specific header
// should define the macro, AtomicWordCastType in a clause similar to the
// following:
// #if ...pointers are 64 bits...
// # define AtomicWordCastType base::subtle::Atomic64
// #else
// # define AtomicWordCastType Atomic32
// #endif
// TODO(csilvers): figure out ARCH_PIII/ARCH_K8 (perhaps via ./configure?)
// ------------------------------------------------------------------------
#include "base/arm_instruction_set_select.h"
#define GCC_VERSION (__GNUC__ * 10000 \
+ __GNUC_MINOR__ * 100 \
+ __GNUC_PATCHLEVEL__)
#define CLANG_VERSION (__clang_major__ * 10000 \
+ __clang_minor__ * 100 \
+ __clang_patchlevel__)
#if defined(TCMALLOC_PREFER_GCC_ATOMICS) && defined(__GNUC__) && GCC_VERSION >= 40700
#include "base/atomicops-internals-gcc.h"
#elif defined(TCMALLOC_PREFER_GCC_ATOMICS) && defined(__clang__) && CLANG_VERSION >= 30400
#include "base/atomicops-internals-gcc.h"
#elif defined(__MACH__) && defined(__APPLE__)
#include "base/atomicops-internals-macosx.h"
#elif defined(__GNUC__) && defined(ARMV6)
#include "base/atomicops-internals-arm-v6plus.h"
#elif defined(ARMV3)
#include "base/atomicops-internals-arm-generic.h"
#elif defined(__GNUC__) && (defined(__i386) || defined(__x86_64__))
#include "base/atomicops-internals-x86.h"
#elif defined(_WIN32)
#include "base/atomicops-internals-windows.h"
#elif defined(__linux__) && defined(__PPC__)
#include "base/atomicops-internals-linuxppc.h"
#elif defined(__GNUC__) && defined(__mips__)
#include "base/atomicops-internals-mips.h"
#elif defined(__GNUC__) && GCC_VERSION >= 40700
#include "base/atomicops-internals-gcc.h"
#elif defined(__clang__) && CLANG_VERSION >= 30400
#include "base/atomicops-internals-gcc.h"
#else
#error You need to implement atomic operations for this architecture
#endif
// Signed type that can hold a pointer and supports the atomic ops below, as
// well as atomic loads and stores. Instances must be naturally-aligned.
typedef intptr_t AtomicWord;
#ifdef AtomicWordCastType
// ------------------------------------------------------------------------
// This section is needed only when explicit type casting is required to
// cast AtomicWord to one of the basic atomic types (Atomic64 or Atomic32).
// It also serves to document the AtomicWord interface.
// ------------------------------------------------------------------------
namespace base {
namespace subtle {
// Atomically execute:
// result = *ptr;
// if (*ptr == old_value)
// *ptr = new_value;
// return result;
//
// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
// Always return the old value of "*ptr"
//
// This routine implies no memory barriers.
inline AtomicWord NoBarrier_CompareAndSwap(volatile AtomicWord* ptr,
AtomicWord old_value,
AtomicWord new_value) {
return NoBarrier_CompareAndSwap(
reinterpret_cast<volatile AtomicWordCastType*>(ptr),
old_value, new_value);
}
// Atomically store new_value into *ptr, returning the previous value held in
// *ptr. This routine implies no memory barriers.
inline AtomicWord NoBarrier_AtomicExchange(volatile AtomicWord* ptr,
AtomicWord new_value) {
return NoBarrier_AtomicExchange(
reinterpret_cast<volatile AtomicWordCastType*>(ptr), new_value);
}
inline AtomicWord Acquire_AtomicExchange(volatile AtomicWord* ptr,
AtomicWord new_value) {
return Acquire_AtomicExchange(
reinterpret_cast<volatile AtomicWordCastType*>(ptr), new_value);
}
inline AtomicWord Release_AtomicExchange(volatile AtomicWord* ptr,
AtomicWord new_value) {
return Release_AtomicExchange(
reinterpret_cast<volatile AtomicWordCastType*>(ptr), new_value);
}
inline AtomicWord Acquire_CompareAndSwap(volatile AtomicWord* ptr,
AtomicWord old_value,
AtomicWord new_value) {
return base::subtle::Acquire_CompareAndSwap(
reinterpret_cast<volatile AtomicWordCastType*>(ptr),
old_value, new_value);
}
inline AtomicWord Release_CompareAndSwap(volatile AtomicWord* ptr,
AtomicWord old_value,
AtomicWord new_value) {
return base::subtle::Release_CompareAndSwap(
reinterpret_cast<volatile AtomicWordCastType*>(ptr),
old_value, new_value);
}
inline void NoBarrier_Store(volatile AtomicWord *ptr, AtomicWord value) {
NoBarrier_Store(
reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
}
inline void Acquire_Store(volatile AtomicWord* ptr, AtomicWord value) {
return base::subtle::Acquire_Store(
reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
}
inline void Release_Store(volatile AtomicWord* ptr, AtomicWord value) {
return base::subtle::Release_Store(
reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
}
inline AtomicWord NoBarrier_Load(volatile const AtomicWord *ptr) {
return NoBarrier_Load(
reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
}
inline AtomicWord Acquire_Load(volatile const AtomicWord* ptr) {
return base::subtle::Acquire_Load(
reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
}
inline AtomicWord Release_Load(volatile const AtomicWord* ptr) {
return base::subtle::Release_Load(
reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
}
} // namespace base::subtle
} // namespace base
#endif // AtomicWordCastType
// ------------------------------------------------------------------------
// Commented out type definitions and method declarations for documentation
// of the interface provided by this module.
// ------------------------------------------------------------------------
#if 0
// Signed 32-bit type that supports the atomic ops below, as well as atomic
// loads and stores. Instances must be naturally aligned. This type differs
// from AtomicWord in 64-bit binaries where AtomicWord is 64-bits.
typedef int32_t Atomic32;
// Corresponding operations on Atomic32
namespace base {
namespace subtle {
// Signed 64-bit type that supports the atomic ops below, as well as atomic
// loads and stores. Instances must be naturally aligned. This type differs
// from AtomicWord in 32-bit binaries where AtomicWord is 32-bits.
typedef int64_t Atomic64;
Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value);
Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value);
Atomic32 Acquire_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value);
Atomic32 Release_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value);
Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value);
Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value);
void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value);
void Acquire_Store(volatile Atomic32* ptr, Atomic32 value);
void Release_Store(volatile Atomic32* ptr, Atomic32 value);
Atomic32 NoBarrier_Load(volatile const Atomic32* ptr);
Atomic32 Acquire_Load(volatile const Atomic32* ptr);
Atomic32 Release_Load(volatile const Atomic32* ptr);
// Corresponding operations on Atomic64
Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value);
Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value);
Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value);
Atomic64 Release_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value);
Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value);
Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value);
void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value);
void Acquire_Store(volatile Atomic64* ptr, Atomic64 value);
void Release_Store(volatile Atomic64* ptr, Atomic64 value);
Atomic64 NoBarrier_Load(volatile const Atomic64* ptr);
Atomic64 Acquire_Load(volatile const Atomic64* ptr);
Atomic64 Release_Load(volatile const Atomic64* ptr);
} // namespace base::subtle
} // namespace base
void MemoryBarrier();
#endif // 0
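
To make the documented interface concrete, here is a hedged sketch of the kind of higher-level primitive these routines are intended for, a test-and-set spinlock; the class name is invented and the code is illustrative only:

#include "base/atomicops.h"

class SpinLockSketch {
 public:
  SpinLockSketch() : state_(0) {}

  void Lock() {
    // Acquire semantics: no critical-section access is reordered before this.
    while (base::subtle::Acquire_CompareAndSwap(&state_, 0, 1) != 0) {
      // busy-wait until the previous holder releases
    }
  }

  void Unlock() {
    // Release semantics: no critical-section access is reordered after this.
    base::subtle::Release_Store(&state_, 0);
  }

 private:
  Atomic32 state_;  // 0 = unlocked, 1 = locked
};
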
// ------------------------------------------------------------------------
// The following are to be deprecated when all uses have been changed to
// use the base::subtle namespace.
// ------------------------------------------------------------------------
#ifdef AtomicWordCastType
// AtomicWord versions to be deprecated
inline AtomicWord Acquire_CompareAndSwap(volatile AtomicWord* ptr,
AtomicWord old_value,
AtomicWord new_value) {
return base::subtle::Acquire_CompareAndSwap(ptr, old_value, new_value);
}
inline AtomicWord Release_CompareAndSwap(volatile AtomicWord* ptr,
AtomicWord old_value,
AtomicWord new_value) {
return base::subtle::Release_CompareAndSwap(ptr, old_value, new_value);
}
inline void Acquire_Store(volatile AtomicWord* ptr, AtomicWord value) {
return base::subtle::Acquire_Store(ptr, value);
}
inline void Release_Store(volatile AtomicWord* ptr, AtomicWord value) {
return base::subtle::Release_Store(ptr, value);
}
inline AtomicWord Acquire_Load(volatile const AtomicWord* ptr) {
return base::subtle::Acquire_Load(ptr);
}
inline AtomicWord Release_Load(volatile const AtomicWord* ptr) {
return base::subtle::Release_Load(ptr);
}
#endif // AtomicWordCastType
// 32-bit Acquire/Release operations to be deprecated.
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
return base::subtle::Acquire_CompareAndSwap(ptr, old_value, new_value);
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
return base::subtle::Release_CompareAndSwap(ptr, old_value, new_value);
}
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
base::subtle::Acquire_Store(ptr, value);
}
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
return base::subtle::Release_Store(ptr, value);
}
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
return base::subtle::Acquire_Load(ptr);
}
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
return base::subtle::Release_Load(ptr);
}
#ifdef BASE_HAS_ATOMIC64
// 64-bit Acquire/Release operations to be deprecated.
inline base::subtle::Atomic64 Acquire_CompareAndSwap(
volatile base::subtle::Atomic64* ptr,
base::subtle::Atomic64 old_value, base::subtle::Atomic64 new_value) {
return base::subtle::Acquire_CompareAndSwap(ptr, old_value, new_value);
}
inline base::subtle::Atomic64 Release_CompareAndSwap(
volatile base::subtle::Atomic64* ptr,
base::subtle::Atomic64 old_value, base::subtle::Atomic64 new_value) {
return base::subtle::Release_CompareAndSwap(ptr, old_value, new_value);
}
inline void Acquire_Store(
volatile base::subtle::Atomic64* ptr, base::subtle::Atomic64 value) {
base::subtle::Acquire_Store(ptr, value);
}
inline void Release_Store(
volatile base::subtle::Atomic64* ptr, base::subtle::Atomic64 value) {
return base::subtle::Release_Store(ptr, value);
}
inline base::subtle::Atomic64 Acquire_Load(
volatile const base::subtle::Atomic64* ptr) {
return base::subtle::Acquire_Load(ptr);
}
inline base::subtle::Atomic64 Release_Load(
volatile const base::subtle::Atomic64* ptr) {
return base::subtle::Release_Load(ptr);
}
#endif // BASE_HAS_ATOMIC64
#endif // THREAD_ATOMICOPS_H_


@ -1,408 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2005, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef _BASICTYPES_H_
#define _BASICTYPES_H_
#include "../config.h"
#include <string.h> // for memcpy()
#ifdef HAVE_INTTYPES_H
#include <inttypes.h> // gets us PRId64, etc
#endif
// To use this in an autoconf setting, make sure you run the following
// autoconf macros:
// AC_HEADER_STDC /* for stdint_h and inttypes_h */
// AC_CHECK_TYPES([__int64]) /* defined in some windows platforms */
#ifdef HAVE_INTTYPES_H
#include <inttypes.h> // uint16_t might be here; PRId64 too.
#endif
#ifdef HAVE_STDINT_H
#include <stdint.h> // to get uint16_t (ISO naming madness)
#endif
#include <sys/types.h> // our last best hope for uint16_t
// Standard typedefs
// All Google code is compiled with -funsigned-char to make "char"
// unsigned. Google code therefore doesn't need a "uchar" type.
// TODO(csilvers): how do we make sure unsigned-char works on non-gcc systems?
typedef signed char schar;
typedef int8_t int8;
typedef int16_t int16;
typedef int32_t int32;
typedef int64_t int64;
// NOTE: unsigned types are DANGEROUS in loops and other arithmetical
// places. Use the signed types unless your variable represents a bit
// pattern (eg a hash value) or you really need the extra bit. Do NOT
// use 'unsigned' to express "this value should always be positive";
// use assertions for this.
typedef uint8_t uint8;
typedef uint16_t uint16;
typedef uint32_t uint32;
typedef uint64_t uint64;
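
A small illustration of the loop pitfall the note above warns about; the function and variable names are invented:

// With "uint32 i" the condition "i >= 0" is always true, so once i reaches 0
// the decrement wraps to 0xFFFFFFFF and the loop never terminates.
// A signed counter stops cleanly when it reaches -1:
static int SumDescending(const int32* values, int32 n) {
  int sum = 0;
  for (int32 i = n - 1; i >= 0; --i) {  // signed index terminates correctly
    sum += values[i];
  }
  return sum;
}
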
const uint16 kuint16max = ( (uint16) 0xFFFF);
const uint32 kuint32max = ( (uint32) 0xFFFFFFFF);
const uint64 kuint64max = ( (((uint64) kuint32max) << 32) | kuint32max );
const int8 kint8max = ( ( int8) 0x7F);
const int16 kint16max = ( ( int16) 0x7FFF);
const int32 kint32max = ( ( int32) 0x7FFFFFFF);
const int64 kint64max = ( ((( int64) kint32max) << 32) | kuint32max );
const int8 kint8min = ( ( int8) 0x80);
const int16 kint16min = ( ( int16) 0x8000);
const int32 kint32min = ( ( int32) 0x80000000);
const int64 kint64min = ( (((uint64) kint32min) << 32) | 0 );
// Define the "portable" printf and scanf macros, if they're not
// already there (via the inttypes.h we #included above, hopefully).
// Mostly it's old systems that don't support inttypes.h, so we assume
// they're 32 bit.
#ifndef PRIx64
#define PRIx64 "llx"
#endif
#ifndef SCNx64
#define SCNx64 "llx"
#endif
#ifndef PRId64
#define PRId64 "lld"
#endif
#ifndef SCNd64
#define SCNd64 "lld"
#endif
#ifndef PRIu64
#define PRIu64 "llu"
#endif
#ifndef PRIxPTR
#define PRIxPTR "lx"
#endif
// Also allow for printing of a pthread_t.
#define GPRIuPTHREAD "lu"
#define GPRIxPTHREAD "lx"
#if defined(__CYGWIN__) || defined(__CYGWIN32__) || defined(__APPLE__) || defined(__FreeBSD__)
#define PRINTABLE_PTHREAD(pthreadt) reinterpret_cast<uintptr_t>(pthreadt)
#else
#define PRINTABLE_PTHREAD(pthreadt) pthreadt
#endif
// A macro to disallow the evil copy constructor and operator= functions
// This should be used in the private: declarations for a class
#define DISALLOW_EVIL_CONSTRUCTORS(TypeName) \
TypeName(const TypeName&); \
void operator=(const TypeName&)
// An alternate name that leaves out the moral judgment... :-)
#define DISALLOW_COPY_AND_ASSIGN(TypeName) DISALLOW_EVIL_CONSTRUCTORS(TypeName)
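
For example (class name invented), placed in the private section as the comment above suggests:

class Widget {
 public:
  Widget() {}
 private:
  DISALLOW_COPY_AND_ASSIGN(Widget);  // copy constructor and operator= declared, never defined
};
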
// The COMPILE_ASSERT macro can be used to verify that a compile time
// expression is true. For example, you could use it to verify the
// size of a static array:
//
// COMPILE_ASSERT(sizeof(num_content_type_names) == sizeof(int),
// content_type_names_incorrect_size);
//
// or to make sure a struct is smaller than a certain size:
//
// COMPILE_ASSERT(sizeof(foo) < 128, foo_too_large);
//
// The second argument to the macro is the name of the variable. If
// the expression is false, most compilers will issue a warning/error
// containing the name of the variable.
//
// Implementation details of COMPILE_ASSERT:
//
// - COMPILE_ASSERT works by defining an array type that has -1
// elements (and thus is invalid) when the expression is false.
//
// - The simpler definition
//
// #define COMPILE_ASSERT(expr, msg) typedef char msg[(expr) ? 1 : -1]
//
// does not work, as gcc supports variable-length arrays whose sizes
// are determined at run-time (this is gcc's extension and not part
// of the C++ standard). As a result, gcc fails to reject the
// following code with the simple definition:
//
// int foo;
// COMPILE_ASSERT(foo, msg); // not supposed to compile as foo is
// // not a compile-time constant.
//
// - By using the type CompileAssert<(bool(expr))>, we ensure that
// expr is a compile-time constant. (Template arguments must be
// determined at compile-time.)
//
// - The outer parentheses in CompileAssert<(bool(expr))> are necessary
// to work around a bug in gcc 3.4.4 and 4.0.1. If we had written
//
// CompileAssert<bool(expr)>
//
// instead, these compilers will refuse to compile
//
// COMPILE_ASSERT(5 > 0, some_message);
//
// (They seem to think the ">" in "5 > 0" marks the end of the
// template argument list.)
//
// - The array size is (bool(expr) ? 1 : -1), instead of simply
//
// ((expr) ? 1 : -1).
//
// This is to avoid running into a bug in MS VC 7.1, which
// causes ((0.0) ? 1 : -1) to incorrectly evaluate to 1.
template <bool>
struct CompileAssert {
};
#ifdef HAVE___ATTRIBUTE__
# define ATTRIBUTE_UNUSED __attribute__((unused))
#else
# define ATTRIBUTE_UNUSED
#endif
#if defined(HAVE___ATTRIBUTE__) && defined(HAVE_TLS)
#define ATTR_INITIAL_EXEC __attribute__ ((tls_model ("initial-exec")))
#else
#define ATTR_INITIAL_EXEC
#endif
#define COMPILE_ASSERT(expr, msg) \
typedef CompileAssert<(bool(expr))> msg[bool(expr) ? 1 : -1] ATTRIBUTE_UNUSED
#define arraysize(a) (sizeof(a) / sizeof(*(a)))
#define OFFSETOF_MEMBER(strct, field) \
(reinterpret_cast<char*>(&reinterpret_cast<strct*>(16)->field) - \
reinterpret_cast<char*>(16))
// bit_cast<Dest,Source> implements the equivalent of
// "*reinterpret_cast<Dest*>(&source)".
//
// The reinterpret_cast method would produce undefined behavior
// according to ISO C++ specification section 3.10, paragraph 15.
// bit_cast<> calls memcpy() which is blessed by the standard,
// especially by the example in section 3.9.
//
// Fortunately memcpy() is very fast. In optimized mode, with a
// constant size, gcc 2.95.3, gcc 4.0.1, and msvc 7.1 produce inline
// code with the minimal amount of data movement. On a 32-bit system,
// memcpy(d,s,4) compiles to one load and one store, and memcpy(d,s,8)
// compiles to two loads and two stores.
template <class Dest, class Source>
inline Dest bit_cast(const Source& source) {
COMPILE_ASSERT(sizeof(Dest) == sizeof(Source), bitcasting_unequal_sizes);
Dest dest;
memcpy(&dest, &source, sizeof(dest));
return dest;
}
// bit_store<Dest,Source> implements the equivalent of
// "dest = *reinterpret_cast<Dest*>(&source)".
//
// This prevents undefined behavior when the dest pointer is unaligned.
template <class Dest, class Source>
inline void bit_store(Dest *dest, const Source *source) {
COMPILE_ASSERT(sizeof(Dest) == sizeof(Source), bitcasting_unequal_sizes);
memcpy(dest, source, sizeof(Dest));
}
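
A brief usage sketch of the two helpers above (the values are arbitrary); it assumes this header is in scope so that bit_cast, bit_store, and uint32 resolve:

// Reinterpret a float's bytes as an integer and back without undefined behavior.
void BitCastExample() {
  float f = 1.5f;
  uint32 bits = bit_cast<uint32>(f);  // same four bytes, viewed as an integer
  float g;
  bit_store(&g, &bits);               // memcpy-based copy back into a float
  // g now holds 1.5f again.
}
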
#ifdef HAVE___ATTRIBUTE__
# define ATTRIBUTE_WEAK __attribute__((weak))
# define ATTRIBUTE_NOINLINE __attribute__((noinline))
#else
# define ATTRIBUTE_WEAK
# define ATTRIBUTE_NOINLINE
#endif
#if defined(HAVE___ATTRIBUTE__) && defined(__ELF__)
# define ATTRIBUTE_VISIBILITY_HIDDEN __attribute__((visibility("hidden")))
#else
# define ATTRIBUTE_VISIBILITY_HIDDEN
#endif
// Section attributes are supported for both ELF and Mach-O, but in
// very different ways. Here's the API we provide:
// 1) ATTRIBUTE_SECTION: put this with the declaration of all functions
// you want to be in the same linker section
// 2) DEFINE_ATTRIBUTE_SECTION_VARS: must be called once per unique
// name. You want to make sure this is executed before any
// DECLARE_ATTRIBUTE_SECTION_VARS; the easiest way is to put them
// in the same .cc file. Put this call at the global level.
// 3) INIT_ATTRIBUTE_SECTION_VARS: you can scatter calls to this in
// multiple places to help ensure execution before any
// DECLARE_ATTRIBUTE_SECTION_VARS. You must have at least one
// DEFINE, but you can have many INITs. Put each in its own scope.
// 4) DECLARE_ATTRIBUTE_SECTION_VARS: must be called before using
// ATTRIBUTE_SECTION_START or ATTRIBUTE_SECTION_STOP on a name.
// Put this call at the global level.
// 5) ATTRIBUTE_SECTION_START/ATTRIBUTE_SECTION_STOP: call this to say
// where in memory a given section is. All functions declared with
// ATTRIBUTE_SECTION are guaranteed to be between START and STOP.
#if defined(HAVE___ATTRIBUTE__) && defined(__ELF__)
# define ATTRIBUTE_SECTION(name) __attribute__ ((section (#name)))
// Weak section declaration to be used as a global declaration
// for ATTRIBUTE_SECTION_START|STOP(name) to compile and link
// even without functions with ATTRIBUTE_SECTION(name).
# define DECLARE_ATTRIBUTE_SECTION_VARS(name) \
extern char __start_##name[] ATTRIBUTE_WEAK; \
extern char __stop_##name[] ATTRIBUTE_WEAK
# define INIT_ATTRIBUTE_SECTION_VARS(name) // no-op for ELF
# define DEFINE_ATTRIBUTE_SECTION_VARS(name) // no-op for ELF
// Return void* pointers to start/end of a section of code with functions
// having ATTRIBUTE_SECTION(name), or 0 if no such function exists.
// One must DECLARE_ATTRIBUTE_SECTION(name) for this to compile and link.
# define ATTRIBUTE_SECTION_START(name) (reinterpret_cast<void*>(__start_##name))
# define ATTRIBUTE_SECTION_STOP(name) (reinterpret_cast<void*>(__stop_##name))
# define HAVE_ATTRIBUTE_SECTION_START 1
#elif defined(HAVE___ATTRIBUTE__) && defined(__MACH__)
# define ATTRIBUTE_SECTION(name) __attribute__ ((section ("__TEXT, " #name)))
#include <mach-o/getsect.h>
#include <mach-o/dyld.h>
class AssignAttributeStartEnd {
public:
AssignAttributeStartEnd(const char* name, char** pstart, char** pend) {
// Find out what dynamic library name is defined in
if (_dyld_present()) {
for (int i = _dyld_image_count() - 1; i >= 0; --i) {
const mach_header* hdr = _dyld_get_image_header(i);
#ifdef MH_MAGIC_64
if (hdr->magic == MH_MAGIC_64) {
uint64_t len;
*pstart = getsectdatafromheader_64((mach_header_64*)hdr,
"__TEXT", name, &len);
if (*pstart) { // NULL if not defined in this dynamic library
*pstart += _dyld_get_image_vmaddr_slide(i); // correct for reloc
*pend = *pstart + len;
return;
}
}
#endif
if (hdr->magic == MH_MAGIC) {
uint32_t len;
*pstart = getsectdatafromheader(hdr, "__TEXT", name, &len);
if (*pstart) { // NULL if not defined in this dynamic library
*pstart += _dyld_get_image_vmaddr_slide(i); // correct for reloc
*pend = *pstart + len;
return;
}
}
}
}
// If we get here, not defined in a dll at all. See if defined statically.
unsigned long len; // don't ask me why this type isn't uint32_t too...
*pstart = getsectdata("__TEXT", name, &len);
*pend = *pstart + len;
}
};
#define DECLARE_ATTRIBUTE_SECTION_VARS(name) \
extern char* __start_##name; \
extern char* __stop_##name
#define INIT_ATTRIBUTE_SECTION_VARS(name) \
DECLARE_ATTRIBUTE_SECTION_VARS(name); \
static const AssignAttributeStartEnd __assign_##name( \
#name, &__start_##name, &__stop_##name)
#define DEFINE_ATTRIBUTE_SECTION_VARS(name) \
char* __start_##name, *__stop_##name; \
INIT_ATTRIBUTE_SECTION_VARS(name)
# define ATTRIBUTE_SECTION_START(name) (reinterpret_cast<void*>(__start_##name))
# define ATTRIBUTE_SECTION_STOP(name) (reinterpret_cast<void*>(__stop_##name))
# define HAVE_ATTRIBUTE_SECTION_START 1
#else // not HAVE___ATTRIBUTE__ && __ELF__, nor HAVE___ATTRIBUTE__ && __MACH__
# define ATTRIBUTE_SECTION(name)
# define DECLARE_ATTRIBUTE_SECTION_VARS(name)
# define INIT_ATTRIBUTE_SECTION_VARS(name)
# define DEFINE_ATTRIBUTE_SECTION_VARS(name)
# define ATTRIBUTE_SECTION_START(name) (reinterpret_cast<void*>(0))
# define ATTRIBUTE_SECTION_STOP(name) (reinterpret_cast<void*>(0))
#endif // HAVE___ATTRIBUTE__ and __ELF__ or __MACH__
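
A hedged sketch of how the five pieces above combine within a single .cc file; the section and function names are invented, and on platforms without attribute support the START/STOP bounds simply collapse to null:

DEFINE_ATTRIBUTE_SECTION_VARS(my_probes);    // once, at global scope

void ProbeA() ATTRIBUTE_SECTION(my_probes);  // group these functions
void ProbeB() ATTRIBUTE_SECTION(my_probes);  // into one linker section
void ProbeA() {}
void ProbeB() {}

DECLARE_ATTRIBUTE_SECTION_VARS(my_probes);   // required before using START/STOP
bool AddressIsInProbeSection(const void* pc) {
  return pc >= ATTRIBUTE_SECTION_START(my_probes) &&
         pc < ATTRIBUTE_SECTION_STOP(my_probes);
}
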
#if defined(HAVE___ATTRIBUTE__)
# if (defined(__i386__) || defined(__x86_64__))
# define CACHELINE_ALIGNED __attribute__((aligned(64)))
# elif (defined(__PPC__) || defined(__PPC64__))
# define CACHELINE_ALIGNED __attribute__((aligned(16)))
# elif (defined(__arm__))
# define CACHELINE_ALIGNED __attribute__((aligned(64)))
// some ARMs have shorter cache lines (ARM1176JZF-S is 32 bytes for example) but obviously 64-byte aligned implies 32-byte aligned
# elif (defined(__mips__))
# define CACHELINE_ALIGNED __attribute__((aligned(128)))
# elif (defined(__aarch64__))
# define CACHELINE_ALIGNED __attribute__((aligned(64)))
// implementation specific, Cortex-A53 and 57 should have 64 bytes
# elif (defined(__s390__))
# define CACHELINE_ALIGNED __attribute__((aligned(256)))
# else
# error Could not determine cache line length - unknown architecture
# endif
#else
# define CACHELINE_ALIGNED
#endif // defined(HAVE___ATTRIBUTE__) && (__i386__ || __x86_64__)
// Structure for discovering alignment
union MemoryAligner {
void* p;
double d;
size_t s;
} CACHELINE_ALIGNED;
// The following enum should be used only as a constructor argument to indicate
// that the variable has static storage class, and that the constructor should
// do nothing to its state. It indicates to the reader that it is legal to
// declare a static instance of the class, provided the constructor is given
// the base::LINKER_INITIALIZED argument. Normally, it is unsafe to declare a
// static variable that has a constructor or a destructor because invocation
// order is undefined. However, IF the type can be initialized by filling with
// zeroes (which the loader does for static variables), AND the destructor also
// does nothing to the storage, then a constructor declared as
// explicit MyClass(base::LinkerInitialized x) {}
// and invoked as
// static MyClass my_variable_name(base::LINKER_INITIALIZED);
namespace base {
enum LinkerInitialized { LINKER_INITIALIZED };
}
#endif // _BASICTYPES_H_


@ -1,166 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2005, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
// This file is a compatibility layer that defines Google's version of
// command line flags that are used for configuration.
//
// We put flags into their own namespace. It is purposefully
// named in an opaque way that people should have trouble typing
// directly. The idea is that DEFINE puts the flag in the weird
// namespace, and DECLARE imports the flag from there into the
// current namespace. The net result is to force people to use
// DECLARE to get access to a flag, rather than saying
// extern bool FLAGS_logtostderr;
// or some such instead. We want this so we can put extra
// functionality (like sanity-checking) in DECLARE if we want,
// and make sure it is picked up everywhere.
//
// We also put the type of the variable in the namespace, so that
// people can't DECLARE_int32 something that they DEFINE_bool'd
// elsewhere.
#ifndef BASE_COMMANDLINEFLAGS_H_
#define BASE_COMMANDLINEFLAGS_H_
#include "../config.h"
#include <string>
#include <string.h> // for memchr
#include <stdlib.h> // for getenv
#include "base/basictypes.h"
#define DECLARE_VARIABLE(type, name) \
namespace FLAG__namespace_do_not_use_directly_use_DECLARE_##type##_instead { \
extern PERFTOOLS_DLL_DECL type FLAGS_##name; \
} \
using FLAG__namespace_do_not_use_directly_use_DECLARE_##type##_instead::FLAGS_##name
#define DEFINE_VARIABLE(type, name, value, meaning) \
namespace FLAG__namespace_do_not_use_directly_use_DECLARE_##type##_instead { \
PERFTOOLS_DLL_DECL type FLAGS_##name(value); \
char FLAGS_no##name; \
} \
using FLAG__namespace_do_not_use_directly_use_DECLARE_##type##_instead::FLAGS_##name
// bool specialization
#define DECLARE_bool(name) \
DECLARE_VARIABLE(bool, name)
#define DEFINE_bool(name, value, meaning) \
DEFINE_VARIABLE(bool, name, value, meaning)
// int32 specialization
#define DECLARE_int32(name) \
DECLARE_VARIABLE(int32, name)
#define DEFINE_int32(name, value, meaning) \
DEFINE_VARIABLE(int32, name, value, meaning)
// int64 specialization
#define DECLARE_int64(name) \
DECLARE_VARIABLE(int64, name)
#define DEFINE_int64(name, value, meaning) \
DEFINE_VARIABLE(int64, name, value, meaning)
#define DECLARE_uint64(name) \
DECLARE_VARIABLE(uint64, name)
#define DEFINE_uint64(name, value, meaning) \
DEFINE_VARIABLE(uint64, name, value, meaning)
// double specialization
#define DECLARE_double(name) \
DECLARE_VARIABLE(double, name)
#define DEFINE_double(name, value, meaning) \
DEFINE_VARIABLE(double, name, value, meaning)
// Special case for string, because we have to specify the namespace
// std::string, which doesn't play nicely with our FLAG__namespace hackery.
#define DECLARE_string(name) \
namespace FLAG__namespace_do_not_use_directly_use_DECLARE_string_instead { \
extern std::string FLAGS_##name; \
} \
using FLAG__namespace_do_not_use_directly_use_DECLARE_string_instead::FLAGS_##name
#define DEFINE_string(name, value, meaning) \
namespace FLAG__namespace_do_not_use_directly_use_DECLARE_string_instead { \
std::string FLAGS_##name(value); \
char FLAGS_no##name; \
} \
using FLAG__namespace_do_not_use_directly_use_DECLARE_string_instead::FLAGS_##name
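// Illustrative sketch (not part of the original header): a flag is created
// once with DEFINE_* and imported everywhere else with DECLARE_*; both sides
// then refer to it as FLAGS_<name>. The flag name "example_verbose" is
// hypothetical. For example,
//
//   // in exactly one .cc file:
//   DEFINE_bool(example_verbose, false, "Emit extra diagnostics");
//
//   // in any other file that needs the flag:
//   DECLARE_bool(example_verbose);
//   if (FLAGS_example_verbose) { /* emit the diagnostics */ }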
// implemented in sysinfo.cc
namespace tcmalloc {
namespace commandlineflags {
inline bool StringToBool(const char *value, bool def) {
if (!value) {
return def;
}
return memchr("tTyY1\0", value[0], 6) != NULL;
}
inline int StringToInt(const char *value, int def) {
if (!value) {
return def;
}
return strtol(value, NULL, 10);
}
inline long long StringToLongLong(const char *value, long long def) {
if (!value) {
return def;
}
return strtoll(value, NULL, 10);
}
inline double StringToDouble(const char *value, double def) {
if (!value) {
return def;
}
return strtod(value, NULL);
}
}
}
// These macros (could be functions, but I don't want to bother with a .cc
// file), make it easier to initialize flags from the environment.
#define EnvToString(envname, dflt) \
(!getenv(envname) ? (dflt) : getenv(envname))
#define EnvToBool(envname, dflt) \
tcmalloc::commandlineflags::StringToBool(getenv(envname), dflt)
#define EnvToInt(envname, dflt) \
tcmalloc::commandlineflags::StringToInt(getenv(envname), dflt)
#define EnvToInt64(envname, dflt) \
tcmalloc::commandlineflags::StringToLongLong(getenv(envname), dflt)
#define EnvToDouble(envname, dflt) \
tcmalloc::commandlineflags::StringToDouble(getenv(envname), dflt)
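// Illustrative sketch (not part of the original header): these helpers are
// typically used to seed a flag's default value from the environment at
// static-initialization time. The variable and environment names below are
// hypothetical.
static const bool kExampleVerboseDefault =
    EnvToBool("EXAMPLE_PERFTOOLS_VERBOSE", false);
static const int kExampleSampleRateDefault =
    EnvToInt("EXAMPLE_PERFTOOLS_SAMPLE_RATE", 100);
// The same pattern works as the default argument of a DEFINE_* macro, e.g.
//   DEFINE_int64(example_sample_parameter,
//                EnvToInt64("EXAMPLE_SAMPLE_PARAMETER", 0),
//                "Example sampling period");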
#endif // BASE_COMMANDLINEFLAGS_H_

View File

@ -1,179 +0,0 @@
/* Copyright (c) 2008-2009, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ---
* Author: Kostya Serebryany
*/
#ifdef __cplusplus
# error "This file should be built as pure C to avoid name mangling"
#endif
#include "config.h"
#include <stdlib.h>
#include <string.h>
#include "base/dynamic_annotations.h"
#include "getenv_safe.h" // for TCMallocGetenvSafe
#ifdef __GNUC__
/* valgrind.h uses gcc extensions so it won't build with other compilers */
# ifdef HAVE_VALGRIND_H /* prefer the user's copy if they have it */
# include <valgrind.h>
# else /* otherwise just use the copy that we have */
# include "third_party/valgrind.h"
# endif
#endif
/* Compiler-based ThreadSanitizer defines
DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL = 1
and provides its own definitions of the functions. */
#ifndef DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL
# define DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL 0
#endif
/* Each function is empty and called (via a macro) only in debug mode.
The arguments are captured by dynamic tools at runtime. */
#if DYNAMIC_ANNOTATIONS_ENABLED == 1 \
&& DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL == 0
void AnnotateRWLockCreate(const char *file, int line,
const volatile void *lock){}
void AnnotateRWLockDestroy(const char *file, int line,
const volatile void *lock){}
void AnnotateRWLockAcquired(const char *file, int line,
const volatile void *lock, long is_w){}
void AnnotateRWLockReleased(const char *file, int line,
const volatile void *lock, long is_w){}
void AnnotateBarrierInit(const char *file, int line,
const volatile void *barrier, long count,
long reinitialization_allowed) {}
void AnnotateBarrierWaitBefore(const char *file, int line,
const volatile void *barrier) {}
void AnnotateBarrierWaitAfter(const char *file, int line,
const volatile void *barrier) {}
void AnnotateBarrierDestroy(const char *file, int line,
const volatile void *barrier) {}
void AnnotateCondVarWait(const char *file, int line,
const volatile void *cv,
const volatile void *lock){}
void AnnotateCondVarSignal(const char *file, int line,
const volatile void *cv){}
void AnnotateCondVarSignalAll(const char *file, int line,
const volatile void *cv){}
void AnnotatePublishMemoryRange(const char *file, int line,
const volatile void *address,
long size){}
void AnnotateUnpublishMemoryRange(const char *file, int line,
const volatile void *address,
long size){}
void AnnotatePCQCreate(const char *file, int line,
const volatile void *pcq){}
void AnnotatePCQDestroy(const char *file, int line,
const volatile void *pcq){}
void AnnotatePCQPut(const char *file, int line,
const volatile void *pcq){}
void AnnotatePCQGet(const char *file, int line,
const volatile void *pcq){}
void AnnotateNewMemory(const char *file, int line,
const volatile void *mem,
long size){}
void AnnotateExpectRace(const char *file, int line,
const volatile void *mem,
const char *description){}
void AnnotateBenignRace(const char *file, int line,
const volatile void *mem,
const char *description){}
void AnnotateBenignRaceSized(const char *file, int line,
const volatile void *mem,
long size,
const char *description) {}
void AnnotateMutexIsUsedAsCondVar(const char *file, int line,
const volatile void *mu){}
void AnnotateTraceMemory(const char *file, int line,
const volatile void *arg){}
void AnnotateThreadName(const char *file, int line,
const char *name){}
void AnnotateIgnoreReadsBegin(const char *file, int line){}
void AnnotateIgnoreReadsEnd(const char *file, int line){}
void AnnotateIgnoreWritesBegin(const char *file, int line){}
void AnnotateIgnoreWritesEnd(const char *file, int line){}
void AnnotateEnableRaceDetection(const char *file, int line, int enable){}
void AnnotateNoOp(const char *file, int line,
const volatile void *arg){}
void AnnotateFlushState(const char *file, int line){}
#endif /* DYNAMIC_ANNOTATIONS_ENABLED == 1
&& DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL == 0 */
#if DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL == 0
static int GetRunningOnValgrind(void) {
#ifdef RUNNING_ON_VALGRIND
if (RUNNING_ON_VALGRIND) return 1;
#endif
const char *running_on_valgrind_str = TCMallocGetenvSafe("RUNNING_ON_VALGRIND");
if (running_on_valgrind_str) {
return strcmp(running_on_valgrind_str, "0") != 0;
}
return 0;
}
/* See the comments in dynamic_annotations.h */
int RunningOnValgrind(void) {
static volatile int running_on_valgrind = -1;
int local_running_on_valgrind = running_on_valgrind;
/* C doesn't have thread-safe initialization of statics, and we
don't want to depend on pthread_once here, so hack it. */
ANNOTATE_BENIGN_RACE(&running_on_valgrind, "safe hack");
if (local_running_on_valgrind == -1)
running_on_valgrind = local_running_on_valgrind = GetRunningOnValgrind();
return local_running_on_valgrind;
}
#endif /* DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL == 0 */
/* See the comments in dynamic_annotations.h */
double ValgrindSlowdown(void) {
/* Same initialization hack as in RunningOnValgrind(). */
static volatile double slowdown = 0.0;
double local_slowdown = slowdown;
ANNOTATE_BENIGN_RACE(&slowdown, "safe hack");
if (RunningOnValgrind() == 0) {
return 1.0;
}
if (local_slowdown == 0.0) {
char *env = getenv("VALGRIND_SLOWDOWN");
slowdown = local_slowdown = env ? atof(env) : 50.0;
}
return local_slowdown;
}

View File

@ -1,627 +0,0 @@
/* Copyright (c) 2008, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ---
* Author: Kostya Serebryany
*/
/* This file defines dynamic annotations for use with dynamic analysis
   tools such as valgrind, PIN, etc.
Dynamic annotation is a source code annotation that affects
the generated code (that is, the annotation is not a comment).
Each such annotation is attached to a particular
instruction and/or to a particular object (address) in the program.
The annotations that should be used by users are macros in all upper-case
(e.g., ANNOTATE_NEW_MEMORY).
Actual implementation of these macros may differ depending on the
dynamic analysis tool being used.
See http://code.google.com/p/data-race-test/ for more information.
This file supports the following dynamic analysis tools:
- None (DYNAMIC_ANNOTATIONS_ENABLED is not defined or zero).
Macros are defined empty.
- ThreadSanitizer, Helgrind, DRD (DYNAMIC_ANNOTATIONS_ENABLED is 1).
Macros are defined as calls to non-inlinable empty functions
that are intercepted by Valgrind. */
#ifndef BASE_DYNAMIC_ANNOTATIONS_H_
#define BASE_DYNAMIC_ANNOTATIONS_H_
#ifndef DYNAMIC_ANNOTATIONS_ENABLED
# define DYNAMIC_ANNOTATIONS_ENABLED 0
#endif
#if DYNAMIC_ANNOTATIONS_ENABLED != 0
/* -------------------------------------------------------------
Annotations useful when implementing condition variables such as CondVar,
using conditional critical sections (Await/LockWhen) and when constructing
user-defined synchronization mechanisms.
The annotations ANNOTATE_HAPPENS_BEFORE() and ANNOTATE_HAPPENS_AFTER() can
be used to define happens-before arcs in user-defined synchronization
mechanisms: the race detector will infer an arc from the former to the
latter when they share the same argument pointer.
Example 1 (reference counting):
void Unref() {
ANNOTATE_HAPPENS_BEFORE(&refcount_);
if (AtomicDecrementByOne(&refcount_) == 0) {
ANNOTATE_HAPPENS_AFTER(&refcount_);
delete this;
}
}
Example 2 (message queue):
void MyQueue::Put(Type *e) {
MutexLock lock(&mu_);
ANNOTATE_HAPPENS_BEFORE(e);
PutElementIntoMyQueue(e);
}
Type *MyQueue::Get() {
MutexLock lock(&mu_);
Type *e = GetElementFromMyQueue();
ANNOTATE_HAPPENS_AFTER(e);
return e;
}
Note: when possible, please use the existing reference counting and message
queue implementations instead of inventing new ones. */
/* Report that wait on the condition variable at address "cv" has succeeded
and the lock at address "lock" is held. */
#define ANNOTATE_CONDVAR_LOCK_WAIT(cv, lock) \
AnnotateCondVarWait(__FILE__, __LINE__, cv, lock)
/* Report that wait on the condition variable at "cv" has succeeded. Variant
w/o lock. */
#define ANNOTATE_CONDVAR_WAIT(cv) \
AnnotateCondVarWait(__FILE__, __LINE__, cv, NULL)
/* Report that we are about to signal on the condition variable at address
"cv". */
#define ANNOTATE_CONDVAR_SIGNAL(cv) \
AnnotateCondVarSignal(__FILE__, __LINE__, cv)
/* Report that we are about to signal_all on the condition variable at "cv". */
#define ANNOTATE_CONDVAR_SIGNAL_ALL(cv) \
AnnotateCondVarSignalAll(__FILE__, __LINE__, cv)
/* Annotations for user-defined synchronization mechanisms. */
#define ANNOTATE_HAPPENS_BEFORE(obj) ANNOTATE_CONDVAR_SIGNAL(obj)
#define ANNOTATE_HAPPENS_AFTER(obj) ANNOTATE_CONDVAR_WAIT(obj)
/* Report that the bytes in the range [pointer, pointer+size) are about
to be published safely. The race checker will create a happens-before
arc from the call ANNOTATE_PUBLISH_MEMORY_RANGE(pointer, size) to
subsequent accesses to this memory.
Note: this annotation may not work properly if the race detector uses
sampling, i.e. does not observe all memory accesses.
*/
#define ANNOTATE_PUBLISH_MEMORY_RANGE(pointer, size) \
AnnotatePublishMemoryRange(__FILE__, __LINE__, pointer, size)
/* DEPRECATED. Don't use it. */
#define ANNOTATE_UNPUBLISH_MEMORY_RANGE(pointer, size) \
AnnotateUnpublishMemoryRange(__FILE__, __LINE__, pointer, size)
/* DEPRECATED. Don't use it. */
#define ANNOTATE_SWAP_MEMORY_RANGE(pointer, size) \
do { \
ANNOTATE_UNPUBLISH_MEMORY_RANGE(pointer, size); \
ANNOTATE_PUBLISH_MEMORY_RANGE(pointer, size); \
} while (0)
/* Instruct the tool to create a happens-before arc between mu->Unlock() and
mu->Lock(). This annotation may slow down the race detector and hide real
races. Normally it is used only when it would be difficult to annotate each
of the mutex's critical sections individually using the annotations above.
This annotation makes sense only for hybrid race detectors. For pure
happens-before detectors this is a no-op. For more details see
http://code.google.com/p/data-race-test/wiki/PureHappensBeforeVsHybrid . */
#define ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX(mu) \
AnnotateMutexIsUsedAsCondVar(__FILE__, __LINE__, mu)
/* Deprecated. Use ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX. */
#define ANNOTATE_MUTEX_IS_USED_AS_CONDVAR(mu) \
AnnotateMutexIsUsedAsCondVar(__FILE__, __LINE__, mu)
/* -------------------------------------------------------------
Annotations useful when defining memory allocators, or when memory that
was protected in one way starts to be protected in another. */
/* Report that new memory at "address" of size "size" has been allocated.
   This might be used when the memory has been retrieved from a free list and
   is about to be reused, or when the locking discipline for a variable
changes. */
#define ANNOTATE_NEW_MEMORY(address, size) \
AnnotateNewMemory(__FILE__, __LINE__, address, size)
/* -------------------------------------------------------------
Annotations useful when defining FIFO queues that transfer data between
threads. */
/* Report that the producer-consumer queue (such as ProducerConsumerQueue) at
address "pcq" has been created. The ANNOTATE_PCQ_* annotations
should be used only for FIFO queues. For non-FIFO queues use
ANNOTATE_HAPPENS_BEFORE (for put) and ANNOTATE_HAPPENS_AFTER (for get). */
#define ANNOTATE_PCQ_CREATE(pcq) \
AnnotatePCQCreate(__FILE__, __LINE__, pcq)
/* Report that the queue at address "pcq" is about to be destroyed. */
#define ANNOTATE_PCQ_DESTROY(pcq) \
AnnotatePCQDestroy(__FILE__, __LINE__, pcq)
/* Report that we are about to put an element into a FIFO queue at address
"pcq". */
#define ANNOTATE_PCQ_PUT(pcq) \
AnnotatePCQPut(__FILE__, __LINE__, pcq)
/* Report that we've just got an element from a FIFO queue at address "pcq". */
#define ANNOTATE_PCQ_GET(pcq) \
AnnotatePCQGet(__FILE__, __LINE__, pcq)
/* -------------------------------------------------------------
Annotations that suppress errors. It is usually better to express the
program's synchronization using the other annotations, but these can
be used when all else fails. */
/* Report that we may have a benign race at "pointer", with size
"sizeof(*(pointer))". "pointer" must be a non-void* pointer. Insert at the
point where "pointer" has been allocated, preferably close to the point
where the race happens. See also ANNOTATE_BENIGN_RACE_STATIC. */
#define ANNOTATE_BENIGN_RACE(pointer, description) \
AnnotateBenignRaceSized(__FILE__, __LINE__, pointer, \
sizeof(*(pointer)), description)
/* Same as ANNOTATE_BENIGN_RACE(address, description), but applies to
the memory range [address, address+size). */
#define ANNOTATE_BENIGN_RACE_SIZED(address, size, description) \
AnnotateBenignRaceSized(__FILE__, __LINE__, address, size, description)
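/* Illustrative sketch (not part of the original header): a deliberately
   unsynchronized variable can be excused close to where it is created.
   The variable name below is hypothetical.
     static int example_hit_count;            // updated without a lock
     ANNOTATE_BENIGN_RACE(&example_hit_count, "approximate counter, races ok");
*/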
/* Request the analysis tool to ignore all reads in the current thread
until ANNOTATE_IGNORE_READS_END is called.
Useful to ignore intentional racey reads, while still checking
other reads and all writes.
See also ANNOTATE_UNPROTECTED_READ. */
#define ANNOTATE_IGNORE_READS_BEGIN() \
AnnotateIgnoreReadsBegin(__FILE__, __LINE__)
/* Stop ignoring reads. */
#define ANNOTATE_IGNORE_READS_END() \
AnnotateIgnoreReadsEnd(__FILE__, __LINE__)
/* Similar to ANNOTATE_IGNORE_READS_BEGIN, but ignore writes. */
#define ANNOTATE_IGNORE_WRITES_BEGIN() \
AnnotateIgnoreWritesBegin(__FILE__, __LINE__)
/* Stop ignoring writes. */
#define ANNOTATE_IGNORE_WRITES_END() \
AnnotateIgnoreWritesEnd(__FILE__, __LINE__)
/* Start ignoring all memory accesses (reads and writes). */
#define ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() \
do {\
ANNOTATE_IGNORE_READS_BEGIN();\
ANNOTATE_IGNORE_WRITES_BEGIN();\
}while(0)\
/* Stop ignoring all memory accesses. */
#define ANNOTATE_IGNORE_READS_AND_WRITES_END() \
do {\
ANNOTATE_IGNORE_WRITES_END();\
ANNOTATE_IGNORE_READS_END();\
}while(0)\
/* Enable (enable!=0) or disable (enable==0) race detection for all threads.
This annotation could be useful if you want to skip expensive race analysis
during some period of program execution, e.g. during initialization. */
#define ANNOTATE_ENABLE_RACE_DETECTION(enable) \
AnnotateEnableRaceDetection(__FILE__, __LINE__, enable)
/* -------------------------------------------------------------
Annotations useful for debugging. */
/* Request to trace every access to "address". */
#define ANNOTATE_TRACE_MEMORY(address) \
AnnotateTraceMemory(__FILE__, __LINE__, address)
/* Report the current thread name to a race detector. */
#define ANNOTATE_THREAD_NAME(name) \
AnnotateThreadName(__FILE__, __LINE__, name)
/* -------------------------------------------------------------
Annotations useful when implementing locks. They are not
normally needed by modules that merely use locks.
The "lock" argument is a pointer to the lock object. */
/* Report that a lock has been created at address "lock". */
#define ANNOTATE_RWLOCK_CREATE(lock) \
AnnotateRWLockCreate(__FILE__, __LINE__, lock)
/* Report that the lock at address "lock" is about to be destroyed. */
#define ANNOTATE_RWLOCK_DESTROY(lock) \
AnnotateRWLockDestroy(__FILE__, __LINE__, lock)
/* Report that the lock at address "lock" has been acquired.
is_w=1 for writer lock, is_w=0 for reader lock. */
#define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) \
AnnotateRWLockAcquired(__FILE__, __LINE__, lock, is_w)
/* Report that the lock at address "lock" is about to be released. */
#define ANNOTATE_RWLOCK_RELEASED(lock, is_w) \
AnnotateRWLockReleased(__FILE__, __LINE__, lock, is_w)
/* -------------------------------------------------------------
Annotations useful when implementing barriers. They are not
normally needed by modules that merely use barriers.
The "barrier" argument is a pointer to the barrier object. */
/* Report that the "barrier" has been initialized with initial "count".
If 'reinitialization_allowed' is true, initialization is allowed to happen
multiple times w/o calling barrier_destroy() */
#define ANNOTATE_BARRIER_INIT(barrier, count, reinitialization_allowed) \
AnnotateBarrierInit(__FILE__, __LINE__, barrier, count, \
reinitialization_allowed)
/* Report that we are about to enter barrier_wait("barrier"). */
#define ANNOTATE_BARRIER_WAIT_BEFORE(barrier) \
AnnotateBarrierWaitBefore(__FILE__, __LINE__, barrier)
/* Report that we just exited barrier_wait("barrier"). */
#define ANNOTATE_BARRIER_WAIT_AFTER(barrier) \
AnnotateBarrierWaitAfter(__FILE__, __LINE__, barrier)
/* Report that the "barrier" has been destroyed. */
#define ANNOTATE_BARRIER_DESTROY(barrier) \
AnnotateBarrierDestroy(__FILE__, __LINE__, barrier)
/* -------------------------------------------------------------
Annotations useful for testing race detectors. */
/* Report that we expect a race on the variable at "address".
Use only in unit tests for a race detector. */
#define ANNOTATE_EXPECT_RACE(address, description) \
AnnotateExpectRace(__FILE__, __LINE__, address, description)
/* A no-op. Insert where you like to test the interceptors. */
#define ANNOTATE_NO_OP(arg) \
AnnotateNoOp(__FILE__, __LINE__, arg)
/* Force the race detector to flush its state. The actual effect depends on
* the implementation of the detector. */
#define ANNOTATE_FLUSH_STATE() \
AnnotateFlushState(__FILE__, __LINE__)
#else /* DYNAMIC_ANNOTATIONS_ENABLED == 0 */
#define ANNOTATE_RWLOCK_CREATE(lock) /* empty */
#define ANNOTATE_RWLOCK_DESTROY(lock) /* empty */
#define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) /* empty */
#define ANNOTATE_RWLOCK_RELEASED(lock, is_w) /* empty */
#define ANNOTATE_BARRIER_INIT(barrier, count, reinitialization_allowed) /* */
#define ANNOTATE_BARRIER_WAIT_BEFORE(barrier) /* empty */
#define ANNOTATE_BARRIER_WAIT_AFTER(barrier) /* empty */
#define ANNOTATE_BARRIER_DESTROY(barrier) /* empty */
#define ANNOTATE_CONDVAR_LOCK_WAIT(cv, lock) /* empty */
#define ANNOTATE_CONDVAR_WAIT(cv) /* empty */
#define ANNOTATE_CONDVAR_SIGNAL(cv) /* empty */
#define ANNOTATE_CONDVAR_SIGNAL_ALL(cv) /* empty */
#define ANNOTATE_HAPPENS_BEFORE(obj) /* empty */
#define ANNOTATE_HAPPENS_AFTER(obj) /* empty */
#define ANNOTATE_PUBLISH_MEMORY_RANGE(address, size) /* empty */
#define ANNOTATE_UNPUBLISH_MEMORY_RANGE(address, size) /* empty */
#define ANNOTATE_SWAP_MEMORY_RANGE(address, size) /* empty */
#define ANNOTATE_PCQ_CREATE(pcq) /* empty */
#define ANNOTATE_PCQ_DESTROY(pcq) /* empty */
#define ANNOTATE_PCQ_PUT(pcq) /* empty */
#define ANNOTATE_PCQ_GET(pcq) /* empty */
#define ANNOTATE_NEW_MEMORY(address, size) /* empty */
#define ANNOTATE_EXPECT_RACE(address, description) /* empty */
#define ANNOTATE_BENIGN_RACE(address, description) /* empty */
#define ANNOTATE_BENIGN_RACE_SIZED(address, size, description) /* empty */
#define ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX(mu) /* empty */
#define ANNOTATE_MUTEX_IS_USED_AS_CONDVAR(mu) /* empty */
#define ANNOTATE_TRACE_MEMORY(arg) /* empty */
#define ANNOTATE_THREAD_NAME(name) /* empty */
#define ANNOTATE_IGNORE_READS_BEGIN() /* empty */
#define ANNOTATE_IGNORE_READS_END() /* empty */
#define ANNOTATE_IGNORE_WRITES_BEGIN() /* empty */
#define ANNOTATE_IGNORE_WRITES_END() /* empty */
#define ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() /* empty */
#define ANNOTATE_IGNORE_READS_AND_WRITES_END() /* empty */
#define ANNOTATE_ENABLE_RACE_DETECTION(enable) /* empty */
#define ANNOTATE_NO_OP(arg) /* empty */
#define ANNOTATE_FLUSH_STATE() /* empty */
#endif /* DYNAMIC_ANNOTATIONS_ENABLED */
/* Macro definitions for GCC attributes that allow static thread safety
analysis to recognize and use some of the dynamic annotations as
escape hatches.
TODO(lcwu): remove the check for __SUPPORT_DYN_ANNOTATION__ once the
default crosstool/GCC supports these GCC attributes. */
#define ANNOTALYSIS_STATIC_INLINE
#define ANNOTALYSIS_SEMICOLON_OR_EMPTY_BODY ;
#define ANNOTALYSIS_IGNORE_READS_BEGIN
#define ANNOTALYSIS_IGNORE_READS_END
#define ANNOTALYSIS_IGNORE_WRITES_BEGIN
#define ANNOTALYSIS_IGNORE_WRITES_END
#define ANNOTALYSIS_UNPROTECTED_READ
#if defined(__GNUC__) && (!defined(SWIG)) && (!defined(__clang__)) && \
defined(__SUPPORT_TS_ANNOTATION__) && defined(__SUPPORT_DYN_ANNOTATION__)
#if DYNAMIC_ANNOTATIONS_ENABLED == 0
#define ANNOTALYSIS_ONLY 1
#undef ANNOTALYSIS_STATIC_INLINE
#define ANNOTALYSIS_STATIC_INLINE static inline
#undef ANNOTALYSIS_SEMICOLON_OR_EMPTY_BODY
#define ANNOTALYSIS_SEMICOLON_OR_EMPTY_BODY { (void)file; (void)line; }
#endif
/* Only emit attributes when annotalysis is enabled. */
#if defined(__SUPPORT_TS_ANNOTATION__) && defined(__SUPPORT_DYN_ANNOTATION__)
#undef ANNOTALYSIS_IGNORE_READS_BEGIN
#define ANNOTALYSIS_IGNORE_READS_BEGIN __attribute__ ((ignore_reads_begin))
#undef ANNOTALYSIS_IGNORE_READS_END
#define ANNOTALYSIS_IGNORE_READS_END __attribute__ ((ignore_reads_end))
#undef ANNOTALYSIS_IGNORE_WRITES_BEGIN
#define ANNOTALYSIS_IGNORE_WRITES_BEGIN __attribute__ ((ignore_writes_begin))
#undef ANNOTALYSIS_IGNORE_WRITES_END
#define ANNOTALYSIS_IGNORE_WRITES_END __attribute__ ((ignore_writes_end))
#undef ANNOTALYSIS_UNPROTECTED_READ
#define ANNOTALYSIS_UNPROTECTED_READ __attribute__ ((unprotected_read))
#endif
#endif // defined(__GNUC__) && (!defined(SWIG)) && (!defined(__clang__))
/* Use the macros above rather than using these functions directly. */
#ifdef __cplusplus
extern "C" {
#endif
void AnnotateRWLockCreate(const char *file, int line,
const volatile void *lock);
void AnnotateRWLockDestroy(const char *file, int line,
const volatile void *lock);
void AnnotateRWLockAcquired(const char *file, int line,
const volatile void *lock, long is_w);
void AnnotateRWLockReleased(const char *file, int line,
const volatile void *lock, long is_w);
void AnnotateBarrierInit(const char *file, int line,
const volatile void *barrier, long count,
long reinitialization_allowed);
void AnnotateBarrierWaitBefore(const char *file, int line,
const volatile void *barrier);
void AnnotateBarrierWaitAfter(const char *file, int line,
const volatile void *barrier);
void AnnotateBarrierDestroy(const char *file, int line,
const volatile void *barrier);
void AnnotateCondVarWait(const char *file, int line,
const volatile void *cv,
const volatile void *lock);
void AnnotateCondVarSignal(const char *file, int line,
const volatile void *cv);
void AnnotateCondVarSignalAll(const char *file, int line,
const volatile void *cv);
void AnnotatePublishMemoryRange(const char *file, int line,
const volatile void *address,
long size);
void AnnotateUnpublishMemoryRange(const char *file, int line,
const volatile void *address,
long size);
void AnnotatePCQCreate(const char *file, int line,
const volatile void *pcq);
void AnnotatePCQDestroy(const char *file, int line,
const volatile void *pcq);
void AnnotatePCQPut(const char *file, int line,
const volatile void *pcq);
void AnnotatePCQGet(const char *file, int line,
const volatile void *pcq);
void AnnotateNewMemory(const char *file, int line,
const volatile void *address,
long size);
void AnnotateExpectRace(const char *file, int line,
const volatile void *address,
const char *description);
void AnnotateBenignRace(const char *file, int line,
const volatile void *address,
const char *description);
void AnnotateBenignRaceSized(const char *file, int line,
const volatile void *address,
long size,
const char *description);
void AnnotateMutexIsUsedAsCondVar(const char *file, int line,
const volatile void *mu);
void AnnotateTraceMemory(const char *file, int line,
const volatile void *arg);
void AnnotateThreadName(const char *file, int line,
const char *name);
ANNOTALYSIS_STATIC_INLINE
void AnnotateIgnoreReadsBegin(const char *file, int line)
ANNOTALYSIS_IGNORE_READS_BEGIN ANNOTALYSIS_SEMICOLON_OR_EMPTY_BODY
ANNOTALYSIS_STATIC_INLINE
void AnnotateIgnoreReadsEnd(const char *file, int line)
ANNOTALYSIS_IGNORE_READS_END ANNOTALYSIS_SEMICOLON_OR_EMPTY_BODY
ANNOTALYSIS_STATIC_INLINE
void AnnotateIgnoreWritesBegin(const char *file, int line)
ANNOTALYSIS_IGNORE_WRITES_BEGIN ANNOTALYSIS_SEMICOLON_OR_EMPTY_BODY
ANNOTALYSIS_STATIC_INLINE
void AnnotateIgnoreWritesEnd(const char *file, int line)
ANNOTALYSIS_IGNORE_WRITES_END ANNOTALYSIS_SEMICOLON_OR_EMPTY_BODY
void AnnotateEnableRaceDetection(const char *file, int line, int enable);
void AnnotateNoOp(const char *file, int line,
const volatile void *arg);
void AnnotateFlushState(const char *file, int line);
/* Return non-zero value if running under valgrind.
If "valgrind.h" is included into dynamic_annotations.c,
the regular valgrind mechanism will be used.
See http://valgrind.org/docs/manual/manual-core-adv.html about
RUNNING_ON_VALGRIND and other valgrind "client requests".
The file "valgrind.h" may be obtained by doing
svn co svn://svn.valgrind.org/valgrind/trunk/include
If for some reason you can't use "valgrind.h" or want to fake valgrind,
there are two ways to make this function return non-zero:
- Use environment variable: export RUNNING_ON_VALGRIND=1
- Make your tool intercept the function RunningOnValgrind() and
change its return value.
*/
int RunningOnValgrind(void);
/* ValgrindSlowdown returns:
* 1.0, if (RunningOnValgrind() == 0)
* 50.0, if (RunningOnValgrind() != 0 && getenv("VALGRIND_SLOWDOWN") == NULL)
* atof(getenv("VALGRIND_SLOWDOWN")) otherwise
This function can be used to scale timeout values:
EXAMPLE:
for (;;) {
DoExpensiveBackgroundTask();
SleepForSeconds(5 * ValgrindSlowdown());
}
*/
double ValgrindSlowdown(void);
#ifdef __cplusplus
}
#endif
#if DYNAMIC_ANNOTATIONS_ENABLED != 0 && defined(__cplusplus)
/* ANNOTATE_UNPROTECTED_READ is the preferred way to annotate racey reads.
Instead of doing
ANNOTATE_IGNORE_READS_BEGIN();
... = x;
ANNOTATE_IGNORE_READS_END();
one can use
... = ANNOTATE_UNPROTECTED_READ(x); */
template <class T>
inline T ANNOTATE_UNPROTECTED_READ(const volatile T &x)
ANNOTALYSIS_UNPROTECTED_READ {
ANNOTATE_IGNORE_READS_BEGIN();
T res = x;
ANNOTATE_IGNORE_READS_END();
return res;
}
/* Apply ANNOTATE_BENIGN_RACE_SIZED to a static variable. */
#define ANNOTATE_BENIGN_RACE_STATIC(static_var, description) \
namespace { \
class static_var ## _annotator { \
public: \
static_var ## _annotator() { \
ANNOTATE_BENIGN_RACE_SIZED(&static_var, \
sizeof(static_var), \
# static_var ": " description); \
} \
}; \
static static_var ## _annotator the ## static_var ## _annotator;\
}
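/* Illustrative sketch (not part of the original header): this macro is placed
   right next to the static variable it excuses. The variable name below is
   hypothetical.
     static int example_racy_hint;
     ANNOTATE_BENIGN_RACE_STATIC(example_racy_hint,
                                 "best-effort hint, concurrent writes are harmless")
*/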
#else /* DYNAMIC_ANNOTATIONS_ENABLED == 0 */
#define ANNOTATE_UNPROTECTED_READ(x) (x)
#define ANNOTATE_BENIGN_RACE_STATIC(static_var, description) /* empty */
#endif /* DYNAMIC_ANNOTATIONS_ENABLED */
/* Annotalysis, a GCC based static analyzer, is able to understand and use
some of the dynamic annotations defined in this file. However, dynamic
annotations are usually disabled in the opt mode (to avoid additional
runtime overheads) while Annotalysis only works in the opt mode.
In order for Annotalysis to use these dynamic annotations when they
are disabled, we re-define these annotations here. Note that unlike the
original macro definitions above, these macros are expanded to calls to
static inline functions so that the compiler will be able to remove the
calls after the analysis. */
#ifdef ANNOTALYSIS_ONLY
#undef ANNOTALYSIS_ONLY
/* Undefine and re-define the macros that the static analyzer understands. */
#undef ANNOTATE_IGNORE_READS_BEGIN
#define ANNOTATE_IGNORE_READS_BEGIN() \
AnnotateIgnoreReadsBegin(__FILE__, __LINE__)
#undef ANNOTATE_IGNORE_READS_END
#define ANNOTATE_IGNORE_READS_END() \
AnnotateIgnoreReadsEnd(__FILE__, __LINE__)
#undef ANNOTATE_IGNORE_WRITES_BEGIN
#define ANNOTATE_IGNORE_WRITES_BEGIN() \
AnnotateIgnoreWritesBegin(__FILE__, __LINE__)
#undef ANNOTATE_IGNORE_WRITES_END
#define ANNOTATE_IGNORE_WRITES_END() \
AnnotateIgnoreWritesEnd(__FILE__, __LINE__)
#undef ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN
#define ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() \
do { \
ANNOTATE_IGNORE_READS_BEGIN(); \
ANNOTATE_IGNORE_WRITES_BEGIN(); \
}while(0) \
#undef ANNOTATE_IGNORE_READS_AND_WRITES_END
#define ANNOTATE_IGNORE_READS_AND_WRITES_END() \
do { \
ANNOTATE_IGNORE_WRITES_END(); \
ANNOTATE_IGNORE_READS_END(); \
}while(0) \
#if defined(__cplusplus)
#undef ANNOTATE_UNPROTECTED_READ
template <class T>
inline T ANNOTATE_UNPROTECTED_READ(const volatile T &x)
ANNOTALYSIS_UNPROTECTED_READ {
ANNOTATE_IGNORE_READS_BEGIN();
T res = x;
ANNOTATE_IGNORE_READS_END();
return res;
}
#endif /* __cplusplus */
#endif /* ANNOTALYSIS_ONLY */
/* Undefine the macros intended only in this file. */
#undef ANNOTALYSIS_STATIC_INLINE
#undef ANNOTALYSIS_SEMICOLON_OR_EMPTY_BODY
#endif /* BASE_DYNAMIC_ANNOTATIONS_H_ */

View File

@ -1,443 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2008, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
// Author: Paul Pluzhnikov
//
// Allow dynamic symbol lookup in an in-memory Elf image.
//
#include "base/elf_mem_image.h"
#ifdef HAVE_ELF_MEM_IMAGE // defined in elf_mem_image.h
#include <stddef.h> // for size_t, ptrdiff_t
#include "base/logging.h"
// From binutils/include/elf/common.h (this doesn't appear to be documented
// anywhere else).
//
// /* This flag appears in a Versym structure. It means that the symbol
// is hidden, and is only visible with an explicit version number.
// This is a GNU extension. */
// #define VERSYM_HIDDEN 0x8000
//
// /* This is the mask for the rest of the Versym information. */
// #define VERSYM_VERSION 0x7fff
#define VERSYM_VERSION 0x7fff
#if __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-const-variable"
#endif
namespace base {
namespace {
template <int N> class ElfClass {
public:
static const int kElfClass = -1;
static int ElfBind(const ElfW(Sym) *) {
CHECK(false); // << "Unexpected word size";
return 0;
}
static int ElfType(const ElfW(Sym) *) {
CHECK(false); // << "Unexpected word size";
return 0;
}
};
template <> class ElfClass<32> {
public:
static const int kElfClass = ELFCLASS32;
static int ElfBind(const ElfW(Sym) *symbol) {
return ELF32_ST_BIND(symbol->st_info);
}
static int ElfType(const ElfW(Sym) *symbol) {
return ELF32_ST_TYPE(symbol->st_info);
}
};
template <> class ElfClass<64> {
public:
static const int kElfClass = ELFCLASS64;
static int ElfBind(const ElfW(Sym) *symbol) {
return ELF64_ST_BIND(symbol->st_info);
}
static int ElfType(const ElfW(Sym) *symbol) {
return ELF64_ST_TYPE(symbol->st_info);
}
};
typedef ElfClass<__WORDSIZE> CurrentElfClass;
// Extract an element from one of the ELF tables, cast it to desired type.
// This is just a simple arithmetic and a glorified cast.
// Callers are responsible for bounds checking.
template <class T>
const T* GetTableElement(const ElfW(Ehdr) *ehdr,
ElfW(Off) table_offset,
ElfW(Word) element_size,
size_t index) {
return reinterpret_cast<const T*>(reinterpret_cast<const char *>(ehdr)
+ table_offset
+ index * element_size);
}
} // namespace
const void *const ElfMemImage::kInvalidBase =
reinterpret_cast<const void *>(~0L);
ElfMemImage::ElfMemImage(const void *base) {
CHECK(base != kInvalidBase);
Init(base);
}
int ElfMemImage::GetNumSymbols() const {
if (!hash_) {
return 0;
}
// See http://www.caldera.com/developers/gabi/latest/ch5.dynamic.html#hash
return hash_[1];
}
const ElfW(Sym) *ElfMemImage::GetDynsym(int index) const {
CHECK_LT(index, GetNumSymbols());
return dynsym_ + index;
}
const ElfW(Versym) *ElfMemImage::GetVersym(int index) const {
CHECK_LT(index, GetNumSymbols());
return versym_ + index;
}
const ElfW(Phdr) *ElfMemImage::GetPhdr(int index) const {
CHECK_LT(index, ehdr_->e_phnum);
return GetTableElement<ElfW(Phdr)>(ehdr_,
ehdr_->e_phoff,
ehdr_->e_phentsize,
index);
}
const char *ElfMemImage::GetDynstr(ElfW(Word) offset) const {
CHECK_LT(offset, strsize_);
return dynstr_ + offset;
}
const void *ElfMemImage::GetSymAddr(const ElfW(Sym) *sym) const {
if (sym->st_shndx == SHN_UNDEF || sym->st_shndx >= SHN_LORESERVE) {
// Symbol corresponds to "special" (e.g. SHN_ABS) section.
return reinterpret_cast<const void *>(sym->st_value);
}
CHECK_LT(link_base_, sym->st_value);
return GetTableElement<char>(ehdr_, 0, 1, sym->st_value) - link_base_;
}
const ElfW(Verdef) *ElfMemImage::GetVerdef(int index) const {
CHECK_LE(index, verdefnum_);
const ElfW(Verdef) *version_definition = verdef_;
while (version_definition->vd_ndx < index && version_definition->vd_next) {
const char *const version_definition_as_char =
reinterpret_cast<const char *>(version_definition);
version_definition =
reinterpret_cast<const ElfW(Verdef) *>(version_definition_as_char +
version_definition->vd_next);
}
return version_definition->vd_ndx == index ? version_definition : NULL;
}
const ElfW(Verdaux) *ElfMemImage::GetVerdefAux(
const ElfW(Verdef) *verdef) const {
return reinterpret_cast<const ElfW(Verdaux) *>(verdef+1);
}
const char *ElfMemImage::GetVerstr(ElfW(Word) offset) const {
CHECK_LT(offset, strsize_);
return dynstr_ + offset;
}
void ElfMemImage::Init(const void *base) {
ehdr_ = NULL;
dynsym_ = NULL;
dynstr_ = NULL;
versym_ = NULL;
verdef_ = NULL;
hash_ = NULL;
strsize_ = 0;
verdefnum_ = 0;
link_base_ = ~0L; // Sentinel: PT_LOAD .p_vaddr can't possibly be this.
if (!base) {
return;
}
const intptr_t base_as_uintptr_t = reinterpret_cast<uintptr_t>(base);
// Fake VDSO has low bit set.
const bool fake_vdso = ((base_as_uintptr_t & 1) != 0);
base = reinterpret_cast<const void *>(base_as_uintptr_t & ~1);
const char *const base_as_char = reinterpret_cast<const char *>(base);
if (base_as_char[EI_MAG0] != ELFMAG0 || base_as_char[EI_MAG1] != ELFMAG1 ||
base_as_char[EI_MAG2] != ELFMAG2 || base_as_char[EI_MAG3] != ELFMAG3) {
RAW_DCHECK(false, "no ELF magic"); // at %p", base);
return;
}
int elf_class = base_as_char[EI_CLASS];
if (elf_class != CurrentElfClass::kElfClass) {
DCHECK_EQ(elf_class, CurrentElfClass::kElfClass);
return;
}
switch (base_as_char[EI_DATA]) {
case ELFDATA2LSB: {
if (__LITTLE_ENDIAN != __BYTE_ORDER) {
DCHECK_EQ(__LITTLE_ENDIAN, __BYTE_ORDER); // << ": wrong byte order";
return;
}
break;
}
case ELFDATA2MSB: {
if (__BIG_ENDIAN != __BYTE_ORDER) {
DCHECK_EQ(__BIG_ENDIAN, __BYTE_ORDER); // << ": wrong byte order";
return;
}
break;
}
default: {
RAW_DCHECK(false, "unexpected data encoding"); // << base_as_char[EI_DATA];
return;
}
}
ehdr_ = reinterpret_cast<const ElfW(Ehdr) *>(base);
const ElfW(Phdr) *dynamic_program_header = NULL;
for (int i = 0; i < ehdr_->e_phnum; ++i) {
const ElfW(Phdr) *const program_header = GetPhdr(i);
switch (program_header->p_type) {
case PT_LOAD:
if (link_base_ == ~0L) {
link_base_ = program_header->p_vaddr;
}
break;
case PT_DYNAMIC:
dynamic_program_header = program_header;
break;
}
}
if (link_base_ == ~0L || !dynamic_program_header) {
RAW_DCHECK(~0L != link_base_, "no PT_LOADs in VDSO");
RAW_DCHECK(dynamic_program_header, "no PT_DYNAMIC in VDSO");
// Mark this image as not present. Can not recur infinitely.
Init(0);
return;
}
ptrdiff_t relocation =
base_as_char - reinterpret_cast<const char *>(link_base_);
ElfW(Dyn) *dynamic_entry =
reinterpret_cast<ElfW(Dyn) *>(dynamic_program_header->p_vaddr +
relocation);
for (; dynamic_entry->d_tag != DT_NULL; ++dynamic_entry) {
ElfW(Xword) value = dynamic_entry->d_un.d_val;
if (fake_vdso) {
// A complication: in the real VDSO, dynamic entries are not relocated
// (it wasn't loaded by a dynamic loader). But when testing with a
// "fake" dlopen()ed vdso library, the loader relocates some (but
// not all!) of them before we get here.
if (dynamic_entry->d_tag == DT_VERDEF) {
        // The only dynamic entry (of the ones we care about) that the libc-2.3.6
// loader doesn't relocate.
value += relocation;
}
} else {
// Real VDSO. Everything needs to be relocated.
value += relocation;
}
switch (dynamic_entry->d_tag) {
case DT_HASH:
hash_ = reinterpret_cast<ElfW(Word) *>(value);
break;
case DT_SYMTAB:
dynsym_ = reinterpret_cast<ElfW(Sym) *>(value);
break;
case DT_STRTAB:
dynstr_ = reinterpret_cast<const char *>(value);
break;
case DT_VERSYM:
versym_ = reinterpret_cast<ElfW(Versym) *>(value);
break;
case DT_VERDEF:
verdef_ = reinterpret_cast<ElfW(Verdef) *>(value);
break;
case DT_VERDEFNUM:
verdefnum_ = dynamic_entry->d_un.d_val;
break;
case DT_STRSZ:
strsize_ = dynamic_entry->d_un.d_val;
break;
default:
// Unrecognized entries explicitly ignored.
break;
}
}
if (!hash_ || !dynsym_ || !dynstr_ || !versym_ ||
!verdef_ || !verdefnum_ || !strsize_) {
RAW_DCHECK(hash_, "invalid VDSO (no DT_HASH)");
RAW_DCHECK(dynsym_, "invalid VDSO (no DT_SYMTAB)");
RAW_DCHECK(dynstr_, "invalid VDSO (no DT_STRTAB)");
RAW_DCHECK(versym_, "invalid VDSO (no DT_VERSYM)");
RAW_DCHECK(verdef_, "invalid VDSO (no DT_VERDEF)");
RAW_DCHECK(verdefnum_, "invalid VDSO (no DT_VERDEFNUM)");
RAW_DCHECK(strsize_, "invalid VDSO (no DT_STRSZ)");
// Mark this image as not present. Can not recur infinitely.
Init(0);
return;
}
}
bool ElfMemImage::LookupSymbol(const char *name,
const char *version,
int type,
SymbolInfo *info) const {
for (SymbolIterator it = begin(); it != end(); ++it) {
if (strcmp(it->name, name) == 0 && strcmp(it->version, version) == 0 &&
CurrentElfClass::ElfType(it->symbol) == type) {
if (info) {
*info = *it;
}
return true;
}
}
return false;
}
bool ElfMemImage::LookupSymbolByAddress(const void *address,
SymbolInfo *info_out) const {
for (SymbolIterator it = begin(); it != end(); ++it) {
const char *const symbol_start =
reinterpret_cast<const char *>(it->address);
const char *const symbol_end = symbol_start + it->symbol->st_size;
if (symbol_start <= address && address < symbol_end) {
if (info_out) {
// Client wants to know details for that symbol (the usual case).
if (CurrentElfClass::ElfBind(it->symbol) == STB_GLOBAL) {
// Strong symbol; just return it.
*info_out = *it;
return true;
} else {
// Weak or local. Record it, but keep looking for a strong one.
*info_out = *it;
}
} else {
// Client only cares if there is an overlapping symbol.
return true;
}
}
}
return false;
}
ElfMemImage::SymbolIterator::SymbolIterator(const void *const image, int index)
: index_(index), image_(image) {
}
const ElfMemImage::SymbolInfo *ElfMemImage::SymbolIterator::operator->() const {
return &info_;
}
const ElfMemImage::SymbolInfo& ElfMemImage::SymbolIterator::operator*() const {
return info_;
}
bool ElfMemImage::SymbolIterator::operator==(const SymbolIterator &rhs) const {
return this->image_ == rhs.image_ && this->index_ == rhs.index_;
}
bool ElfMemImage::SymbolIterator::operator!=(const SymbolIterator &rhs) const {
return !(*this == rhs);
}
ElfMemImage::SymbolIterator &ElfMemImage::SymbolIterator::operator++() {
this->Update(1);
return *this;
}
ElfMemImage::SymbolIterator ElfMemImage::begin() const {
SymbolIterator it(this, 0);
it.Update(0);
return it;
}
ElfMemImage::SymbolIterator ElfMemImage::end() const {
return SymbolIterator(this, GetNumSymbols());
}
void ElfMemImage::SymbolIterator::Update(int increment) {
const ElfMemImage *image = reinterpret_cast<const ElfMemImage *>(image_);
CHECK(image->IsPresent() || increment == 0);
if (!image->IsPresent()) {
return;
}
index_ += increment;
if (index_ >= image->GetNumSymbols()) {
index_ = image->GetNumSymbols();
return;
}
const ElfW(Sym) *symbol = image->GetDynsym(index_);
const ElfW(Versym) *version_symbol = image->GetVersym(index_);
CHECK(symbol && version_symbol);
const char *const symbol_name = image->GetDynstr(symbol->st_name);
const ElfW(Versym) version_index = version_symbol[0] & VERSYM_VERSION;
const ElfW(Verdef) *version_definition = NULL;
const char *version_name = "";
if (symbol->st_shndx == SHN_UNDEF) {
// Undefined symbols reference DT_VERNEED, not DT_VERDEF, and
// version_index could well be greater than verdefnum_, so calling
    // GetVerdef(version_index) may trigger an assertion.
} else {
version_definition = image->GetVerdef(version_index);
}
if (version_definition) {
// I am expecting 1 or 2 auxiliary entries: 1 for the version itself,
// optional 2nd if the version has a parent.
CHECK_LE(1, version_definition->vd_cnt);
CHECK_LE(version_definition->vd_cnt, 2);
const ElfW(Verdaux) *version_aux = image->GetVerdefAux(version_definition);
version_name = image->GetVerstr(version_aux->vda_name);
}
info_.name = symbol_name;
info_.version = version_name;
info_.address = image->GetSymAddr(symbol);
info_.symbol = symbol;
}
} // namespace base
#if __clang__
#pragma clang diagnostic pop
#endif
#endif // HAVE_ELF_MEM_IMAGE

View File

@ -1,135 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2008, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
// Author: Paul Pluzhnikov
//
// Allow dynamic symbol lookup for in-memory Elf images.
#ifndef BASE_ELF_MEM_IMAGE_H_
#define BASE_ELF_MEM_IMAGE_H_
#include "../config.h"
#ifdef HAVE_FEATURES_H
#include <features.h> // for __GLIBC__
#endif
// Maybe one day we can rewrite this file not to require the elf
// symbol extensions in glibc, but for right now we need them.
#if defined(__ELF__) && defined(__GLIBC__) && !defined(__native_client__)
#define HAVE_ELF_MEM_IMAGE 1
#include <stdlib.h>
#include <link.h> // for ElfW
namespace base {
// An in-memory ELF image (may not exist on disk).
class ElfMemImage {
public:
// Sentinel: there could never be an elf image at this address.
static const void *const kInvalidBase;
// Information about a single vdso symbol.
// All pointers are into .dynsym, .dynstr, or .text of the VDSO.
// Do not free() them or modify through them.
struct SymbolInfo {
const char *name; // E.g. "__vdso_getcpu"
const char *version; // E.g. "LINUX_2.6", could be ""
// for unversioned symbol.
const void *address; // Relocated symbol address.
const ElfW(Sym) *symbol; // Symbol in the dynamic symbol table.
};
// Supports iteration over all dynamic symbols.
class SymbolIterator {
public:
friend class ElfMemImage;
const SymbolInfo *operator->() const;
const SymbolInfo &operator*() const;
SymbolIterator& operator++();
bool operator!=(const SymbolIterator &rhs) const;
bool operator==(const SymbolIterator &rhs) const;
private:
SymbolIterator(const void *const image, int index);
void Update(int incr);
SymbolInfo info_;
int index_;
const void *const image_;
};
explicit ElfMemImage(const void *base);
void Init(const void *base);
bool IsPresent() const { return ehdr_ != NULL; }
const ElfW(Phdr)* GetPhdr(int index) const;
const ElfW(Sym)* GetDynsym(int index) const;
const ElfW(Versym)* GetVersym(int index) const;
const ElfW(Verdef)* GetVerdef(int index) const;
const ElfW(Verdaux)* GetVerdefAux(const ElfW(Verdef) *verdef) const;
const char* GetDynstr(ElfW(Word) offset) const;
const void* GetSymAddr(const ElfW(Sym) *sym) const;
const char* GetVerstr(ElfW(Word) offset) const;
int GetNumSymbols() const;
SymbolIterator begin() const;
SymbolIterator end() const;
// Look up versioned dynamic symbol in the image.
// Returns false if image is not present, or doesn't contain given
// symbol/version/type combination.
// If info_out != NULL, additional details are filled in.
bool LookupSymbol(const char *name, const char *version,
int symbol_type, SymbolInfo *info_out) const;
// Find info about symbol (if any) which overlaps given address.
// Returns true if symbol was found; false if image isn't present
// or doesn't have a symbol overlapping given address.
// If info_out != NULL, additional details are filled in.
bool LookupSymbolByAddress(const void *address, SymbolInfo *info_out) const;
private:
const ElfW(Ehdr) *ehdr_;
const ElfW(Sym) *dynsym_;
const ElfW(Versym) *versym_;
const ElfW(Verdef) *verdef_;
const ElfW(Word) *hash_;
const char *dynstr_;
size_t strsize_;
size_t verdefnum_;
ElfW(Addr) link_base_; // Link-time base (p_vaddr of first PT_LOAD).
};
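// Illustrative sketch (not part of the original header): the usual consumer
// points an ElfMemImage at the kernel-provided VDSO and looks up a versioned
// symbol. The getauxval()/AT_SYSINFO_EHDR lookup and the symbol and version
// strings below are assumptions for the example, not taken from this file.
//
//   const void *vdso_base =
//       reinterpret_cast<const void *>(getauxval(AT_SYSINFO_EHDR));
//   if (vdso_base != NULL) {
//     base::ElfMemImage image(vdso_base);
//     base::ElfMemImage::SymbolInfo info;
//     if (image.IsPresent() &&
//         image.LookupSymbol("__vdso_getcpu", "LINUX_2.6", STT_FUNC, &info)) {
//       /* info.address is the relocated entry point of that symbol. */
//     }
//   }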
} // namespace base
#endif // __ELF__ and __GLIBC__ and !__native_client__
#endif // BASE_ELF_MEM_IMAGE_H_

View File

@ -1,401 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2005-2008, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ---
* Author: Markus Gutschke, Carl Crous
*/
#ifndef _ELFCORE_H
#define _ELFCORE_H
#ifdef __cplusplus
extern "C" {
#endif
/* We currently only support x86-32, x86-64, ARM, MIPS, PPC on Linux.
* Porting to other related platforms should not be difficult.
*/
#if (defined(__i386__) || defined(__x86_64__) || defined(__arm__) || \
defined(__mips__) || defined(__PPC__)) && defined(__linux)
#include <stdarg.h>
#include <stdint.h>
#include <sys/types.h>
#include "../config.h"
/* Define the DUMPER symbol to make sure that there is exactly one
* core dumper built into the library.
*/
#define DUMPER "ELF"
/* By the time that we get a chance to read CPU registers in the
* calling thread, they are already in a not particularly useful
* state. Besides, there will be multiple frames on the stack that are
* just making the core file confusing. To fix this problem, we take a
* snapshot of the frame pointer, stack pointer, and instruction
* pointer at an earlier time, and then insert these values into the
* core file.
*/
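/* Illustrative sketch (not part of the original header), using the x86
 * register struct defined below: a core-dumping entry point captures its
 * caller's registers first and then hands the snapshot to the code that
 * writes the core file. The consumer name WriteCoreWithRegs is made up for
 * the example.
 *
 *   FRAME(frame);                    // snapshot FP/SP/IP, errno and tid now
 *   ...
 *   i386_regs regs;
 *   SET_FRAME(frame, regs);          // restore errno, copy out the registers
 *   WriteCoreWithRegs(&regs);        // hypothetical writer of the core file
 */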
#if defined(__i386__) || defined(__x86_64__)
typedef struct i386_regs { /* Normal (non-FPU) CPU registers */
#ifdef __x86_64__
#define BP rbp
#define SP rsp
#define IP rip
uint64_t r15,r14,r13,r12,rbp,rbx,r11,r10;
uint64_t r9,r8,rax,rcx,rdx,rsi,rdi,orig_rax;
uint64_t rip,cs,eflags;
uint64_t rsp,ss;
uint64_t fs_base, gs_base;
uint64_t ds,es,fs,gs;
#else
#define BP ebp
#define SP esp
#define IP eip
uint32_t ebx, ecx, edx, esi, edi, ebp, eax;
uint16_t ds, __ds, es, __es;
uint16_t fs, __fs, gs, __gs;
uint32_t orig_eax, eip;
uint16_t cs, __cs;
uint32_t eflags, esp;
uint16_t ss, __ss;
#endif
} i386_regs;
#elif defined(__arm__)
typedef struct arm_regs { /* General purpose registers */
#define BP uregs[11] /* Frame pointer */
#define SP uregs[13] /* Stack pointer */
#define IP uregs[15] /* Program counter */
#define LR uregs[14] /* Link register */
long uregs[18];
} arm_regs;
#elif defined(__mips__)
typedef struct mips_regs {
unsigned long pad[6]; /* Unused padding to match kernel structures */
unsigned long uregs[32]; /* General purpose registers. */
unsigned long hi; /* Used for multiplication and division. */
unsigned long lo;
unsigned long cp0_epc; /* Program counter. */
unsigned long cp0_badvaddr;
unsigned long cp0_status;
unsigned long cp0_cause;
unsigned long unused;
} mips_regs;
#elif defined (__PPC__)
typedef struct ppc_regs {
#define SP uregs[1] /* Stack pointer */
#define IP rip /* Program counter */
#define LR lr /* Link register */
unsigned long uregs[32]; /* General Purpose Registers - r0-r31. */
double fpr[32]; /* Floating-Point Registers - f0-f31. */
unsigned long rip; /* Program counter. */
unsigned long msr;
unsigned long ccr;
unsigned long lr;
unsigned long ctr;
unsigned long xeq;
unsigned long mq;
} ppc_regs;
#endif
#if defined(__i386__) && defined(__GNUC__)
/* On x86 we provide an optimized version of the FRAME() macro, if the
* compiler supports a GCC-style asm() directive. This results in somewhat
* more accurate values for CPU registers.
*/
typedef struct Frame {
struct i386_regs uregs;
int errno_;
pid_t tid;
} Frame;
#define FRAME(f) Frame f; \
do { \
f.errno_ = errno; \
f.tid = sys_gettid(); \
__asm__ volatile ( \
"push %%ebp\n" \
"push %%ebx\n" \
"mov %%ebx,0(%%eax)\n" \
"mov %%ecx,4(%%eax)\n" \
"mov %%edx,8(%%eax)\n" \
"mov %%esi,12(%%eax)\n" \
"mov %%edi,16(%%eax)\n" \
"mov %%ebp,20(%%eax)\n" \
"mov %%eax,24(%%eax)\n" \
"mov %%ds,%%ebx\n" \
"mov %%ebx,28(%%eax)\n" \
"mov %%es,%%ebx\n" \
"mov %%ebx,32(%%eax)\n" \
"mov %%fs,%%ebx\n" \
"mov %%ebx,36(%%eax)\n" \
"mov %%gs,%%ebx\n" \
"mov %%ebx, 40(%%eax)\n" \
"call 0f\n" \
"0:pop %%ebx\n" \
"add $1f-0b,%%ebx\n" \
"mov %%ebx,48(%%eax)\n" \
"mov %%cs,%%ebx\n" \
"mov %%ebx,52(%%eax)\n" \
"pushf\n" \
"pop %%ebx\n" \
"mov %%ebx,56(%%eax)\n" \
"mov %%esp,%%ebx\n" \
"add $8,%%ebx\n" \
"mov %%ebx,60(%%eax)\n" \
"mov %%ss,%%ebx\n" \
"mov %%ebx,64(%%eax)\n" \
"pop %%ebx\n" \
"pop %%ebp\n" \
"1:" \
: : "a" (&f) : "memory"); \
} while (0)
#define SET_FRAME(f,r) \
do { \
errno = (f).errno_; \
(r) = (f).uregs; \
} while (0)
#elif defined(__x86_64__) && defined(__GNUC__)
/* The FRAME and SET_FRAME macros for x86_64. */
typedef struct Frame {
struct i386_regs uregs;
int errno_;
pid_t tid;
} Frame;
#define FRAME(f) Frame f; \
do { \
f.errno_ = errno; \
f.tid = sys_gettid(); \
__asm__ volatile ( \
"push %%rbp\n" \
"push %%rbx\n" \
"mov %%r15,0(%%rax)\n" \
"mov %%r14,8(%%rax)\n" \
"mov %%r13,16(%%rax)\n" \
"mov %%r12,24(%%rax)\n" \
"mov %%rbp,32(%%rax)\n" \
"mov %%rbx,40(%%rax)\n" \
"mov %%r11,48(%%rax)\n" \
"mov %%r10,56(%%rax)\n" \
"mov %%r9,64(%%rax)\n" \
"mov %%r8,72(%%rax)\n" \
"mov %%rax,80(%%rax)\n" \
"mov %%rcx,88(%%rax)\n" \
"mov %%rdx,96(%%rax)\n" \
"mov %%rsi,104(%%rax)\n" \
"mov %%rdi,112(%%rax)\n" \
"mov %%ds,%%rbx\n" \
"mov %%rbx,184(%%rax)\n" \
"mov %%es,%%rbx\n" \
"mov %%rbx,192(%%rax)\n" \
"mov %%fs,%%rbx\n" \
"mov %%rbx,200(%%rax)\n" \
"mov %%gs,%%rbx\n" \
"mov %%rbx,208(%%rax)\n" \
"call 0f\n" \
"0:pop %%rbx\n" \
"add $1f-0b,%%rbx\n" \
"mov %%rbx,128(%%rax)\n" \
"mov %%cs,%%rbx\n" \
"mov %%rbx,136(%%rax)\n" \
"pushf\n" \
"pop %%rbx\n" \
"mov %%rbx,144(%%rax)\n" \
"mov %%rsp,%%rbx\n" \
"add $16,%%ebx\n" \
"mov %%rbx,152(%%rax)\n" \
"mov %%ss,%%rbx\n" \
"mov %%rbx,160(%%rax)\n" \
"pop %%rbx\n" \
"pop %%rbp\n" \
"1:" \
: : "a" (&f) : "memory"); \
} while (0)
#define SET_FRAME(f,r) \
do { \
errno = (f).errno_; \
(f).uregs.fs_base = (r).fs_base; \
(f).uregs.gs_base = (r).gs_base; \
(r) = (f).uregs; \
} while (0)
#elif defined(__arm__) && defined(__GNUC__)
/* ARM calling conventions are a little more tricky. A little assembly
* helps in obtaining an accurate snapshot of all registers.
*/
typedef struct Frame {
struct arm_regs arm;
int errno_;
pid_t tid;
} Frame;
#define FRAME(f) Frame f; \
do { \
long cpsr; \
f.errno_ = errno; \
f.tid = sys_gettid(); \
__asm__ volatile( \
"stmia %0, {r0-r15}\n" /* All integer regs */\
: : "r"(&f.arm) : "memory"); \
f.arm.uregs[16] = 0; \
__asm__ volatile( \
"mrs %0, cpsr\n" /* Condition code reg */\
: "=r"(cpsr)); \
f.arm.uregs[17] = cpsr; \
} while (0)
#define SET_FRAME(f,r) \
do { \
/* Don't override the FPU status register. */\
/* Use the value obtained from ptrace(). This*/\
/* works, because our code does not perform */\
/* any FPU operations, itself. */\
long fps = (f).arm.uregs[16]; \
errno = (f).errno_; \
(r) = (f).arm; \
(r).uregs[16] = fps; \
} while (0)
#elif defined(__mips__) && defined(__GNUC__)
typedef struct Frame {
struct mips_regs mips_regs;
int errno_;
pid_t tid;
} Frame;
#define MIPSREG(n) ({ register unsigned long r __asm__("$"#n); r; })
#define FRAME(f) Frame f = { 0 }; \
do { \
unsigned long hi, lo; \
register unsigned long pc __asm__("$31"); \
f.mips_regs.uregs[ 0] = MIPSREG( 0); \
f.mips_regs.uregs[ 1] = MIPSREG( 1); \
f.mips_regs.uregs[ 2] = MIPSREG( 2); \
f.mips_regs.uregs[ 3] = MIPSREG( 3); \
f.mips_regs.uregs[ 4] = MIPSREG( 4); \
f.mips_regs.uregs[ 5] = MIPSREG( 5); \
f.mips_regs.uregs[ 6] = MIPSREG( 6); \
f.mips_regs.uregs[ 7] = MIPSREG( 7); \
f.mips_regs.uregs[ 8] = MIPSREG( 8); \
f.mips_regs.uregs[ 9] = MIPSREG( 9); \
f.mips_regs.uregs[10] = MIPSREG(10); \
f.mips_regs.uregs[11] = MIPSREG(11); \
f.mips_regs.uregs[12] = MIPSREG(12); \
f.mips_regs.uregs[13] = MIPSREG(13); \
f.mips_regs.uregs[14] = MIPSREG(14); \
f.mips_regs.uregs[15] = MIPSREG(15); \
f.mips_regs.uregs[16] = MIPSREG(16); \
f.mips_regs.uregs[17] = MIPSREG(17); \
f.mips_regs.uregs[18] = MIPSREG(18); \
f.mips_regs.uregs[19] = MIPSREG(19); \
f.mips_regs.uregs[20] = MIPSREG(20); \
f.mips_regs.uregs[21] = MIPSREG(21); \
f.mips_regs.uregs[22] = MIPSREG(22); \
f.mips_regs.uregs[23] = MIPSREG(23); \
f.mips_regs.uregs[24] = MIPSREG(24); \
f.mips_regs.uregs[25] = MIPSREG(25); \
f.mips_regs.uregs[26] = MIPSREG(26); \
f.mips_regs.uregs[27] = MIPSREG(27); \
f.mips_regs.uregs[28] = MIPSREG(28); \
f.mips_regs.uregs[29] = MIPSREG(29); \
f.mips_regs.uregs[30] = MIPSREG(30); \
f.mips_regs.uregs[31] = MIPSREG(31); \
__asm__ volatile ("mfhi %0" : "=r"(hi)); \
__asm__ volatile ("mflo %0" : "=r"(lo)); \
__asm__ volatile ("jal 1f; 1:nop" : "=r"(pc)); \
f.mips_regs.hi = hi; \
f.mips_regs.lo = lo; \
f.mips_regs.cp0_epc = pc; \
f.errno_ = errno; \
f.tid = sys_gettid(); \
} while (0)
#define SET_FRAME(f,r) \
do { \
errno = (f).errno_; \
memcpy((r).uregs, (f).mips_regs.uregs, \
32*sizeof(unsigned long)); \
(r).hi = (f).mips_regs.hi; \
(r).lo = (f).mips_regs.lo; \
(r).cp0_epc = (f).mips_regs.cp0_epc; \
} while (0)
#else
/* If we do not have a hand-optimized assembly version of the FRAME()
* macro, we cannot reliably unroll the stack. So, we show a few additional
* stack frames for the coredumper.
*/
typedef struct Frame {
pid_t tid;
} Frame;
#define FRAME(f) Frame f; do { f.tid = sys_gettid(); } while (0)
#define SET_FRAME(f,r) do { } while (0)
#endif
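/* Illustrative sketch (added for clarity, not part of the original header):
 * how FRAME() and SET_FRAME() are typically paired. It assumes the x86
 * register struct defined above and sys_gettid() from
 * linux_syscall_support.h; the #if 0 guard marks it as example-only.
 */
#if 0
static void ExampleSnapshot(void) {
  FRAME(frame);             /* capture registers, errno and thread id now  */
  struct i386_regs regs;
  SET_FRAME(frame, regs);   /* later: restore errno, copy snapshot to regs */
}
#endif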
/* Internal function for generating a core file. This API can change without
* notice and is only supposed to be used internally by the core dumper.
*
* This function works for both single- and multi-threaded core
* dumps. If called as
*
* FRAME(frame);
* InternalGetCoreDump(&frame, 0, NULL, ap);
*
* it creates a core file that only contains information about the
* calling thread.
*
* Optionally, the caller can provide information about other threads
* by passing their process ids in "thread_pids". The process id of
* the caller should not be included in this array. All of the threads
* must have been attached to with ptrace(), prior to calling this
* function. They will be detached when "InternalGetCoreDump()" returns.
*
* This function either returns a file handle that can be read for obtaining
* a core dump, or "-1" in case of an error. In the latter case, "errno"
* will be set appropriately.
*
* While "InternalGetCoreDump()" is not technically async signal safe, you
* might be tempted to invoke it from a signal handler. The code goes to
* great lengths to make a best effort that this will actually work. But in
* any case, you must make sure that you preserve the value of "errno"
* yourself. It is guaranteed to be clobbered otherwise.
*
* Also, "InternalGetCoreDump" is not strictly speaking re-entrant. Again,
* it makes a best effort to behave reasonably when called in a multi-
* threaded environment, but it is ultimately the caller's responsibility
* to provide locking.
*/
int InternalGetCoreDump(void *frame, int num_threads, pid_t *thread_pids,
va_list ap
/* const struct CoreDumpParameters *params,
const char *file_name,
const char *PATH
*/);
#endif
#ifdef __cplusplus
}
#endif
#endif /* _ELFCORE_H */
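A minimal usage sketch (added for illustration, not part of the original header). It assumes sys_gettid() from linux_syscall_support.h is available and that the va_list simply forwards whatever optional parameters the public CoreDump API takes; the wrapper below exists only to obtain a valid va_list.
static int DumpCallingThread(int unused, ...) {
  va_list ap;
  va_start(ap, unused);
  FRAME(frame);                                /* snapshot registers early */
  int fd = InternalGetCoreDump(&frame, 0, NULL, ap);
  va_end(ap);
  return fd;  /* readable core image on success, -1 with errno on failure */
}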


@ -1,74 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2005, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
// Author: Jacob Hoffman-Andrews
#ifndef _GOOGLEINIT_H
#define _GOOGLEINIT_H
#include "base/logging.h"
class GoogleInitializer {
public:
typedef void (*VoidFunction)(void);
GoogleInitializer(const char* name, VoidFunction ctor, VoidFunction dtor)
: name_(name), destructor_(dtor) {
RAW_VLOG(10, "<GoogleModuleObject> constructing: %s\n", name_);
if (ctor)
ctor();
}
~GoogleInitializer() {
RAW_VLOG(10, "<GoogleModuleObject> destroying: %s\n", name_);
if (destructor_)
destructor_();
}
private:
const char* const name_;
const VoidFunction destructor_;
};
#define REGISTER_MODULE_INITIALIZER(name, body) \
namespace { \
static void google_init_module_##name () { body; } \
GoogleInitializer google_initializer_module_##name(#name, \
google_init_module_##name, NULL); \
}
#define REGISTER_MODULE_DESTRUCTOR(name, body) \
namespace { \
static void google_destruct_module_##name () { body; } \
GoogleInitializer google_destructor_module_##name(#name, \
NULL, google_destruct_module_##name); \
}
#endif /* _GOOGLEINIT_H */
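A hedged usage sketch (illustrative; the module name example_module is hypothetical): each macro expands to a static GoogleInitializer object, so the registered function runs during static initialization and the matching destructor runs at program shutdown.
REGISTER_MODULE_INITIALIZER(example_module, RAW_VLOG(10, "example_module init\n"));
REGISTER_MODULE_DESTRUCTOR(example_module, RAW_VLOG(10, "example_module teardown\n"));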

File diff suppressed because it is too large


@ -1,707 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2005-2007, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ---
* Author: Markus Gutschke
*/
#include "base/linuxthreads.h"
#ifdef THREADS
#ifdef __cplusplus
extern "C" {
#endif
#include <sched.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <fcntl.h>
#include <sys/socket.h>
#include <sys/wait.h>
#include <sys/prctl.h>
#include <semaphore.h>
#include "base/linux_syscall_support.h"
#include "base/thread_lister.h"
#ifndef CLONE_UNTRACED
#define CLONE_UNTRACED 0x00800000
#endif
/* Synchronous signals that should not be blocked while in the lister thread.
*/
static const int sync_signals[] = { SIGABRT, SIGILL, SIGFPE, SIGSEGV, SIGBUS,
SIGXCPU, SIGXFSZ };
/* itoa() is not a standard function, and we cannot safely call printf()
* after suspending threads. So, we just implement our own copy. A
* recursive approach is the easiest here.
*/
static char *local_itoa(char *buf, int i) {
if (i < 0) {
*buf++ = '-';
return local_itoa(buf, -i);
} else {
if (i >= 10)
buf = local_itoa(buf, i/10);
*buf++ = (i%10) + '0';
*buf = '\000';
return buf;
}
}
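/* Worked example (added for clarity, not in the original source):
 * local_itoa(buf, -42) stores '-', '4', '2' and a terminating NUL in buf and
 * returns a pointer to that NUL, which is why the /proc path construction
 * below can chain it with strcpy()/strrchr().
 */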
/* Wrapper around clone() that runs "fn" on the same stack as the
* caller! Unlike fork(), the cloned thread shares the same address space.
* The caller must be careful to use only minimal amounts of stack until
* the cloned thread has returned.
* There is a good chance that the cloned thread and the caller will share
* the same copy of errno!
*/
#ifdef __GNUC__
#if __GNUC__ == 3 && __GNUC_MINOR__ >= 1 || __GNUC__ > 3
/* Try to force this function into a separate stack frame, and make sure
* that arguments are passed on the stack.
*/
static int local_clone (int (*fn)(void *), void *arg, ...)
__attribute__ ((noinline));
#endif
#endif
/* To avoid the gap crossing page boundaries, increase by the large page
* size that PowerPC systems typically use. */
#ifdef __PPC64__
#define CLONE_STACK_SIZE 65536
#else
#define CLONE_STACK_SIZE 4096
#endif
static int local_clone (int (*fn)(void *), void *arg, ...) {
/* Leave a gap of CLONE_STACK_SIZE bytes between the caller's stack and the new clone. This
* should be more than sufficient for the caller to call waitpid() until
* the cloned thread terminates.
*
* It is important that we set the CLONE_UNTRACED flag, because newer
* versions of "gdb" otherwise attempt to attach to our thread, and will
* attempt to reap its status codes. This subsequently results in the
* caller hanging indefinitely in waitpid(), waiting for a change in
* status that will never happen. By setting the CLONE_UNTRACED flag, we
* prevent "gdb" from stealing events, but we still expect the thread
* lister to fail, because it cannot PTRACE_ATTACH to the process that
* is being debugged. This is OK and the error code will be reported
* correctly.
*/
return sys_clone(fn, (char *)&arg - CLONE_STACK_SIZE,
CLONE_VM|CLONE_FS|CLONE_FILES|CLONE_UNTRACED, arg, 0, 0, 0);
}
/* Local substitute for the atoi() function, which is not necessarily safe
* to call once threads are suspended (depending on whether libc looks up
* locale information, when executing atoi()).
*/
static int local_atoi(const char *s) {
int n = 0;
int neg = *s == '-';
if (neg)
s++;
while (*s >= '0' && *s <= '9')
n = 10*n + (*s++ - '0');
return neg ? -n : n;
}
/* Re-runs fn until it doesn't cause EINTR
*/
#define NO_INTR(fn) do {} while ((fn) < 0 && errno == EINTR)
/* Wrap a class around system calls, in order to give us access to
* a private copy of errno. This only works in C++, but it has the
* advantage of not needing nested functions, which are a non-standard
* language extension.
*/
#ifdef __cplusplus
namespace {
class SysCalls {
public:
#define SYS_CPLUSPLUS
#define SYS_ERRNO my_errno
#define SYS_INLINE inline
#define SYS_PREFIX -1
#undef SYS_LINUX_SYSCALL_SUPPORT_H
#include "linux_syscall_support.h"
SysCalls() : my_errno(0) { }
int my_errno;
};
}
#define ERRNO sys.my_errno
#else
#define ERRNO my_errno
#endif
/* Wrapper for open() which is guaranteed to never return EINTR.
*/
static int c_open(const char *fname, int flags, int mode) {
ssize_t rc;
NO_INTR(rc = sys_open(fname, flags, mode));
return rc;
}
/* abort() is not safely reentrant, and changes its behavior each time
* it is called. This means, if the main application ever called abort()
* we cannot safely call it again. This would happen if we were called
* from a SIGABRT signal handler in the main application. So, document
* that calling SIGABRT from the thread lister makes it not signal safe
* (and vice-versa).
* Also, since we share address space with the main application, we
* cannot call abort() from the callback and expect the main application
* to behave correctly afterwards. In fact, the only thing we can do, is
* to terminate the main application with extreme prejudice (aka
* PTRACE_KILL).
* We set up our own SIGABRT handler to do this.
* In order to find the main application from the signal handler, we
* need to store information about it in global variables. This is
* safe, because the main application should be suspended at this
* time. If the callback ever called TCMalloc_ResumeAllProcessThreads(), then
* we are running a higher risk, though. So, try to avoid calling
* abort() after calling TCMalloc_ResumeAllProcessThreads.
*/
static volatile int *sig_pids, sig_num_threads, sig_proc, sig_marker;
/* Signal handler to help us recover from dying while we are attached to
* other threads.
*/
static void SignalHandler(int signum, siginfo_t *si, void *data) {
if (sig_pids != NULL) {
if (signum == SIGABRT) {
while (sig_num_threads-- > 0) {
/* Not sure if sched_yield is really necessary here, but it does not */
/* hurt, and it might be necessary for the same reasons that we have */
/* to do so in sys_ptrace_detach(). */
sys_sched_yield();
sys_ptrace(PTRACE_KILL, sig_pids[sig_num_threads], 0, 0);
}
} else if (sig_num_threads > 0) {
TCMalloc_ResumeAllProcessThreads(sig_num_threads, (int *)sig_pids);
}
}
sig_pids = NULL;
if (sig_marker >= 0)
NO_INTR(sys_close(sig_marker));
sig_marker = -1;
if (sig_proc >= 0)
NO_INTR(sys_close(sig_proc));
sig_proc = -1;
sys__exit(signum == SIGABRT ? 1 : 2);
}
/* Try to dirty the stack, and hope that the compiler is not smart enough
* to optimize this function away. Or worse, the compiler could inline the
* function and permanently allocate the data on the stack.
*/
static void DirtyStack(size_t amount) {
char buf[amount];
memset(buf, 0, amount);
sys_read(-1, buf, amount);
}
/* Data structure for passing arguments to the lister thread.
*/
#define ALT_STACKSIZE (MINSIGSTKSZ + 4096)
struct ListerParams {
int result, err;
char *altstack_mem;
ListAllProcessThreadsCallBack callback;
void *parameter;
va_list ap;
sem_t *lock;
};
static void ListerThread(struct ListerParams *args) {
int found_parent = 0;
pid_t clone_pid = sys_gettid(), ppid = sys_getppid();
char proc_self_task[80], marker_name[48], *marker_path;
const char *proc_paths[3];
const char *const *proc_path = proc_paths;
int proc = -1, marker = -1, num_threads = 0;
int max_threads = 0, sig;
struct kernel_stat marker_sb, proc_sb;
stack_t altstack;
/* Wait for parent thread to set appropriate permissions
* to allow ptrace activity
*/
if (sem_wait(args->lock) < 0) {
goto failure;
}
/* Create "marker" that we can use to detect threads sharing the same
* address space and the same file handles. By setting the FD_CLOEXEC flag
* we minimize the risk of misidentifying child processes as threads;
* and since there is still a race condition, we will filter those out
* later, anyway.
*/
if ((marker = sys_socket(PF_LOCAL, SOCK_DGRAM, 0)) < 0 ||
sys_fcntl(marker, F_SETFD, FD_CLOEXEC) < 0) {
failure:
args->result = -1;
args->err = errno;
if (marker >= 0)
NO_INTR(sys_close(marker));
sig_marker = marker = -1;
if (proc >= 0)
NO_INTR(sys_close(proc));
sig_proc = proc = -1;
sys__exit(1);
}
/* Compute search paths for finding thread directories in /proc */
local_itoa(strrchr(strcpy(proc_self_task, "/proc/"), '\000'), ppid);
strcpy(marker_name, proc_self_task);
marker_path = marker_name + strlen(marker_name);
strcat(proc_self_task, "/task/");
proc_paths[0] = proc_self_task; /* /proc/$$/task/ */
proc_paths[1] = "/proc/"; /* /proc/ */
proc_paths[2] = NULL;
/* Compute path for marker socket in /proc */
local_itoa(strcpy(marker_path, "/fd/") + 4, marker);
if (sys_stat(marker_name, &marker_sb) < 0) {
goto failure;
}
/* Catch signals on an alternate pre-allocated stack. This way, we can
* safely execute the signal handler even if we ran out of memory.
*/
memset(&altstack, 0, sizeof(altstack));
altstack.ss_sp = args->altstack_mem;
altstack.ss_flags = 0;
altstack.ss_size = ALT_STACKSIZE;
sys_sigaltstack(&altstack, (const stack_t *)NULL);
/* Some kernels forget to wake up traced processes, when the
* tracer dies. So, intercept synchronous signals and make sure
* that we wake up our tracees before dying. It is the caller's
* responsibility to ensure that asynchronous signals do not
* interfere with this function.
*/
sig_marker = marker;
sig_proc = -1;
for (sig = 0; sig < sizeof(sync_signals)/sizeof(*sync_signals); sig++) {
struct kernel_sigaction sa;
memset(&sa, 0, sizeof(sa));
sa.sa_sigaction_ = SignalHandler;
sys_sigfillset(&sa.sa_mask);
sa.sa_flags = SA_ONSTACK|SA_SIGINFO|SA_RESETHAND;
sys_sigaction(sync_signals[sig], &sa, (struct kernel_sigaction *)NULL);
}
/* Read process directories in /proc/... */
for (;;) {
/* Some kernels know about threads, and hide them in "/proc"
* (although they are still there, if you know the process
* id). Threads are moved into a separate "task" directory. We
* check there first, and then fall back on the older naming
* convention if necessary.
*/
if ((sig_proc = proc = c_open(*proc_path, O_RDONLY|O_DIRECTORY, 0)) < 0) {
if (*++proc_path != NULL)
continue;
goto failure;
}
if (sys_fstat(proc, &proc_sb) < 0)
goto failure;
/* Since we are suspending threads, we cannot call any libc
* functions that might acquire locks. Most notably, we cannot
* call malloc(). So, we have to allocate memory on the stack,
* instead. Since we do not know how much memory we need, we
* make a best guess. And if we guessed incorrectly we retry on
* a second iteration (by jumping to "detach_threads").
*
* Unless the number of threads is increasing very rapidly, we
* should never need to do so, though, as our guestimate is very
* conservative.
*/
if (max_threads < proc_sb.st_nlink + 100)
max_threads = proc_sb.st_nlink + 100;
/* scope */ {
pid_t pids[max_threads];
int added_entries = 0;
sig_num_threads = num_threads;
sig_pids = pids;
for (;;) {
struct KERNEL_DIRENT *entry;
char buf[4096];
ssize_t nbytes = GETDENTS(proc, (struct KERNEL_DIRENT *)buf,
sizeof(buf));
if (nbytes < 0)
goto failure;
else if (nbytes == 0) {
if (added_entries) {
/* Need to keep iterating over "/proc" in multiple
* passes until we no longer find any more threads. This
* algorithm eventually completes, when all threads have
* been suspended.
*/
added_entries = 0;
sys_lseek(proc, 0, SEEK_SET);
continue;
}
break;
}
for (entry = (struct KERNEL_DIRENT *)buf;
entry < (struct KERNEL_DIRENT *)&buf[nbytes];
entry = (struct KERNEL_DIRENT *)((char *)entry+entry->d_reclen)) {
if (entry->d_ino != 0) {
const char *ptr = entry->d_name;
pid_t pid;
/* Some kernels hide threads by preceding the pid with a '.' */
if (*ptr == '.')
ptr++;
/* If the directory is not numeric, it cannot be a
* process/thread
*/
if (*ptr < '0' || *ptr > '9')
continue;
pid = local_atoi(ptr);
/* Attach (and suspend) all threads */
if (pid && pid != clone_pid) {
struct kernel_stat tmp_sb;
char fname[entry->d_reclen + 48];
strcat(strcat(strcpy(fname, "/proc/"),
entry->d_name), marker_path);
/* Check if the marker is identical to the one we created */
if (sys_stat(fname, &tmp_sb) >= 0 &&
marker_sb.st_ino == tmp_sb.st_ino) {
long i, j;
/* Found one of our threads, make sure it is no duplicate */
for (i = 0; i < num_threads; i++) {
/* Linear search is slow, but should not matter much for
* the typically small number of threads.
*/
if (pids[i] == pid) {
/* Found a duplicate; most likely on second pass */
goto next_entry;
}
}
/* Check whether data structure needs growing */
if (num_threads >= max_threads) {
/* Back to square one, this time with more memory */
NO_INTR(sys_close(proc));
goto detach_threads;
}
/* Attaching to thread suspends it */
pids[num_threads++] = pid;
sig_num_threads = num_threads;
if (sys_ptrace(PTRACE_ATTACH, pid, (void *)0,
(void *)0) < 0) {
/* If operation failed, ignore thread. Maybe it
* just died? There might also be a race
* condition with a concurrent core dumper or
* with a debugger. In that case, we will just
* make a best effort, rather than failing
* entirely.
*/
num_threads--;
sig_num_threads = num_threads;
goto next_entry;
}
while (sys_waitpid(pid, (int *)0, __WALL) < 0) {
if (errno != EINTR) {
sys_ptrace_detach(pid);
num_threads--;
sig_num_threads = num_threads;
goto next_entry;
}
}
if (sys_ptrace(PTRACE_PEEKDATA, pid, &i, &j) || i++ != j ||
sys_ptrace(PTRACE_PEEKDATA, pid, &i, &j) || i != j) {
/* Address spaces are distinct, even though both
* processes show the "marker". This is probably
* a forked child process rather than a thread.
*/
sys_ptrace_detach(pid);
num_threads--;
sig_num_threads = num_threads;
} else {
found_parent |= pid == ppid;
added_entries++;
}
}
}
}
next_entry:;
}
}
NO_INTR(sys_close(proc));
sig_proc = proc = -1;
/* If we failed to find any threads, try looking somewhere else in
* /proc. Maybe, threads are reported differently on this system.
*/
if (num_threads > 1 || !*++proc_path) {
NO_INTR(sys_close(marker));
sig_marker = marker = -1;
/* If we never found the parent process, something is very wrong.
* Most likely, we are running in a debugger. Any attempt to operate
* on the threads would be very incomplete. Let's just report an
* error to the caller.
*/
if (!found_parent) {
TCMalloc_ResumeAllProcessThreads(num_threads, pids);
sys__exit(3);
}
/* Now we are ready to call the callback,
* which takes care of resuming the threads for us.
*/
args->result = args->callback(args->parameter, num_threads,
pids, args->ap);
args->err = errno;
/* Callback should have resumed threads, but better safe than sorry */
if (TCMalloc_ResumeAllProcessThreads(num_threads, pids)) {
/* Callback forgot to resume at least one thread, report error */
args->err = EINVAL;
args->result = -1;
}
sys__exit(0);
}
detach_threads:
/* Resume all threads prior to retrying the operation */
TCMalloc_ResumeAllProcessThreads(num_threads, pids);
sig_pids = NULL;
num_threads = 0;
sig_num_threads = num_threads;
max_threads += 100;
}
}
}
/* This function gets the list of all linux threads of the current process
* and passes them to the 'callback' along with the 'parameter' pointer; at
* the time the callback is invoked, all of the threads are paused via
* PTRACE_ATTACH.
* The callback is executed from a separate thread which shares only the
* address space, the filesystem, and the filehandles with the caller. Most
* notably, it does not share the same pid and ppid; and if it terminates,
* the rest of the application is still there. 'callback' is supposed to call,
* or arrange for a call to, TCMalloc_ResumeAllProcessThreads. This happens automatically, if
* the thread raises a synchronous signal (e.g. SIGSEGV); asynchronous
* signals are blocked. If the 'callback' decides to unblock them, it must
* ensure that they cannot terminate the application, or that
* TCMalloc_ResumeAllProcessThreads will get called.
* It is an error for the 'callback' to make any library calls that could
* acquire locks. Most notably, this means that most system calls have to
* avoid going through libc. Also, this means that it is not legal to call
* exit() or abort().
* We return -1 on error and the return value of 'callback' on success.
*/
int TCMalloc_ListAllProcessThreads(void *parameter,
ListAllProcessThreadsCallBack callback, ...) {
char altstack_mem[ALT_STACKSIZE];
struct ListerParams args;
pid_t clone_pid;
int dumpable = 1, sig;
struct kernel_sigset_t sig_blocked, sig_old;
sem_t lock;
va_start(args.ap, callback);
/* If we are short on virtual memory, initializing the alternate stack
* might trigger a SIGSEGV. Let's do this early, before it could get us
* into more trouble (i.e. before signal handlers try to use the alternate
* stack, and before we attach to other threads).
*/
memset(altstack_mem, 0, sizeof(altstack_mem));
/* Some of our cleanup functions could conceivably use more stack space.
* Try to touch the stack right now. This could be defeated by the compiler
* being too smart for its own good, so try really hard.
*/
DirtyStack(32768);
/* Make this process "dumpable". This is necessary in order to ptrace()
* after having called setuid().
*/
dumpable = sys_prctl(PR_GET_DUMPABLE, 0);
if (!dumpable)
sys_prctl(PR_SET_DUMPABLE, 1);
/* Fill in argument block for dumper thread */
args.result = -1;
args.err = 0;
args.altstack_mem = altstack_mem;
args.parameter = parameter;
args.callback = callback;
args.lock = &lock;
/* Before cloning the thread lister, block all asynchronous signals, as we */
/* are not prepared to handle them. */
sys_sigfillset(&sig_blocked);
for (sig = 0; sig < sizeof(sync_signals)/sizeof(*sync_signals); sig++) {
sys_sigdelset(&sig_blocked, sync_signals[sig]);
}
if (sys_sigprocmask(SIG_BLOCK, &sig_blocked, &sig_old)) {
args.err = errno;
args.result = -1;
goto failed;
}
/* scope */ {
/* After cloning, both the parent and the child share the same instance
* of errno. We must make sure that at least one of these processes
* (in our case, the parent) uses modified syscall macros that update
* a local copy of errno, instead.
*/
#ifdef __cplusplus
#define sys0_sigprocmask sys.sigprocmask
#define sys0_waitpid sys.waitpid
SysCalls sys;
#else
int my_errno;
#define SYS_ERRNO my_errno
#define SYS_INLINE inline
#define SYS_PREFIX 0
#undef SYS_LINUX_SYSCALL_SUPPORT_H
#include "linux_syscall_support.h"
#endif
/* Lock before clone so that parent can set
* ptrace permissions (if necessary) prior
* to ListerThread actually executing
*/
if (sem_init(&lock, 0, 0) == 0) {
int clone_errno;
clone_pid = local_clone((int (*)(void *))ListerThread, &args);
clone_errno = errno;
sys_sigprocmask(SIG_SETMASK, &sig_old, &sig_old);
if (clone_pid >= 0) {
#ifdef PR_SET_PTRACER
/* In newer versions of glibc permission must explicitly
* be given to allow for ptrace.
*/
prctl(PR_SET_PTRACER, clone_pid, 0, 0, 0);
#endif
/* Releasing the lock here allows the
* ListerThread to execute and ptrace us.
*/
sem_post(&lock);
int status, rc;
while ((rc = sys0_waitpid(clone_pid, &status, __WALL)) < 0 &&
ERRNO == EINTR) {
/* Keep waiting */
}
if (rc < 0) {
args.err = ERRNO;
args.result = -1;
} else if (WIFEXITED(status)) {
switch (WEXITSTATUS(status)) {
case 0: break; /* Normal process termination */
case 2: args.err = EFAULT; /* Some fault (e.g. SIGSEGV) detected */
args.result = -1;
break;
case 3: args.err = EPERM; /* Process is already being traced */
args.result = -1;
break;
default:args.err = ECHILD; /* Child died unexpectedly */
args.result = -1;
break;
}
} else if (!WIFEXITED(status)) {
args.err = EFAULT; /* Terminated due to an unhandled signal*/
args.result = -1;
}
sem_destroy(&lock);
} else {
args.result = -1;
args.err = clone_errno;
}
} else {
args.result = -1;
args.err = errno;
}
}
/* Restore the "dumpable" state of the process */
failed:
if (!dumpable)
sys_prctl(PR_SET_DUMPABLE, dumpable);
va_end(args.ap);
errno = args.err;
return args.result;
}
/* This function resumes the list of all linux threads that
* TCMalloc_ListAllProcessThreads pauses before giving to its callback.
* The function returns non-zero if at least one thread was
* suspended and has now been resumed.
*/
int TCMalloc_ResumeAllProcessThreads(int num_threads, pid_t *thread_pids) {
int detached_at_least_one = 0;
while (num_threads-- > 0) {
detached_at_least_one |= sys_ptrace_detach(thread_pids[num_threads]) >= 0;
}
return detached_at_least_one;
}
#ifdef __cplusplus
}
#endif
#endif
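A hedged usage sketch (added for illustration, not from the original file): a callback that records how many threads were suspended and then resumes them, as the contract above requires. The ListAllProcessThreadsCallBack signature is assumed to match the declaration in base/thread_lister.h.
static int CountAndResume(void *parameter, int num_threads,
                          pid_t *thread_pids, va_list ap) {
  *(int *)parameter = num_threads;
  /* The callback is responsible for resuming the threads it was handed. */
  return TCMalloc_ResumeAllProcessThreads(num_threads, thread_pids);
}
/* Caller side: */
int suspended = 0;
if (TCMalloc_ListAllProcessThreads(&suspended, CountAndResume) < 0) {
  /* errno describes the failure */
}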


@ -1,54 +0,0 @@
/* Copyright (c) 2005-2007, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ---
* Author: Markus Gutschke
*/
#ifndef _LINUXTHREADS_H
#define _LINUXTHREADS_H
/* Include thread_lister.h to get the interface that we implement for linux.
*/
/* We currently only support certain platforms on Linux. Porting to other
* related platforms should not be difficult.
*/
#if (defined(__i386__) || defined(__x86_64__) || defined(__arm__) || \
defined(__mips__) || defined(__PPC__) || defined(__aarch64__) || \
defined(__s390__)) && defined(__linux)
/* Define the THREADS symbol to make sure that there is exactly one core dumper
* built into the library.
*/
#define THREADS "Linux /proc"
#endif
#endif /* _LINUXTHREADS_H */


@ -1,108 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2007, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
// This file just provides storage for FLAGS_verbose.
#include "../config.h"
#include "base/logging.h"
#include "base/commandlineflags.h"
DEFINE_int32(verbose, EnvToInt("PERFTOOLS_VERBOSE", 0),
"Set to numbers >0 for more verbose output, or <0 for less. "
"--verbose == -4 means we log fatal errors only.");
#if defined(_WIN32) || defined(__CYGWIN__) || defined(__CYGWIN32__)
// While windows does have a POSIX-compatible API
// (_open/_write/_close), it acquires memory. Using this lower-level
// windows API is the closest we can get to being "raw".
RawFD RawOpenForWriting(const char* filename) {
// CreateFile allocates memory if file_name isn't absolute, so if
// that ever becomes a problem then we ought to compute the absolute
// path on its behalf (perhaps the ntdll/kernel function isn't aware
// of the working directory?)
RawFD fd = CreateFileA(filename, GENERIC_WRITE, 0, NULL,
CREATE_ALWAYS, 0, NULL);
if (fd != kIllegalRawFD && GetLastError() == ERROR_ALREADY_EXISTS)
SetEndOfFile(fd); // truncate the existing file
return fd;
}
void RawWrite(RawFD handle, const char* buf, size_t len) {
while (len > 0) {
DWORD wrote;
BOOL ok = WriteFile(handle, buf, len, &wrote, NULL);
// We do not use an asynchronous file handle, so ok==false means an error
if (!ok) break;
buf += wrote;
len -= wrote;
}
}
void RawClose(RawFD handle) {
CloseHandle(handle);
}
#else // _WIN32 || __CYGWIN__ || __CYGWIN32__
#ifdef HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#ifdef HAVE_FCNTL_H
#include <fcntl.h>
#endif
// Re-run fn until it doesn't cause EINTR.
#define NO_INTR(fn) do {} while ((fn) < 0 && errno == EINTR)
RawFD RawOpenForWriting(const char* filename) {
return open(filename, O_WRONLY|O_CREAT|O_TRUNC, 0664);
}
void RawWrite(RawFD fd, const char* buf, size_t len) {
while (len > 0) {
ssize_t r;
NO_INTR(r = write(fd, buf, len));
if (r <= 0) break;
buf += r;
len -= r;
}
}
void RawClose(RawFD fd) {
NO_INTR(close(fd));
}
#endif // _WIN32 || __CYGWIN__ || __CYGWIN32__
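A short usage sketch (added for illustration; the file name is hypothetical): dumping a buffer with the raw, allocation-free I/O helpers defined above.
RawFD fd = RawOpenForWriting("/tmp/raw_dump.txt");
if (fd != kIllegalRawFD) {
  static const char msg[] = "raw write, no malloc involved\n";
  RawWrite(fd, msg, sizeof(msg) - 1);
  RawClose(fd);
}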


@ -1,259 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2005, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
// This file contains #include information about logging-related stuff.
// Pretty much everybody needs to #include this file so that they can
// log various happenings.
//
#ifndef _LOGGING_H_
#define _LOGGING_H_
#include "../config.h"
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#ifdef HAVE_UNISTD_H
#include <unistd.h> // for write()
#endif
#include <string.h> // for strlen(), strcmp()
#include <assert.h>
#include <errno.h> // for errno
#include "base/commandlineflags.h"
// On some systems (like freebsd), we can't call write() at all in a
// global constructor, perhaps because errno hasn't been set up.
// (In windows, we can't call it because it might call malloc.)
// Calling the write syscall is safer (it doesn't set errno), so we
// prefer that. Note we don't care about errno for logging: we just
// do logging on a best-effort basis.
#if defined(_MSC_VER)
#define WRITE_TO_STDERR(buf, len) WriteToStderr(buf, len); // in port.cc
#elif defined(HAVE_SYS_SYSCALL_H)
#include <sys/syscall.h>
#define WRITE_TO_STDERR(buf, len) syscall(SYS_write, STDERR_FILENO, buf, len)
#else
#define WRITE_TO_STDERR(buf, len) write(STDERR_FILENO, buf, len)
#endif
// MSVC and mingw define their own, safe version of vsnprintf (the
// windows one is broken) in port.cc. Everyone else can use the
// version here. We had to give it a unique name for windows.
#ifndef _WIN32
# define perftools_vsnprintf vsnprintf
#endif
// We log all messages at this log-level and below.
// INFO == -1, WARNING == -2, ERROR == -3, FATAL == -4
DECLARE_int32(verbose);
// CHECK dies with a fatal error if condition is not true. It is *not*
// controlled by NDEBUG, so the check will be executed regardless of
// compilation mode. Therefore, it is safe to do things like:
// CHECK(fp->Write(x) == 4)
// Note we use write instead of printf/puts to avoid the risk we'll
// call malloc().
#define CHECK(condition) \
do { \
if (!(condition)) { \
WRITE_TO_STDERR("Check failed: " #condition "\n", \
sizeof("Check failed: " #condition "\n")-1); \
abort(); \
} \
} while (0)
// This takes a message to print. The name is historical.
#define RAW_CHECK(condition, message) \
do { \
if (!(condition)) { \
WRITE_TO_STDERR("Check failed: " #condition ": " message "\n", \
sizeof("Check failed: " #condition ": " message "\n")-1);\
abort(); \
} \
} while (0)
// This is like RAW_CHECK, but only in debug-mode
#ifdef NDEBUG
enum { DEBUG_MODE = 0 };
#define RAW_DCHECK(condition, message)
#else
enum { DEBUG_MODE = 1 };
#define RAW_DCHECK(condition, message) RAW_CHECK(condition, message)
#endif
// This prints errno as well. Note we use write instead of printf/puts to
// avoid the risk we'll call malloc().
#define PCHECK(condition) \
do { \
if (!(condition)) { \
const int err_no = errno; \
WRITE_TO_STDERR("Check failed: " #condition ": ", \
sizeof("Check failed: " #condition ": ")-1); \
WRITE_TO_STDERR(strerror(err_no), strlen(strerror(err_no))); \
WRITE_TO_STDERR("\n", sizeof("\n")-1); \
abort(); \
} \
} while (0)
// Helper macro for binary operators; prints the two values on error
// Don't use this macro directly in your code, use CHECK_EQ et al below
// WARNING: These don't compile correctly if one of the arguments is a pointer
// and the other is NULL. To work around this, simply static_cast NULL to the
// type of the desired pointer.
// TODO(jandrews): Also print the values in case of failure. Requires some
// sort of type-sensitive ToString() function.
#define CHECK_OP(op, val1, val2) \
do { \
if (!((val1) op (val2))) { \
fprintf(stderr, "Check failed: %s %s %s\n", #val1, #op, #val2); \
abort(); \
} \
} while (0)
#define CHECK_EQ(val1, val2) CHECK_OP(==, val1, val2)
#define CHECK_NE(val1, val2) CHECK_OP(!=, val1, val2)
#define CHECK_LE(val1, val2) CHECK_OP(<=, val1, val2)
#define CHECK_LT(val1, val2) CHECK_OP(< , val1, val2)
#define CHECK_GE(val1, val2) CHECK_OP(>=, val1, val2)
#define CHECK_GT(val1, val2) CHECK_OP(> , val1, val2)
// Synonyms for CHECK_* that are used in some unittests.
#define EXPECT_EQ(val1, val2) CHECK_EQ(val1, val2)
#define EXPECT_NE(val1, val2) CHECK_NE(val1, val2)
#define EXPECT_LE(val1, val2) CHECK_LE(val1, val2)
#define EXPECT_LT(val1, val2) CHECK_LT(val1, val2)
#define EXPECT_GE(val1, val2) CHECK_GE(val1, val2)
#define EXPECT_GT(val1, val2) CHECK_GT(val1, val2)
#define ASSERT_EQ(val1, val2) EXPECT_EQ(val1, val2)
#define ASSERT_NE(val1, val2) EXPECT_NE(val1, val2)
#define ASSERT_LE(val1, val2) EXPECT_LE(val1, val2)
#define ASSERT_LT(val1, val2) EXPECT_LT(val1, val2)
#define ASSERT_GE(val1, val2) EXPECT_GE(val1, val2)
#define ASSERT_GT(val1, val2) EXPECT_GT(val1, val2)
// As are these variants.
#define EXPECT_TRUE(cond) CHECK(cond)
#define EXPECT_FALSE(cond) CHECK(!(cond))
#define EXPECT_STREQ(a, b) CHECK(strcmp(a, b) == 0)
#define ASSERT_TRUE(cond) EXPECT_TRUE(cond)
#define ASSERT_FALSE(cond) EXPECT_FALSE(cond)
#define ASSERT_STREQ(a, b) EXPECT_STREQ(a, b)
// Used for (libc) functions that return -1 and set errno
#define CHECK_ERR(invocation) PCHECK((invocation) != -1)
// A few more checks that only happen in debug mode
#ifdef NDEBUG
#define DCHECK_EQ(val1, val2)
#define DCHECK_NE(val1, val2)
#define DCHECK_LE(val1, val2)
#define DCHECK_LT(val1, val2)
#define DCHECK_GE(val1, val2)
#define DCHECK_GT(val1, val2)
#else
#define DCHECK_EQ(val1, val2) CHECK_EQ(val1, val2)
#define DCHECK_NE(val1, val2) CHECK_NE(val1, val2)
#define DCHECK_LE(val1, val2) CHECK_LE(val1, val2)
#define DCHECK_LT(val1, val2) CHECK_LT(val1, val2)
#define DCHECK_GE(val1, val2) CHECK_GE(val1, val2)
#define DCHECK_GT(val1, val2) CHECK_GT(val1, val2)
#endif
#ifdef ERROR
#undef ERROR // may conflict with ERROR macro on windows
#endif
enum LogSeverity {INFO = -1, WARNING = -2, ERROR = -3, FATAL = -4};
// NOTE: we add a newline to the end of the output if it's not there already
inline void LogPrintf(int severity, const char* pat, va_list ap) {
// We write directly to the stderr file descriptor and avoid FILE
// buffering because that may invoke malloc()
char buf[600];
perftools_vsnprintf(buf, sizeof(buf)-1, pat, ap);
if (buf[0] != '\0' && buf[strlen(buf)-1] != '\n') {
assert(strlen(buf)+1 < sizeof(buf));
strcat(buf, "\n");
}
WRITE_TO_STDERR(buf, strlen(buf));
if ((severity) == FATAL)
abort(); // LOG(FATAL) indicates a big problem, so don't run atexit() calls
}
// Note that since the order of global constructors is unspecified,
// global code that calls RAW_LOG may execute before FLAGS_verbose is set.
// Such code will run with verbosity == 0 no matter what.
#define VLOG_IS_ON(severity) (FLAGS_verbose >= severity)
// In a better world, we'd use __VA_ARGS__, but VC++ 7 doesn't support it.
#define LOG_PRINTF(severity, pat) do { \
if (VLOG_IS_ON(severity)) { \
va_list ap; \
va_start(ap, pat); \
LogPrintf(severity, pat, ap); \
va_end(ap); \
} \
} while (0)
// RAW_LOG is the main function; some synonyms are used in unittests.
inline void RAW_LOG(int lvl, const char* pat, ...) { LOG_PRINTF(lvl, pat); }
inline void RAW_VLOG(int lvl, const char* pat, ...) { LOG_PRINTF(lvl, pat); }
inline void LOG(int lvl, const char* pat, ...) { LOG_PRINTF(lvl, pat); }
inline void VLOG(int lvl, const char* pat, ...) { LOG_PRINTF(lvl, pat); }
inline void LOG_IF(int lvl, bool cond, const char* pat, ...) {
if (cond) LOG_PRINTF(lvl, pat);
}
// This isn't technically logging, but it's also IO and also is an
// attempt to be "raw" -- that is, to not use any higher-level libc
// routines that might allocate memory or (ideally) try to allocate
// locks. We use an opaque file handle (not necessarily an int)
// to allow even more low-level stuff in the future.
// Like other "raw" routines, these functions are best effort, and
// thus don't return error codes (except RawOpenForWriting()).
#if defined(_WIN32) || defined(__CYGWIN__) || defined(__CYGWIN32__)
#ifndef NOMINMAX
#define NOMINMAX // @#!$& windows
#endif
#include <windows.h>
typedef HANDLE RawFD;
const RawFD kIllegalRawFD = INVALID_HANDLE_VALUE;
#else
typedef int RawFD;
const RawFD kIllegalRawFD = -1; // what open returns if it fails
#endif // defined(_WIN32) || defined(__CYGWIN__) || defined(__CYGWIN32__)
RawFD RawOpenForWriting(const char* filename); // uses default permissions
void RawWrite(RawFD fd, const char* buf, size_t len);
void RawClose(RawFD fd);
#endif // _LOGGING_H_
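A hedged usage sketch (illustrative only): the macros above are intended for low-level code that must not allocate, so they write directly to stderr and abort() on failure.
static void CheckedCopy(char *dst, size_t cap, const char *src) {
  CHECK(dst != NULL);                      /* aborts if the condition fails */
  CHECK_LT(strlen(src), cap);              /* binary-comparison variant     */
  memcpy(dst, src, strlen(src) + 1);
  RAW_LOG(INFO, "copied %d bytes", (int)(strlen(src) + 1));
}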


@ -1,582 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2006, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// A low-level allocator that can be used by other low-level
// modules without introducing dependency cycles.
// This allocator is slow and wasteful of memory;
// it should not be used when performance is key.
#include "base/low_level_alloc.h"
#include "base/dynamic_annotations.h"
#include "base/spinlock.h"
#include "base/logging.h"
#include "malloc_hook-inl.h"
#include <gperftools/malloc_hook.h>
#include <errno.h>
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#ifdef HAVE_MMAP
#include <sys/mman.h>
#endif
#include <new> // for placement-new
// On systems (like freebsd) that don't define MAP_ANONYMOUS, use the old
// form of the name instead.
#ifndef MAP_ANONYMOUS
# define MAP_ANONYMOUS MAP_ANON
#endif
// A first-fit allocator with amortized logarithmic free() time.
LowLevelAlloc::PagesAllocator::~PagesAllocator() {
}
// ---------------------------------------------------------------------------
static const int kMaxLevel = 30;
// We put this class-only struct in a namespace to avoid polluting the
// global namespace with this struct name (thus risking an ODR violation).
namespace low_level_alloc_internal {
// This struct describes one allocated block, or one free block.
struct AllocList {
struct Header {
intptr_t size; // size of entire region, including this field. Must be
// first. Valid in both allocated and unallocated blocks
intptr_t magic; // kMagicAllocated or kMagicUnallocated xor this
LowLevelAlloc::Arena *arena; // pointer to parent arena
void *dummy_for_alignment; // aligns regions to 0 mod 2*sizeof(void*)
} header;
// Next two fields: in unallocated blocks: freelist skiplist data
// in allocated blocks: overlaps with client data
int levels; // levels in skiplist used
AllocList *next[kMaxLevel]; // actually has levels elements.
// The AllocList node may not have room for
// all kMaxLevel entries. See max_fit in
// LLA_SkiplistLevels()
};
}
using low_level_alloc_internal::AllocList;
// ---------------------------------------------------------------------------
// A trivial skiplist implementation. This is used to keep the freelist
// in address order while taking only logarithmic time per insert and delete.
// An integer approximation of log2(size/base)
// Requires size >= base.
static int IntLog2(size_t size, size_t base) {
int result = 0;
for (size_t i = size; i > base; i >>= 1) { // i == floor(size/2**result)
result++;
}
// floor(size / 2**result) <= base < floor(size / 2**(result-1))
// => log2(size/(base+1)) <= result < 1+log2(size/base)
// => result ~= log2(size/base)
return result;
}
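// Worked example (added for clarity, not in the original source):
// IntLog2(4096, 16) halves 4096 eight times before reaching the base
// (4096 -> 2048 -> ... -> 32 -> 16) and returns 8, which is exactly
// log2(4096/16); for sizes that are not an exact power-of-two multiple of
// the base the result is only the approximation described above.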
// Return a random integer n: p(n)=1/(2**n) if 1 <= n; p(n)=0 if n < 1.
static int Random() {
static uint32 r = 1; // no locking---it's not critical
ANNOTATE_BENIGN_RACE(&r, "benign race, not critical.");
int result = 1;
while ((((r = r*1103515245 + 12345) >> 30) & 1) == 0) {
result++;
}
return result;
}
// Return a number of skiplist levels for a node of size bytes, where
// base is the minimum node size. Compute level=log2(size / base)+n
// where n is 1 if random is false and otherwise a random number generated with
// the standard distribution for a skiplist: See Random() above.
// Bigger nodes tend to have more skiplist levels due to the log2(size / base)
// term, so first-fit searches touch fewer nodes. "level" is clipped so
// level<kMaxLevel and next[level-1] will fit in the node.
// 0 < LLA_SkiplistLevels(x,y,false) <= LLA_SkiplistLevels(x,y,true) < kMaxLevel
static int LLA_SkiplistLevels(size_t size, size_t base, bool random) {
// max_fit is the maximum number of levels that will fit in a node for the
// given size. We can't return more than max_fit, no matter what the
// random number generator says.
int max_fit = (size-OFFSETOF_MEMBER(AllocList, next)) / sizeof (AllocList *);
int level = IntLog2(size, base) + (random? Random() : 1);
if (level > max_fit) level = max_fit;
if (level > kMaxLevel-1) level = kMaxLevel - 1;
RAW_CHECK(level >= 1, "block not big enough for even one level");
return level;
}
// Return "atleast", the first element of AllocList *head s.t. *atleast >= *e.
// For 0 <= i < head->levels, set prev[i] to "no_greater", where no_greater
// points to the last element at level i in the AllocList less than *e, or is
// head if no such element exists.
static AllocList *LLA_SkiplistSearch(AllocList *head,
AllocList *e, AllocList **prev) {
AllocList *p = head;
for (int level = head->levels - 1; level >= 0; level--) {
for (AllocList *n; (n = p->next[level]) != 0 && n < e; p = n) {
}
prev[level] = p;
}
return (head->levels == 0) ? 0 : prev[0]->next[0];
}
// Insert element *e into AllocList *head. Set prev[] as LLA_SkiplistSearch.
// Requires that e->levels be previously set by the caller (using
// LLA_SkiplistLevels())
static void LLA_SkiplistInsert(AllocList *head, AllocList *e,
AllocList **prev) {
LLA_SkiplistSearch(head, e, prev);
for (; head->levels < e->levels; head->levels++) { // extend prev pointers
prev[head->levels] = head; // to all *e's levels
}
for (int i = 0; i != e->levels; i++) { // add element to list
e->next[i] = prev[i]->next[i];
prev[i]->next[i] = e;
}
}
// Remove element *e from AllocList *head. Set prev[] as LLA_SkiplistSearch().
// Requires that e->levels be previously set by the caller (using
// LLA_SkiplistLevels())
static void LLA_SkiplistDelete(AllocList *head, AllocList *e,
AllocList **prev) {
AllocList *found = LLA_SkiplistSearch(head, e, prev);
RAW_CHECK(e == found, "element not in freelist");
for (int i = 0; i != e->levels && prev[i]->next[i] == e; i++) {
prev[i]->next[i] = e->next[i];
}
while (head->levels > 0 && head->next[head->levels - 1] == 0) {
head->levels--; // reduce head->levels if level unused
}
}
// ---------------------------------------------------------------------------
// Arena implementation
struct LowLevelAlloc::Arena {
Arena() : mu(SpinLock::LINKER_INITIALIZED) {} // does nothing; for static init
explicit Arena(int) : pagesize(0) {} // set pagesize to zero explicitly
// for non-static init
SpinLock mu; // protects freelist, allocation_count,
// pagesize, roundup, min_size
AllocList freelist; // head of free list; sorted by addr (under mu)
int32 allocation_count; // count of allocated blocks (under mu)
int32 flags; // flags passed to NewArena (ro after init)
size_t pagesize; // ==getpagesize() (init under mu, then ro)
size_t roundup; // lowest power of 2 >= max(16,sizeof (AllocList))
// (init under mu, then ro)
size_t min_size; // smallest allocation block size
// (init under mu, then ro)
PagesAllocator *allocator;
};
// The default arena, which is used when 0 is passed instead of an Arena
// pointer.
static struct LowLevelAlloc::Arena default_arena;
// Non-malloc-hooked arenas: used only to allocate metadata for arenas that
// do not want malloc hook reporting, so that for them there's no malloc hook
// reporting even during arena creation.
static struct LowLevelAlloc::Arena unhooked_arena;
static struct LowLevelAlloc::Arena unhooked_async_sig_safe_arena;
namespace {
class DefaultPagesAllocator : public LowLevelAlloc::PagesAllocator {
public:
virtual ~DefaultPagesAllocator() {};
virtual void *MapPages(int32 flags, size_t size);
virtual void UnMapPages(int32 flags, void *addr, size_t size);
};
}
// magic numbers to identify allocated and unallocated blocks
static const intptr_t kMagicAllocated = 0x4c833e95;
static const intptr_t kMagicUnallocated = ~kMagicAllocated;
namespace {
class SCOPED_LOCKABLE ArenaLock {
public:
explicit ArenaLock(LowLevelAlloc::Arena *arena)
EXCLUSIVE_LOCK_FUNCTION(arena->mu)
: left_(false), mask_valid_(false), arena_(arena) {
if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
// We've decided not to support async-signal-safe arena use until
// there is a demonstrated need. Here's how one could do it though
// (would need to be made more portable).
#if 0
sigset_t all;
sigfillset(&all);
this->mask_valid_ =
(pthread_sigmask(SIG_BLOCK, &all, &this->mask_) == 0);
#else
RAW_CHECK(false, "We do not yet support async-signal-safe arena.");
#endif
}
this->arena_->mu.Lock();
}
~ArenaLock() { RAW_CHECK(this->left_, "haven't left Arena region"); }
void Leave() /*UNLOCK_FUNCTION()*/ {
this->arena_->mu.Unlock();
#if 0
if (this->mask_valid_) {
pthread_sigmask(SIG_SETMASK, &this->mask_, 0);
}
#endif
this->left_ = true;
}
private:
bool left_; // whether left region
bool mask_valid_;
#if 0
sigset_t mask_; // old mask of blocked signals
#endif
LowLevelAlloc::Arena *arena_;
DISALLOW_COPY_AND_ASSIGN(ArenaLock);
};
} // anonymous namespace
// create an appropriate magic number for an object at "ptr"
// "magic" should be kMagicAllocated or kMagicUnallocated
inline static intptr_t Magic(intptr_t magic, AllocList::Header *ptr) {
return magic ^ reinterpret_cast<intptr_t>(ptr);
}
// Initialize the fields of an Arena
static void ArenaInit(LowLevelAlloc::Arena *arena) {
if (arena->pagesize == 0) {
arena->pagesize = getpagesize();
// Round up block sizes to a power of two close to the header size.
arena->roundup = 16;
while (arena->roundup < sizeof (arena->freelist.header)) {
arena->roundup += arena->roundup;
}
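// e.g. with a 24-byte header (typical of 64-bit builds) this leaves roundup == 32.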
// Don't allocate blocks less than twice the roundup size to avoid tiny
// free blocks.
arena->min_size = 2 * arena->roundup;
arena->freelist.header.size = 0;
arena->freelist.header.magic =
Magic(kMagicUnallocated, &arena->freelist.header);
arena->freelist.header.arena = arena;
arena->freelist.levels = 0;
memset(arena->freelist.next, 0, sizeof (arena->freelist.next));
arena->allocation_count = 0;
if (arena == &default_arena) {
// Default arena should be hooked, e.g. for heap-checker to trace
// pointer chains through objects in the default arena.
arena->flags = LowLevelAlloc::kCallMallocHook;
} else if (arena == &unhooked_async_sig_safe_arena) {
arena->flags = LowLevelAlloc::kAsyncSignalSafe;
} else {
arena->flags = 0; // other arenas' flags may be overridden by client,
// but unhooked_arena will have 0 in 'flags'.
}
arena->allocator = LowLevelAlloc::GetDefaultPagesAllocator();
}
}
// L < meta_data_arena->mu
LowLevelAlloc::Arena *LowLevelAlloc::NewArena(int32 flags,
Arena *meta_data_arena) {
return NewArenaWithCustomAlloc(flags, meta_data_arena, NULL);
}
// L < meta_data_arena->mu
LowLevelAlloc::Arena *LowLevelAlloc::NewArenaWithCustomAlloc(int32 flags,
Arena *meta_data_arena,
PagesAllocator *allocator) {
RAW_CHECK(meta_data_arena != 0, "must pass a valid arena");
if (meta_data_arena == &default_arena) {
if ((flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
meta_data_arena = &unhooked_async_sig_safe_arena;
} else if ((flags & LowLevelAlloc::kCallMallocHook) == 0) {
meta_data_arena = &unhooked_arena;
}
}
// Arena(0) uses the constructor for non-static contexts
Arena *result =
new (AllocWithArena(sizeof (*result), meta_data_arena)) Arena(0);
ArenaInit(result);
result->flags = flags;
if (allocator) {
result->allocator = allocator;
}
return result;
}
// L < arena->mu, L < arena->arena->mu
bool LowLevelAlloc::DeleteArena(Arena *arena) {
RAW_CHECK(arena != 0 && arena != &default_arena && arena != &unhooked_arena,
"may not delete default arena");
ArenaLock section(arena);
bool empty = (arena->allocation_count == 0);
section.Leave();
if (empty) {
while (arena->freelist.next[0] != 0) {
AllocList *region = arena->freelist.next[0];
size_t size = region->header.size;
arena->freelist.next[0] = region->next[0];
RAW_CHECK(region->header.magic ==
Magic(kMagicUnallocated, &region->header),
"bad magic number in DeleteArena()");
RAW_CHECK(region->header.arena == arena,
"bad arena pointer in DeleteArena()");
RAW_CHECK(size % arena->pagesize == 0,
"empty arena has non-page-aligned block size");
RAW_CHECK(reinterpret_cast<intptr_t>(region) % arena->pagesize == 0,
"empty arena has non-page-aligned block");
int munmap_result;
if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) == 0) {
munmap_result = munmap(region, size);
} else {
munmap_result = MallocHook::UnhookedMUnmap(region, size);
}
RAW_CHECK(munmap_result == 0,
"LowLevelAlloc::DeleteArena: munmap failed address");
}
Free(arena);
}
return empty;
}
// ---------------------------------------------------------------------------
// Return value rounded up to next multiple of align.
// align must be a power of two.
static intptr_t RoundUp(intptr_t addr, intptr_t align) {
return (addr + align - 1) & ~(align - 1);
}
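// For example, RoundUp(37, 16) == 48 and RoundUp(48, 16) == 48.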
// Equivalent to "return prev->next[i]" but with sanity checking
// that the freelist is in the correct order, that it
// consists of regions marked "unallocated", and that no two regions
// are adjacent in memory (they should have been coalesced).
// L < arena->mu
static AllocList *Next(int i, AllocList *prev, LowLevelAlloc::Arena *arena) {
RAW_CHECK(i < prev->levels, "too few levels in Next()");
AllocList *next = prev->next[i];
if (next != 0) {
RAW_CHECK(next->header.magic == Magic(kMagicUnallocated, &next->header),
"bad magic number in Next()");
RAW_CHECK(next->header.arena == arena,
"bad arena pointer in Next()");
if (prev != &arena->freelist) {
RAW_CHECK(prev < next, "unordered freelist");
RAW_CHECK(reinterpret_cast<char *>(prev) + prev->header.size <
reinterpret_cast<char *>(next), "malformed freelist");
}
}
return next;
}
// Coalesce list item "a" with its successor if they are adjacent.
static void Coalesce(AllocList *a) {
AllocList *n = a->next[0];
if (n != 0 && reinterpret_cast<char *>(a) + a->header.size ==
reinterpret_cast<char *>(n)) {
LowLevelAlloc::Arena *arena = a->header.arena;
a->header.size += n->header.size;
n->header.magic = 0;
n->header.arena = 0;
AllocList *prev[kMaxLevel];
LLA_SkiplistDelete(&arena->freelist, n, prev);
LLA_SkiplistDelete(&arena->freelist, a, prev);
a->levels = LLA_SkiplistLevels(a->header.size, arena->min_size, true);
LLA_SkiplistInsert(&arena->freelist, a, prev);
}
}
// Adds block at location "v" to the free list
// L >= arena->mu
static void AddToFreelist(void *v, LowLevelAlloc::Arena *arena) {
AllocList *f = reinterpret_cast<AllocList *>(
reinterpret_cast<char *>(v) - sizeof (f->header));
RAW_CHECK(f->header.magic == Magic(kMagicAllocated, &f->header),
"bad magic number in AddToFreelist()");
RAW_CHECK(f->header.arena == arena,
"bad arena pointer in AddToFreelist()");
f->levels = LLA_SkiplistLevels(f->header.size, arena->min_size, true);
AllocList *prev[kMaxLevel];
LLA_SkiplistInsert(&arena->freelist, f, prev);
f->header.magic = Magic(kMagicUnallocated, &f->header);
Coalesce(f); // maybe coalesce with successor
Coalesce(prev[0]); // maybe coalesce with predecessor
}
// Frees storage allocated by LowLevelAlloc::Alloc().
// L < arena->mu
void LowLevelAlloc::Free(void *v) {
if (v != 0) {
AllocList *f = reinterpret_cast<AllocList *>(
reinterpret_cast<char *>(v) - sizeof (f->header));
RAW_CHECK(f->header.magic == Magic(kMagicAllocated, &f->header),
"bad magic number in Free()");
LowLevelAlloc::Arena *arena = f->header.arena;
if ((arena->flags & kCallMallocHook) != 0) {
MallocHook::InvokeDeleteHook(v);
}
ArenaLock section(arena);
AddToFreelist(v, arena);
RAW_CHECK(arena->allocation_count > 0, "nothing in arena to free");
arena->allocation_count--;
section.Leave();
}
}
// allocates and returns a block of size bytes, to be freed with Free()
// L < arena->mu
static void *DoAllocWithArena(size_t request, LowLevelAlloc::Arena *arena) {
void *result = 0;
if (request != 0) {
AllocList *s; // will point to region that satisfies request
ArenaLock section(arena);
ArenaInit(arena);
// round up with header
size_t req_rnd = RoundUp(request + sizeof (s->header), arena->roundup);
for (;;) { // loop until we find a suitable region
// find the minimum levels that a block of this size must have
int i = LLA_SkiplistLevels(req_rnd, arena->min_size, false) - 1;
if (i < arena->freelist.levels) { // potential blocks exist
AllocList *before = &arena->freelist; // predecessor of s
while ((s = Next(i, before, arena)) != 0 && s->header.size < req_rnd) {
before = s;
}
if (s != 0) { // we found a region
break;
}
}
// we unlock before mmap() both because mmap() may call a callback hook,
// and because it may be slow.
arena->mu.Unlock();
// mmap generous 64K chunks to decrease
// the chances/impact of fragmentation:
size_t new_pages_size = RoundUp(req_rnd, arena->pagesize * 16);
void *new_pages = arena->allocator->MapPages(arena->flags, new_pages_size);
arena->mu.Lock();
s = reinterpret_cast<AllocList *>(new_pages);
s->header.size = new_pages_size;
// Pretend the block is allocated; call AddToFreelist() to free it.
s->header.magic = Magic(kMagicAllocated, &s->header);
s->header.arena = arena;
AddToFreelist(&s->levels, arena); // insert new region into free list
}
AllocList *prev[kMaxLevel];
LLA_SkiplistDelete(&arena->freelist, s, prev); // remove from free list
// s points to the first free region that's big enough
if (req_rnd + arena->min_size <= s->header.size) { // big enough to split
AllocList *n = reinterpret_cast<AllocList *>
(req_rnd + reinterpret_cast<char *>(s));
n->header.size = s->header.size - req_rnd;
n->header.magic = Magic(kMagicAllocated, &n->header);
n->header.arena = arena;
s->header.size = req_rnd;
AddToFreelist(&n->levels, arena);
}
s->header.magic = Magic(kMagicAllocated, &s->header);
RAW_CHECK(s->header.arena == arena, "");
arena->allocation_count++;
section.Leave();
result = &s->levels;
}
ANNOTATE_NEW_MEMORY(result, request);
return result;
}
void *LowLevelAlloc::Alloc(size_t request) {
void *result = DoAllocWithArena(request, &default_arena);
if ((default_arena.flags & kCallMallocHook) != 0) {
// this call must be directly in the user-called allocator function
// for MallocHook::GetCallerStackTrace to work properly
MallocHook::InvokeNewHook(result, request);
}
return result;
}
void *LowLevelAlloc::AllocWithArena(size_t request, Arena *arena) {
RAW_CHECK(arena != 0, "must pass a valid arena");
void *result = DoAllocWithArena(request, arena);
if ((arena->flags & kCallMallocHook) != 0) {
// this call must be directly in the user-called allocator function
// for MallocHook::GetCallerStackTrace to work properly
MallocHook::InvokeNewHook(result, request);
}
return result;
}
LowLevelAlloc::Arena *LowLevelAlloc::DefaultArena() {
return &default_arena;
}
static DefaultPagesAllocator *default_pages_allocator;
static union {
char chars[sizeof(DefaultPagesAllocator)];
void *ptr;
} debug_pages_allocator_space;
LowLevelAlloc::PagesAllocator *LowLevelAlloc::GetDefaultPagesAllocator(void) {
if (default_pages_allocator) {
return default_pages_allocator;
}
default_pages_allocator = new (debug_pages_allocator_space.chars) DefaultPagesAllocator();
return default_pages_allocator;
}
void *DefaultPagesAllocator::MapPages(int32 flags, size_t size) {
void *new_pages;
if ((flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
new_pages = MallocHook::UnhookedMMap(0, size,
PROT_WRITE|PROT_READ,
MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
} else {
new_pages = mmap(0, size,
PROT_WRITE|PROT_READ,
MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
}
RAW_CHECK(new_pages != MAP_FAILED, "mmap error");
return new_pages;
}
void DefaultPagesAllocator::UnMapPages(int32 flags, void *region, size_t size) {
int munmap_result;
if ((flags & LowLevelAlloc::kAsyncSignalSafe) == 0) {
munmap_result = munmap(region, size);
} else {
munmap_result = MallocHook::UnhookedMUnmap(region, size);
}
RAW_CHECK(munmap_result == 0,
"LowLevelAlloc::DeleteArena: munmap failed address");
}


@ -1,120 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2006, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#if !defined(_BASE_LOW_LEVEL_ALLOC_H_)
#define _BASE_LOW_LEVEL_ALLOC_H_
// A simple thread-safe memory allocator that does not depend on
// mutexes or thread-specific data. It is intended to be used
// sparingly, and only when malloc() would introduce an unwanted
// dependency, such as inside the heap-checker.
#include "../config.h"
#include <stddef.h> // for size_t
#include "base/basictypes.h"
class LowLevelAlloc {
public:
class PagesAllocator {
public:
virtual ~PagesAllocator();
virtual void *MapPages(int32 flags, size_t size) = 0;
virtual void UnMapPages(int32 flags, void *addr, size_t size) = 0;
};
static PagesAllocator *GetDefaultPagesAllocator(void);
struct Arena; // an arena from which memory may be allocated
// Returns a pointer to a block of at least "request" bytes
// that have been newly allocated from the specific arena.
// For Alloc(), the DefaultArena() is used.
// Returns 0 if passed request==0.
// Does not return 0 under other circumstances; it crashes if memory
// is not available.
static void *Alloc(size_t request)
ATTRIBUTE_SECTION(malloc_hook);
static void *AllocWithArena(size_t request, Arena *arena)
ATTRIBUTE_SECTION(malloc_hook);
// Deallocates a region of memory that was previously allocated with
// Alloc(). Does nothing if passed 0. "s" must be either 0,
// or must have been returned from a call to Alloc() and not yet passed to
// Free() since that call to Alloc(). The space is returned to the arena
// from which it was allocated.
static void Free(void *s) ATTRIBUTE_SECTION(malloc_hook);
// ATTRIBUTE_SECTION(malloc_hook) for Alloc* and Free
// are to put all callers of MallocHook::Invoke* in this module
// into special section,
// so that MallocHook::GetCallerStackTrace can function accurately.
// Create a new arena.
// The root metadata for the new arena is allocated in the
// meta_data_arena; the DefaultArena() can be passed for meta_data_arena.
// These values may be ored into flags:
enum {
// Report calls to Alloc() and Free() via the MallocHook interface.
// Set in the DefaultArena.
kCallMallocHook = 0x0001,
// Make calls to Alloc(), Free() be async-signal-safe. Not set in
// DefaultArena().
kAsyncSignalSafe = 0x0002,
// When used with DefaultArena(), the NewArena() and DeleteArena() calls
// obey the flags given explicitly in the NewArena() call, even if those
// flags differ from the settings in DefaultArena(). So the call
// NewArena(kAsyncSignalSafe, DefaultArena()) is itself async-signal-safe,
// as well as generating an arena that provides async-signal-safe
// Alloc/Free.
};
static Arena *NewArena(int32 flags, Arena *meta_data_arena);
// note: pages allocator will never be destroyed and allocated pages will never be freed
// When allocator is NULL, this is the same as NewArena()
static Arena *NewArenaWithCustomAlloc(int32 flags, Arena *meta_data_arena, PagesAllocator *allocator);
// Destroys an arena allocated by NewArena and returns true,
// provided no allocated blocks remain in the arena.
// If allocated blocks remain in the arena, does nothing and
// returns false.
// It is illegal to attempt to destroy the DefaultArena().
static bool DeleteArena(Arena *arena);
// The default arena that always exists.
static Arena *DefaultArena();
private:
LowLevelAlloc(); // no instances
};
#endif
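A minimal usage sketch of the interface declared above; the include path, function name, and sizes are illustrative assumptions, not part of the original source:

#include "base/low_level_alloc.h"

static void LowLevelAllocExample() {
  // Default arena: allocations are reported through the malloc hooks.
  void *p = LowLevelAlloc::Alloc(128);
  LowLevelAlloc::Free(p);

  // Private arena; DefaultArena() supplies the metadata arena.
  LowLevelAlloc::Arena *arena =
      LowLevelAlloc::NewArena(0, LowLevelAlloc::DefaultArena());
  void *q = LowLevelAlloc::AllocWithArena(256, arena);
  LowLevelAlloc::Free(q);                              // returned to 'arena'
  bool deleted = LowLevelAlloc::DeleteArena(arena);    // true: nothing left allocated
  (void) deleted;
}

// To substitute a custom page allocator (the allocator object must never be
// destroyed; 'my_pages_allocator' below is hypothetical):
//   LowLevelAlloc::Arena *a = LowLevelAlloc::NewArenaWithCustomAlloc(
//       0, LowLevelAlloc::DefaultArena(), my_pages_allocator);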


@ -1,332 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2007, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// ---
// Author: Craig Silverstein.
//
// A simple mutex wrapper, supporting locks and read-write locks.
// You should assume the locks are *not* re-entrant.
//
// To use: you should define the following macros in your configure.ac:
// ACX_PTHREAD
// AC_RWLOCK
// The latter is defined in ../autoconf.
//
// This class is meant to be internal-only and should be wrapped by an
// internal namespace. Before you use this module, please give the
// name of your internal namespace for this module. Or, if you want
// to expose it, you'll want to move it to the Google namespace. We
// cannot put this class in global namespace because there can be some
// problems when we have multiple versions of Mutex in each shared object.
//
// NOTE: TryLock() is broken for NO_THREADS mode, at least in NDEBUG
// mode.
//
// CYGWIN NOTE: Cygwin support for rwlock seems to be buggy:
// http://www.cygwin.com/ml/cygwin/2008-12/msg00017.html
// Because of that, we might as well use windows locks for
// cygwin. They seem to be more reliable than the cygwin pthreads layer.
//
// TRICKY IMPLEMENTATION NOTE:
// This class is designed to be safe to use during
// dynamic-initialization -- that is, by global constructors that are
// run before main() starts. The issue in this case is that
// dynamic-initialization happens in an unpredictable order, and it
// could be that someone else's dynamic initializer could call a
// function that tries to acquire this mutex -- but that all happens
// before this mutex's constructor has run. (This can happen even if
// the mutex and the function that uses the mutex are in the same .cc
// file.) Basically, because Mutex does non-trivial work in its
// constructor, it's not, in the naive implementation, safe to use
// before dynamic initialization has run on it.
//
// The solution used here is to pair the actual mutex primitive with a
// bool that is set to true when the mutex is dynamically initialized.
// (Before that it's false.) Then we modify all mutex routines to
// look at the bool, and not try to lock/unlock until the bool makes
// it to true (which happens after the Mutex constructor has run.)
//
// This works because before main() starts -- particularly, during
// dynamic initialization -- there are no threads, so a) it's ok that
// the mutex operations are a no-op, since we don't need locking then
// anyway; and b) we can be quite confident our bool won't change
// state between a call to Lock() and a call to Unlock() (that would
// require a global constructor in one translation unit to call Lock()
// and another global constructor in another translation unit to call
// Unlock() later, which is pretty perverse).
//
// That said, it's tricky, and can conceivably fail; it's safest to
// avoid trying to acquire a mutex in a global constructor, if you
// can. One way it can fail is that a really smart compiler might
// initialize the bool to true at static-initialization time (too
// early) rather than at dynamic-initialization time. To discourage
// that, we set is_safe_ to true in code (not the constructor
// colon-initializer) and set it to true via a function that always
// evaluates to true, but that the compiler can't know always
// evaluates to true. This should be good enough.
//
// A related issue is code that could try to access the mutex
// after it's been destroyed in the global destructors (because
// the Mutex global destructor runs before some other global
// destructor, that tries to acquire the mutex). The way we
// deal with this is by taking a constructor arg that global
// mutexes should pass in, that causes the destructor to do no
// work. We still depend on the compiler not doing anything
// weird to a Mutex's memory after it is destroyed, but for a
// static global variable, that's pretty safe.
#ifndef GOOGLE_MUTEX_H_
#define GOOGLE_MUTEX_H_
#include "../config.h"
#if defined(NO_THREADS)
typedef int MutexType; // to keep a lock-count
#elif defined(_WIN32) || defined(__CYGWIN__) || defined(__CYGWIN32__)
# ifndef WIN32_LEAN_AND_MEAN
# define WIN32_LEAN_AND_MEAN // We only need minimal includes
# endif
// We need Windows NT or later for TryEnterCriticalSection(). If you
// don't need that functionality, you can remove these _WIN32_WINNT
// lines, and change TryLock() to assert(0) or something.
# ifndef _WIN32_WINNT
# define _WIN32_WINNT 0x0400
# endif
# include <windows.h>
typedef CRITICAL_SECTION MutexType;
#elif defined(HAVE_PTHREAD) && defined(HAVE_RWLOCK)
// Needed for pthread_rwlock_*. If it causes problems, you could take it
// out, but then you'd have to unset HAVE_RWLOCK (at least on linux -- it
// *does* cause problems for FreeBSD, or MacOSX, but isn't needed
// for locking there.)
# ifdef __linux__
# define _XOPEN_SOURCE 500 // may be needed to get the rwlock calls
# endif
# include <pthread.h>
typedef pthread_rwlock_t MutexType;
#elif defined(HAVE_PTHREAD)
# include <pthread.h>
typedef pthread_mutex_t MutexType;
#else
# error Need to implement mutex.h for your architecture, or #define NO_THREADS
#endif
#include <assert.h>
#include <stdlib.h> // for abort()
#define MUTEX_NAMESPACE perftools_mutex_namespace
namespace MUTEX_NAMESPACE {
class Mutex {
public:
// This is used for the single-arg constructor
enum LinkerInitialized { LINKER_INITIALIZED };
// Create a Mutex that is not held by anybody. This constructor is
// typically used for Mutexes allocated on the heap or the stack.
inline Mutex();
// This constructor should be used for global, static Mutex objects.
// It inhibits work being done by the destructor, which makes it
// safer for code that tries to acquire this mutex in its global
// destructor.
inline Mutex(LinkerInitialized);
// Destructor
inline ~Mutex();
inline void Lock(); // Block if needed until free then acquire exclusively
inline void Unlock(); // Release a lock acquired via Lock()
inline bool TryLock(); // If free, Lock() and return true, else return false
// Note that on systems that don't support read-write locks, these may
// be implemented as synonyms to Lock() and Unlock(). So you can use
// these for efficiency, but don't use them anyplace where being able
// to do shared reads is necessary to avoid deadlock.
inline void ReaderLock(); // Block until free or shared then acquire a share
inline void ReaderUnlock(); // Release a read share of this Mutex
inline void WriterLock() { Lock(); } // Acquire an exclusive lock
inline void WriterUnlock() { Unlock(); } // Release a lock from WriterLock()
private:
MutexType mutex_;
// We want to make sure that the compiler sets is_safe_ to true only
// when we tell it to, and never makes assumptions is_safe_ is
// always true. volatile is the most reliable way to do that.
volatile bool is_safe_;
// This indicates which constructor was called.
bool destroy_;
inline void SetIsSafe() { is_safe_ = true; }
// Catch the error of writing Mutex when intending MutexLock.
Mutex(Mutex* /*ignored*/) {}
// Disallow "evil" constructors
Mutex(const Mutex&);
void operator=(const Mutex&);
};
// Now the implementation of Mutex for various systems
#if defined(NO_THREADS)
// When we don't have threads, we can be either reading or writing,
// but not both. We can have lots of readers at once (in no-threads
// mode, that's most likely to happen in recursive function calls),
// but only one writer. We represent this by having mutex_ be -1 when
// writing and a number > 0 when reading (and 0 when no lock is held).
//
// In debug mode, we assert these invariants, while in non-debug mode
// we do nothing, for efficiency. That's why everything is in an
// assert.
Mutex::Mutex() : mutex_(0) { }
Mutex::Mutex(Mutex::LinkerInitialized) : mutex_(0) { }
Mutex::~Mutex() { assert(mutex_ == 0); }
void Mutex::Lock() { assert(--mutex_ == -1); }
void Mutex::Unlock() { assert(mutex_++ == -1); }
bool Mutex::TryLock() { if (mutex_) return false; Lock(); return true; }
void Mutex::ReaderLock() { assert(++mutex_ > 0); }
void Mutex::ReaderUnlock() { assert(mutex_-- > 0); }
#elif defined(_WIN32) || defined(__CYGWIN__) || defined(__CYGWIN32__)
Mutex::Mutex() : destroy_(true) {
InitializeCriticalSection(&mutex_);
SetIsSafe();
}
Mutex::Mutex(LinkerInitialized) : destroy_(false) {
InitializeCriticalSection(&mutex_);
SetIsSafe();
}
Mutex::~Mutex() { if (destroy_) DeleteCriticalSection(&mutex_); }
void Mutex::Lock() { if (is_safe_) EnterCriticalSection(&mutex_); }
void Mutex::Unlock() { if (is_safe_) LeaveCriticalSection(&mutex_); }
bool Mutex::TryLock() { return is_safe_ ?
TryEnterCriticalSection(&mutex_) != 0 : true; }
void Mutex::ReaderLock() { Lock(); } // we don't have read-write locks
void Mutex::ReaderUnlock() { Unlock(); }
#elif defined(HAVE_PTHREAD) && defined(HAVE_RWLOCK)
#define SAFE_PTHREAD(fncall) do { /* run fncall if is_safe_ is true */ \
if (is_safe_ && fncall(&mutex_) != 0) abort(); \
} while (0)
Mutex::Mutex() : destroy_(true) {
SetIsSafe();
if (is_safe_ && pthread_rwlock_init(&mutex_, NULL) != 0) abort();
}
Mutex::Mutex(Mutex::LinkerInitialized) : destroy_(false) {
SetIsSafe();
if (is_safe_ && pthread_rwlock_init(&mutex_, NULL) != 0) abort();
}
Mutex::~Mutex() { if (destroy_) SAFE_PTHREAD(pthread_rwlock_destroy); }
void Mutex::Lock() { SAFE_PTHREAD(pthread_rwlock_wrlock); }
void Mutex::Unlock() { SAFE_PTHREAD(pthread_rwlock_unlock); }
bool Mutex::TryLock() { return is_safe_ ?
pthread_rwlock_trywrlock(&mutex_) == 0 : true; }
void Mutex::ReaderLock() { SAFE_PTHREAD(pthread_rwlock_rdlock); }
void Mutex::ReaderUnlock() { SAFE_PTHREAD(pthread_rwlock_unlock); }
#undef SAFE_PTHREAD
#elif defined(HAVE_PTHREAD)
#define SAFE_PTHREAD(fncall) do { /* run fncall if is_safe_ is true */ \
if (is_safe_ && fncall(&mutex_) != 0) abort(); \
} while (0)
Mutex::Mutex() : destroy_(true) {
SetIsSafe();
if (is_safe_ && pthread_mutex_init(&mutex_, NULL) != 0) abort();
}
Mutex::Mutex(Mutex::LinkerInitialized) : destroy_(false) {
SetIsSafe();
if (is_safe_ && pthread_mutex_init(&mutex_, NULL) != 0) abort();
}
Mutex::~Mutex() { if (destroy_) SAFE_PTHREAD(pthread_mutex_destroy); }
void Mutex::Lock() { SAFE_PTHREAD(pthread_mutex_lock); }
void Mutex::Unlock() { SAFE_PTHREAD(pthread_mutex_unlock); }
bool Mutex::TryLock() { return is_safe_ ?
pthread_mutex_trylock(&mutex_) == 0 : true; }
void Mutex::ReaderLock() { Lock(); }
void Mutex::ReaderUnlock() { Unlock(); }
#undef SAFE_PTHREAD
#endif
// --------------------------------------------------------------------------
// Some helper classes
// MutexLock(mu) acquires mu when constructed and releases it when destroyed.
class MutexLock {
public:
explicit MutexLock(Mutex *mu) : mu_(mu) { mu_->Lock(); }
~MutexLock() { mu_->Unlock(); }
private:
Mutex * const mu_;
// Disallow "evil" constructors
MutexLock(const MutexLock&);
void operator=(const MutexLock&);
};
// ReaderMutexLock and WriterMutexLock do the same, for rwlocks
class ReaderMutexLock {
public:
explicit ReaderMutexLock(Mutex *mu) : mu_(mu) { mu_->ReaderLock(); }
~ReaderMutexLock() { mu_->ReaderUnlock(); }
private:
Mutex * const mu_;
// Disallow "evil" constructors
ReaderMutexLock(const ReaderMutexLock&);
void operator=(const ReaderMutexLock&);
};
class WriterMutexLock {
public:
explicit WriterMutexLock(Mutex *mu) : mu_(mu) { mu_->WriterLock(); }
~WriterMutexLock() { mu_->WriterUnlock(); }
private:
Mutex * const mu_;
// Disallow "evil" constructors
WriterMutexLock(const WriterMutexLock&);
void operator=(const WriterMutexLock&);
};
// Catch bug where variable name is omitted, e.g. MutexLock (&mu);
#define MutexLock(x) COMPILE_ASSERT(0, mutex_lock_decl_missing_var_name)
#define ReaderMutexLock(x) COMPILE_ASSERT(0, rmutex_lock_decl_missing_var_name)
#define WriterMutexLock(x) COMPILE_ASSERT(0, wmutex_lock_decl_missing_var_name)
} // namespace MUTEX_NAMESPACE
using namespace MUTEX_NAMESPACE;
#undef MUTEX_NAMESPACE
#endif  /* GOOGLE_MUTEX_H_ */
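A brief usage sketch of the classes declared above; the include path, counter, and function names are illustrative assumptions:

#include "base/simple_mutex.h"   // include path is an assumption

static Mutex counter_mu(Mutex::LINKER_INITIALIZED);   // safe to use from global constructors
static int counter = 0;

void Increment() {
  MutexLock l(&counter_mu);          // locked here, unlocked when 'l' leaves scope
  ++counter;
}

int Read() {
  ReaderMutexLock l(&counter_mu);    // shared lock where rwlocks exist, else same as Lock()
  return counter;
}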


@ -1,129 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2006, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ---
* Author: Sanjay Ghemawat
*/
#include "../config.h"
#include "base/spinlock.h"
#include "base/spinlock_internal.h"
#include "base/sysinfo.h" /* for GetSystemCPUsCount() */
// NOTE on the Lock-state values:
//
// kSpinLockFree represents the unlocked state
// kSpinLockHeld represents the locked state with no waiters
// kSpinLockSleeper represents the locked state with waiters
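//
// Typical transitions: an uncontended Lock() CASes kSpinLockFree -> kSpinLockHeld
// and Unlock() exchanges the word back to kSpinLockFree.  Under contention a
// waiter first CASes kSpinLockHeld -> kSpinLockSleeper before blocking, so the
// owner's Unlock() sees a previous value != kSpinLockHeld and calls SlowUnlock()
// to wake a waiter.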
static int adaptive_spin_count = 0;
const base::LinkerInitialized SpinLock::LINKER_INITIALIZED =
base::LINKER_INITIALIZED;
namespace {
struct SpinLock_InitHelper {
SpinLock_InitHelper() {
// On multi-cpu machines, spin for longer before yielding
// the processor or sleeping. Reduces idle time significantly.
if (GetSystemCPUsCount() > 1) {
adaptive_spin_count = 1000;
}
}
};
// Hook into global constructor execution:
// We do not do adaptive spinning before that,
// but nothing lock-intensive should be going on at that time.
static SpinLock_InitHelper init_helper;
inline void SpinlockPause(void) {
#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
__asm__ __volatile__("rep; nop" : : );
#endif
}
} // unnamed namespace
// Monitor the lock to see if its value changes within some time
// period (adaptive_spin_count loop iterations). The last value read
// from the lock is returned from the method.
Atomic32 SpinLock::SpinLoop() {
int c = adaptive_spin_count;
while (base::subtle::NoBarrier_Load(&lockword_) != kSpinLockFree && --c > 0) {
SpinlockPause();
}
return base::subtle::Acquire_CompareAndSwap(&lockword_, kSpinLockFree,
kSpinLockSleeper);
}
void SpinLock::SlowLock() {
Atomic32 lock_value = SpinLoop();
int lock_wait_call_count = 0;
while (lock_value != kSpinLockFree) {
// If the lock is currently held, but not marked as having a sleeper, mark
// it as having a sleeper.
if (lock_value == kSpinLockHeld) {
// Here, just "mark" that the thread is going to sleep. Don't store the
// lock wait time in the lock as that will cause the current lock
// owner to think it experienced contention.
lock_value = base::subtle::Acquire_CompareAndSwap(&lockword_,
kSpinLockHeld,
kSpinLockSleeper);
if (lock_value == kSpinLockHeld) {
// Successfully transitioned to kSpinLockSleeper. Pass
// kSpinLockSleeper to the SpinLockDelay routine to properly indicate
// the last lock_value observed.
lock_value = kSpinLockSleeper;
} else if (lock_value == kSpinLockFree) {
// Lock is free again, so try and acquire it before sleeping. The
// new lock state will be the number of cycles this thread waited if
// this thread obtains the lock.
lock_value = base::subtle::Acquire_CompareAndSwap(&lockword_,
kSpinLockFree,
kSpinLockSleeper);
continue; // skip the delay at the end of the loop
}
}
// Wait for an OS specific delay.
base::internal::SpinLockDelay(&lockword_, lock_value,
++lock_wait_call_count);
// Spin again after returning from the wait routine to give this thread
// some chance of obtaining the lock.
lock_value = SpinLoop();
}
}
void SpinLock::SlowUnlock() {
// wake waiter if necessary
base::internal::SpinLockWake(&lockword_, false);
}


@ -1,143 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2006, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ---
* Author: Sanjay Ghemawat
*/
// SpinLock is async signal safe.
// If used within a signal handler, all lock holders
// should block the signal even outside the signal handler.
#ifndef BASE_SPINLOCK_H_
#define BASE_SPINLOCK_H_
#include "../config.h"
#include "base/atomicops.h"
#include "base/basictypes.h"
#include "base/dynamic_annotations.h"
#include "base/thread_annotations.h"
class LOCKABLE SpinLock {
public:
SpinLock() : lockword_(kSpinLockFree) { }
// Special constructor for use with static SpinLock objects. E.g.,
//
// static SpinLock lock(base::LINKER_INITIALIZED);
//
// When initialized using this constructor, we depend on the fact
// that the linker has already initialized the memory appropriately.
// A SpinLock constructed like this can be freely used from global
// initializers without worrying about the order in which global
// initializers run.
explicit SpinLock(base::LinkerInitialized /*x*/) {
// Does nothing; lockword_ is already initialized
}
// Acquire this SpinLock.
// TODO(csilvers): uncomment the annotation when we figure out how to
// support this macro with 0 args (see thread_annotations.h)
inline void Lock() /*EXCLUSIVE_LOCK_FUNCTION()*/ {
if (base::subtle::Acquire_CompareAndSwap(&lockword_, kSpinLockFree,
kSpinLockHeld) != kSpinLockFree) {
SlowLock();
}
ANNOTATE_RWLOCK_ACQUIRED(this, 1);
}
// Try to acquire this SpinLock without blocking and return true if the
// acquisition was successful. If the lock was not acquired, false is
// returned. If this SpinLock is free at the time of the call, TryLock
// will return true with high probability.
inline bool TryLock() EXCLUSIVE_TRYLOCK_FUNCTION(true) {
bool res =
(base::subtle::Acquire_CompareAndSwap(&lockword_, kSpinLockFree,
kSpinLockHeld) == kSpinLockFree);
if (res) {
ANNOTATE_RWLOCK_ACQUIRED(this, 1);
}
return res;
}
// Release this SpinLock, which must be held by the calling thread.
// TODO(csilvers): uncomment the annotation when we figure out how to
// support this macro with 0 args (see thread_annotations.h)
inline void Unlock() /*UNLOCK_FUNCTION()*/ {
ANNOTATE_RWLOCK_RELEASED(this, 1);
uint64 prev_value = static_cast<uint64>(
base::subtle::Release_AtomicExchange(&lockword_, kSpinLockFree));
if (prev_value != kSpinLockHeld) {
// Speed the wakeup of any waiter.
SlowUnlock();
}
}
// Determine if the lock is held. When the lock is held by the invoking
// thread, true will always be returned. Intended to be used as
// CHECK(lock.IsHeld()).
inline bool IsHeld() const {
return base::subtle::NoBarrier_Load(&lockword_) != kSpinLockFree;
}
static const base::LinkerInitialized LINKER_INITIALIZED; // backwards compat
private:
enum { kSpinLockFree = 0 };
enum { kSpinLockHeld = 1 };
enum { kSpinLockSleeper = 2 };
volatile Atomic32 lockword_;
void SlowLock();
void SlowUnlock();
Atomic32 SpinLoop();
DISALLOW_COPY_AND_ASSIGN(SpinLock);
};
// Corresponding locker object that arranges to acquire a spinlock for
// the duration of a C++ scope.
class SCOPED_LOCKABLE SpinLockHolder {
private:
SpinLock* lock_;
public:
inline explicit SpinLockHolder(SpinLock* l) EXCLUSIVE_LOCK_FUNCTION(l)
: lock_(l) {
l->Lock();
}
// TODO(csilvers): uncomment the annotation when we figure out how to
// support this macro with 0 args (see thread_annotations.h)
inline ~SpinLockHolder() /*UNLOCK_FUNCTION()*/ { lock_->Unlock(); }
};
// Catch bug where variable name is omitted, e.g. SpinLockHolder (&lock);
#define SpinLockHolder(x) COMPILE_ASSERT(0, spin_lock_decl_missing_var_name)
#endif // BASE_SPINLOCK_H_
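A brief usage sketch of SpinLock and SpinLockHolder; the counter and function names are illustrative assumptions:

#include "base/spinlock.h"

static SpinLock stats_lock(base::LINKER_INITIALIZED);   // safe as a global
static int64 bytes_in_use = 0;                           // int64 from base/basictypes.h

void AddBytes(int64 n) {
  SpinLockHolder h(&stats_lock);     // acquired here, released at end of scope
  bytes_in_use += n;
}

bool TryAddBytes(int64 n) {
  if (!stats_lock.TryLock()) return false;   // never blocks
  bytes_in_use += n;
  stats_lock.Unlock();
  return true;
}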


@ -1,102 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2010, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// The OS-specific header included below must provide two calls:
// base::internal::SpinLockDelay() and base::internal::SpinLockWake().
// See spinlock_internal.h for the spec of SpinLockWake().
// void SpinLockDelay(volatile Atomic32 *w, int32 value, int loop)
// SpinLockDelay() generates an appropriate spin delay on iteration "loop" of a
// spin loop on location *w, whose previously observed value was "value".
// SpinLockDelay() may do nothing, may yield the CPU, may sleep a clock tick,
// or may wait for a delay that can be truncated by a call to SpinlockWake(w).
// In all cases, it must return in bounded time even if SpinlockWake() is not
// called.
#include "base/spinlock_internal.h"
// forward declaration for use by spinlock_*-inl.h
namespace base { namespace internal { static int SuggestedDelayNS(int loop); }}
#if defined(_WIN32)
#include "base/spinlock_win32-inl.h"
#elif defined(__linux__)
#include "base/spinlock_linux-inl.h"
#else
#include "base/spinlock_posix-inl.h"
#endif
namespace base {
namespace internal {
// Return a suggested delay in nanoseconds for iteration number "loop"
static int SuggestedDelayNS(int loop) {
// Weak pseudo-random number generator to get some spread between threads
// when many are spinning.
#ifdef BASE_HAS_ATOMIC64
static base::subtle::Atomic64 rand;
uint64 r = base::subtle::NoBarrier_Load(&rand);
r = 0x5deece66dLL * r + 0xb; // numbers from nrand48()
base::subtle::NoBarrier_Store(&rand, r);
r <<= 16; // 48-bit random number now in top 48-bits.
if (loop < 0 || loop > 32) { // limit loop to 0..32
loop = 32;
}
// loop>>3 cannot exceed 4 because loop cannot exceed 32.
// Select top 20..24 bits of lower 48 bits,
// giving approximately 0ms to 16ms.
// Mean is exponential in loop for first 32 iterations, then 8ms.
// The futex path multiplies this by 16, since we expect explicit wakeups
// almost always on that path.
return r >> (44 - (loop >> 3));
#else
static Atomic32 rand;
uint32 r = base::subtle::NoBarrier_Load(&rand);
r = 0x343fd * r + 0x269ec3; // numbers from MSVC++
base::subtle::NoBarrier_Store(&rand, r);
r <<= 1; // 31-bit random number now in top 31-bits.
if (loop < 0 || loop > 32) { // limit loop to 0..32
loop = 32;
}
// loop>>3 cannot exceed 4 because loop cannot exceed 32.
// Select top 20..24 bits of lower 31 bits,
// giving approximately 0ms to 16ms.
// Mean is exponential in loop for first 32 iterations, then 8ms.
// The futex path multiplies this by 16, since we expect explicit wakeups
// almost always on that path.
return r >> (12 - (loop >> 3));
#endif
}
} // namespace internal
} // namespace base


@ -1,51 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2010, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ---
* This file is an internal part of spinlock.cc and once.cc
* It may not be used directly by code outside of //base.
*/
#ifndef BASE_SPINLOCK_INTERNAL_H_
#define BASE_SPINLOCK_INTERNAL_H_
#include "../config.h"
#include "base/basictypes.h"
#include "base/atomicops.h"
namespace base {
namespace internal {
void SpinLockWake(volatile Atomic32 *w, bool all);
void SpinLockDelay(volatile Atomic32 *w, int32 value, int loop);
} // namespace internal
} // namespace base
#endif


@ -1,101 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2009, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ---
* This file is a Linux-specific part of spinlock_internal.cc
*/
#include <errno.h>
#include <sched.h>
#include <time.h>
#include <limits.h>
#include "base/linux_syscall_support.h"
#define FUTEX_WAIT 0
#define FUTEX_WAKE 1
#define FUTEX_PRIVATE_FLAG 128
static bool have_futex;
static int futex_private_flag = FUTEX_PRIVATE_FLAG;
namespace {
static struct InitModule {
InitModule() {
int x = 0;
// futexes are ints, so we can use them only when
// that's the same size as the lockword_ in SpinLock.
have_futex = (sizeof (Atomic32) == sizeof (int) &&
sys_futex(&x, FUTEX_WAKE, 1, NULL, NULL, 0) >= 0);
if (have_futex &&
sys_futex(&x, FUTEX_WAKE | futex_private_flag, 1, NULL, NULL, 0) < 0) {
futex_private_flag = 0;
}
}
} init_module;
} // anonymous namespace
namespace base {
namespace internal {
void SpinLockDelay(volatile Atomic32 *w, int32 value, int loop) {
if (loop != 0) {
int save_errno = errno;
struct timespec tm;
tm.tv_sec = 0;
if (have_futex) {
tm.tv_nsec = base::internal::SuggestedDelayNS(loop);
} else {
tm.tv_nsec = 2000001; // above 2ms so linux 2.4 doesn't spin
}
if (have_futex) {
tm.tv_nsec *= 16; // increase the delay; we expect explicit wakeups
sys_futex(reinterpret_cast<int *>(const_cast<Atomic32 *>(w)),
FUTEX_WAIT | futex_private_flag,
value, reinterpret_cast<struct kernel_timespec *>(&tm),
NULL, 0);
} else {
nanosleep(&tm, NULL);
}
errno = save_errno;
}
}
void SpinLockWake(volatile Atomic32 *w, bool all) {
if (have_futex) {
sys_futex(reinterpret_cast<int *>(const_cast<Atomic32 *>(w)),
FUTEX_WAKE | futex_private_flag, all? INT_MAX : 1,
NULL, NULL, 0);
}
}
} // namespace internal
} // namespace base


@ -1,63 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2009, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ---
* This file is a Posix-specific part of spinlock_internal.cc
*/
#include "../config.h"
#include <errno.h>
#ifdef HAVE_SCHED_H
#include <sched.h> /* For sched_yield() */
#endif
#include <time.h> /* For nanosleep() */
namespace base {
namespace internal {
void SpinLockDelay(volatile Atomic32 *w, int32 value, int loop) {
int save_errno = errno;
if (loop == 0) {
} else if (loop == 1) {
sched_yield();
} else {
struct timespec tm;
tm.tv_sec = 0;
tm.tv_nsec = base::internal::SuggestedDelayNS(loop);
nanosleep(&tm, NULL);
}
errno = save_errno;
}
void SpinLockWake(volatile Atomic32 *w, bool all) {
}
} // namespace internal
} // namespace base


@ -1,54 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2009, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ---
* This file is a Win32-specific part of spinlock_internal.cc
*/
#include <windows.h>
namespace base {
namespace internal {
void SpinLockDelay(volatile Atomic32 *w, int32 value, int loop) {
if (loop == 0) {
} else if (loop == 1) {
Sleep(0);
} else {
Sleep(base::internal::SuggestedDelayNS(loop) / 1000000);
}
}
void SpinLockWake(volatile Atomic32 *w, bool all) {
}
} // namespace internal
} // namespace base

Some files were not shown because too many files have changed in this diff.