diff --git a/.gitignore b/.gitignore index 15c484e4c39..6c0865d1959 100644 --- a/.gitignore +++ b/.gitignore @@ -10,12 +10,13 @@ *.logrt /build -/docs/en_single_page/ -/docs/ru_single_page/ -/docs/venv/ -/docs/build/ +/docs/build +/docs/edit +/docs/tools/venv/ /docs/en/development/build/ /docs/ru/development/build/ +/docs/en/single.md +/docs/ru/single.md # callgrind files callgrind.out.* @@ -177,7 +178,6 @@ utils/zookeeper-create-entry-to-download-part/zookeeper-create-entry-to-download utils/zookeeper-dump-tree/zookeeper-dump-tree utils/zookeeper-remove-by-list/zookeeper-remove-by-list dbms/src/Storages/tests/remove_symlink_directory -dbms/tests/queries/1_stateful debian/control debian/copyright debian/tmp/ @@ -240,3 +240,6 @@ node_modules public website/docs website/presentations +website/package-lock.json +.DS_Store +*/.DS_Store diff --git a/.gitmodules b/.gitmodules index 1f392b73c83..c43b754dba8 100644 --- a/.gitmodules +++ b/.gitmodules @@ -15,7 +15,7 @@ url = https://github.com/google/cctz.git [submodule "contrib/zlib-ng"] path = contrib/zlib-ng - url = https://github.com/Dead2/zlib-ng.git + url = https://github.com/ClickHouse-Extras/zlib-ng.git [submodule "contrib/googletest"] path = contrib/googletest url = https://github.com/google/googletest.git @@ -37,3 +37,12 @@ [submodule "contrib/llvm"] path = contrib/llvm url = https://github.com/ClickHouse-Extras/llvm +[submodule "contrib/mariadb-connector-c"] + path = contrib/mariadb-connector-c + url = https://github.com/MariaDB/mariadb-connector-c.git +[submodule "contrib/jemalloc"] + path = contrib/jemalloc + url = https://github.com/jemalloc/jemalloc.git +[submodule "contrib/unixodbc"] + path = contrib/unixodbc + url = https://github.com/ClickHouse-Extras/UnixODBC.git diff --git a/.travis.yml b/.travis.yml index 705b6977114..d658b8d285c 100644 --- a/.travis.yml +++ b/.travis.yml @@ -3,26 +3,6 @@ language: generic matrix: fast_finish: true include: -# - os: linux -# -# cache: -# ccache: true -# timeout: 1000 -# 
-# addons: -# apt: -# update: true -# sources: -# - ubuntu-toolchain-r-test -# packages: [ g++-7, libicu-dev, libreadline-dev, libmysqlclient-dev, unixodbc-dev, libltdl-dev, libssl-dev, libboost-dev, zlib1g-dev, libdouble-conversion-dev, libsparsehash-dev, librdkafka-dev, libcapnp-dev, libsparsehash-dev, libgoogle-perftools-dev, bash, expect, python, python-lxml, python-termcolor, curl, perl, sudo, openssl ] -# -# env: -# - MATRIX_EVAL="export CC=gcc-7 && export CXX=g++-7" -# -# script: -# - env TEST_RUN= utils/travis/normal.sh - - # We need to have gcc7 headers to compile c++17 code on clang - os: linux @@ -41,33 +21,11 @@ matrix: packages: [ ninja-build, g++-7, clang-5.0, lld-5.0, libicu-dev, libreadline-dev, libmysqlclient-dev, unixodbc-dev, libltdl-dev, libssl-dev, libboost-dev, zlib1g-dev, libdouble-conversion-dev, libsparsehash-dev, librdkafka-dev, libcapnp-dev, libsparsehash-dev, libgoogle-perftools-dev, bash, expect, python, python-lxml, python-termcolor, curl, perl, sudo, openssl] env: - - MATRIX_EVAL="export CC=clang-5.0 && export CXX=clang++-5.0" + - MATRIX_EVAL="export CC=clang-5.0 CXX=clang++-5.0" script: - utils/travis/normal.sh - -# TODO: fix internal compiler -# - os: linux -# -# sudo: required -# -# cache: -# timeout: 1000 -# directories: -# - /var/cache/pbuilder/ccache -# -# addons: -# apt: -# packages: [ pbuilder, fakeroot, debhelper ] -# -# env: -# - MATRIX_EVAL="export DEB_CC=clang-5.0 && export DEB_CXX=clang++-5.0" -# -# script: -# - utils/travis/pbuilder.sh - - - os: linux sudo: required @@ -85,69 +43,6 @@ matrix: script: - utils/travis/pbuilder.sh - -# - os: linux -# -# sudo: required -# -# cache: -# timeout: 1000 -# directories: -# - /var/cache/pbuilder/ccache -# -# addons: -# apt: -# update: true -# packages: [ pbuilder, fakeroot, debhelper ] -# -# env: -# - MATRIX_EVAL="export ARCH=i386" -# -# script: -# - env PBUILDER_TIMEOUT=40m TEST_TRUE=true TEST_RUN= utils/travis/pbuilder.sh - - -# TODO: Can't bootstrap bionic on trusty host -# - os: 
linux -# -# sudo: required -# -# cache: -# timeout: 1000 -# directories: -# - /var/cache/pbuilder/ccache -# -# addons: -# apt: -# update: true -# packages: [ pbuilder, fakeroot, debhelper ] -# -# env: -# - MATRIX_EVAL="export DEB_CC=clang-6.0 && export DEB_CXX=clang++-6.0 && export DIST=bionic && export EXTRAPACKAGES='clang-6.0 lld-6.0'" -# -# script: -# - utils/travis/pbuilder.sh - - -# Cant fit to time limit (48min) -# - os: osx -# osx_image: xcode9.2 -# -# cache: -# ccache: true -# timeout: 1000 -# -# before_install: -# - brew install unixodbc gcc ccache libtool gettext zlib readline double-conversion gperftools google-sparsehash lz4 zstd || true -# - brew link --overwrite gcc || true -# -# env: -# - MATRIX_EVAL="export CC=gcc-8 && export CXX=g++-8" -# -# script: -# - env CMAKE_FLAGS="-DUSE_INTERNAL_BOOST_LIBRARY=1" utils/travis/normal.sh - - allow_failures: - os: osx diff --git a/CHANGELOG.draft.md b/CHANGELOG.draft.md index 811a7c2bb4e..93c681b0336 100644 --- a/CHANGELOG.draft.md +++ b/CHANGELOG.draft.md @@ -1,4 +1 @@ -# en: - - -# ru: +## RU diff --git a/CHANGELOG.md b/CHANGELOG.md index b2660fad300..99994b0621d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,28 +1,227 @@ -# ClickHouse release 1.1.54385, 2018-06-01 +## ClickHouse release 18.5.1, 2018-07-31 -## Bug fixes: +### New features: + +* Added the hash function `murmurHash2_32` [#2756](https://github.com/yandex/ClickHouse/pull/2756). + +### Improvements: + +* Now you can use the `from_env` attribute to set values in config files from environment variables [#2741](https://github.com/yandex/ClickHouse/pull/2741). +* Added case-insensitive versions of the `coalesce`, `ifNull`, and `nullIf functions` [#2752](https://github.com/yandex/ClickHouse/pull/2752). + +### Bug fixes: + +* Fixed a possible bug when starting a replica [#2759](https://github.com/yandex/ClickHouse/pull/2759). 
+ +## ClickHouse release 18.4.0, 2018-07-28 + +### New features: + +* Added system tables: `formats`, `data_type_families`, `aggregate_function_combinators`, `table_functions`, `table_engines`, `collations` [#2721](https://github.com/yandex/ClickHouse/pull/2721). +* Added the ability to use a table function instead of a table as an argument of a `remote` or `cluster` table function [#2708](https://github.com/yandex/ClickHouse/pull/2708). +* Support for `HTTP Basic` authentication in the replication protocol [#2727](https://github.com/yandex/ClickHouse/pull/2727). +* The `has` function now allows searching for a numeric value in an array of `Enum` values [Maxim Khrisanfov](https://github.com/yandex/ClickHouse/pull/2699). +* Support for adding arbitrary message separators when reading from `Kafka` [Amos Bird](https://github.com/yandex/ClickHouse/pull/2701). + +### Improvements: + +* The `ALTER TABLE t DELETE WHERE` query does not rewrite data chunks that were not affected by the WHERE condition [#2694](https://github.com/yandex/ClickHouse/pull/2694). +* The `use_minimalistic_checksums_in_zookeeper` option for `ReplicatedMergeTree` tables is enabled by default. This setting was added in version 1.1.54378, 2018-04-16. Versions that are older than 1.1.54378 can no longer be installed. +* Support for running `KILL` and `OPTIMIZE` queries that specify `ON CLUSTER` [Winter Zhang](https://github.com/yandex/ClickHouse/pull/2689). + +### Bug fixes: + +* Fixed the error `Column ... is not under an aggregate function and not in GROUP BY` for aggregation with an IN expression. This bug appeared in version 18.1.0. ([bbdd780b](https://github.com/yandex/ClickHouse/commit/bbdd780be0be06a0f336775941cdd536878dd2c2)) +* Fixed a bug in the `windowFunnel` aggregate function [Winter Zhang](https://github.com/yandex/ClickHouse/pull/2735). 
+* Fixed a bug in the `anyHeavy` aggregate function ([a2101df2](https://github.com/yandex/ClickHouse/commit/a2101df25a6a0fba99aa71f8793d762af2b801ee)) +* Fixed server crash when using the `countArray()` aggregate function. + +### Backward incompatible changes: + +* Parameters for `Kafka` engine was changed from `Kafka(kafka_broker_list, kafka_topic_list, kafka_group_name, kafka_format[, kafka_schema, kafka_num_consumers])` to `Kafka(kafka_broker_list, kafka_topic_list, kafka_group_name, kafka_format[, kafka_row_delimiter, kafka_schema, kafka_num_consumers])`. If your tables use `kafka_schema` or `kafka_num_consumers` parameters, you have to manually edit the metadata files `path/metadata/database/table.sql` and add `kafka_row_delimiter` parameter with `''` value. + +## ClickHouse release 18.1.0, 2018-07-23 + +### New features: + +* Support for the `ALTER TABLE t DELETE WHERE` query for non-replicated MergeTree tables ([#2634](https://github.com/yandex/ClickHouse/pull/2634)). +* Support for arbitrary types for the `uniq*` family of aggregate functions ([#2010](https://github.com/yandex/ClickHouse/issues/2010)). +* Support for arbitrary types in comparison operators ([#2026](https://github.com/yandex/ClickHouse/issues/2026)). +* The `users.xml` file allows setting a subnet mask in the format `10.0.0.1/255.255.255.0`. This is necessary for using masks for IPv6 networks with zeros in the middle ([#2637](https://github.com/yandex/ClickHouse/pull/2637)). +* Added the `arrayDistinct` function ([#2670](https://github.com/yandex/ClickHouse/pull/2670)). +* The SummingMergeTree engine can now work with AggregateFunction type columns ([Constantin S. Pan](https://github.com/yandex/ClickHouse/pull/2566)). + +### Improvements: + +* Changed the numbering scheme for release versions. 
Now the first part contains the year of release (A.D., Moscow timezone, minus 2000), the second part contains the number for major changes (increases for most releases), and the third part is the patch version. Releases are still backwards compatible, unless otherwise stated in the changelog. +* Faster conversions of floating-point numbers to a string ([Amos Bird](https://github.com/yandex/ClickHouse/pull/2664)). +* If some rows were skipped during an insert due to parsing errors (this is possible with the `input_allow_errors_num` and `input_allow_errors_ratio` settings enabled), the number of skipped rows is now written to the server log ([Leonardo Cecchi](https://github.com/yandex/ClickHouse/pull/2669)). + +### Bug fixes: + +* Fixed the TRUNCATE command for temporary tables ([Amos Bird](https://github.com/yandex/ClickHouse/pull/2624)). +* Fixed a rare deadlock in the ZooKeeper client library that occurred when there was a network error while reading the response ([c315200](https://github.com/yandex/ClickHouse/commit/c315200e64b87e44bdf740707fc857d1fdf7e947)). +* Fixed an error during a CAST to Nullable types ([#1322](https://github.com/yandex/ClickHouse/issues/1322)). +* Fixed the incorrect result of the `maxIntersection()` function when the boundaries of intervals coincided ([Michael Furmur](https://github.com/yandex/ClickHouse/pull/2657)). +* Fixed incorrect transformation of the OR expression chain in a function argument ([chenxing-xc](https://github.com/yandex/ClickHouse/pull/2663)). +* Fixed performance degradation for queries containing `IN (subquery)` expressions inside another subquery ([#2571](https://github.com/yandex/ClickHouse/issues/2571)). +* Fixed incompatibility between servers with different versions in distributed queries that use a `CAST` function that isn't in uppercase letters ([fe8c4d6](https://github.com/yandex/ClickHouse/commit/fe8c4d64e434cacd4ceef34faa9005129f2190a5)). 
+* Added missing quoting of identifiers for queries to an external DBMS ([#2635](https://github.com/yandex/ClickHouse/issues/2635)). + +### Backward incompatible changes: + +* Converting a string containing the number zero to DateTime does not work. Example: `SELECT toDateTime('0')`. This is also the reason that `DateTime DEFAULT '0'` does not work in tables, as well as `0` in dictionaries. Solution: replace `0` with `0000-00-00 00:00:00`. + + +## ClickHouse release 1.1.54394, 2018-07-12 + +### New features: + +* Added the `histogram` aggregate function ([Mikhail Surin](https://github.com/yandex/ClickHouse/pull/2521)). +* Now `OPTIMIZE TABLE ... FINAL` can be used without specifying partitions for `ReplicatedMergeTree` ([Amos Bird](https://github.com/yandex/ClickHouse/pull/2600)). + +### Bug fixes: + +* Fixed a problem with a very small timeout for sockets (one second) for reading and writing when sending and downloading replicated data, which made it impossible to download larger parts if there is a load on the network or disk (it resulted in cyclical attempts to download parts). This error occurred in version 1.1.54388. +* Fixed issues when using chroot in ZooKeeper if you inserted duplicate data blocks in the table. +* The `has` function now works correctly for an array with Nullable elements ([#2115](https://github.com/yandex/ClickHouse/issues/2115)). +* The `system.tables` table now works correctly when used in distributed queries. The `metadata_modification_time` and `engine_full` columns are now non-virtual. Fixed an error that occurred if only these columns were requested from the table. +* Fixed how an empty `TinyLog` table works after inserting an empty data block ([#2563](https://github.com/yandex/ClickHouse/issues/2563)). +* The `system.zookeeper` table works if the value of the node in ZooKeeper is NULL. 
+ +## ClickHouse release 1.1.54390, 2018-07-06 + +### New features: + +* Queries can be sent in `multipart/form-data` format (in the `query` field), which is useful if external data is also sent for query processing ([Olga Hvostikova](https://github.com/yandex/ClickHouse/pull/2490)). +* Added the ability to enable or disable processing single or double quotes when reading data in CSV format. You can configure this in the `format_csv_allow_single_quotes` and `format_csv_allow_double_quotes` settings ([Amos Bird](https://github.com/yandex/ClickHouse/pull/2574)). +* Now `OPTIMIZE TABLE ... FINAL` can be used without specifying the partition for non-replicated variants of `MergeTree` ([Amos Bird](https://github.com/yandex/ClickHouse/pull/2599)). + +### Improvements: + +* Improved performance, reduced memory consumption, and correct tracking of memory consumption with use of the IN operator when a table index could be used ([#2584](https://github.com/yandex/ClickHouse/pull/2584)). +* Removed redundant checking of checksums when adding a data part. This is important when there are a large number of replicas, because in these cases the total number of checks was equal to N^2. +* Added support for `Array(Tuple(...))` arguments for the `arrayEnumerateUniq` function ([#2573](https://github.com/yandex/ClickHouse/pull/2573)). +* Added `Nullable` support for the `runningDifference` function ([#2594](https://github.com/yandex/ClickHouse/pull/2594)). +* Improved query analysis performance when there is a very large number of expressions ([#2572](https://github.com/yandex/ClickHouse/pull/2572)). +* Faster selection of data parts for merging in `ReplicatedMergeTree` tables. Faster recovery of the ZooKeeper session ([#2597](https://github.com/yandex/ClickHouse/pull/2597)). 
+* The `format_version.txt` file for `MergeTree` tables is re-created if it is missing, which makes sense if ClickHouse is launched after copying the directory structure without files ([Ciprian Hacman](https://github.com/yandex/ClickHouse/pull/2593)). + +### Bug fixes: + +* Fixed a bug when working with ZooKeeper that could make it impossible to recover the session and readonly states of tables before restarting the server. +* Fixed a bug when working with ZooKeeper that could result in old nodes not being deleted if the session is interrupted. +* Fixed an error in the `quantileTDigest` function for Float arguments (this bug was introduced in version 1.1.54388) ([Mikhail Surin](https://github.com/yandex/ClickHouse/pull/2553)). +* Fixed a bug in the index for MergeTree tables if the primary key column is located inside the function for converting types between signed and unsigned integers of the same size ([#2603](https://github.com/yandex/ClickHouse/pull/2603)). +* Fixed segfault if `macros` are used but they aren't in the config file ([#2570](https://github.com/yandex/ClickHouse/pull/2570)). +* Fixed switching to the default database when reconnecting the client ([#2583](https://github.com/yandex/ClickHouse/pull/2583)). +* Fixed a bug that occurred when the `use_index_for_in_with_subqueries` setting was disabled. + +### Security fix: + +* Sending files is no longer possible when connected to MySQL (`LOAD DATA LOCAL INFILE`). + +## ClickHouse release 1.1.54388, 2018-06-28 + +### New features: + +* Support for the `ALTER TABLE t DELETE WHERE` query for replicated tables. Added the `system.mutations` table to track progress of this type of queries. +* Support for the `ALTER TABLE t [REPLACE|ATTACH] PARTITION` query for MergeTree tables. +* Support for the `TRUNCATE TABLE` query ([Winter Zhang](https://github.com/yandex/ClickHouse/pull/2260)). 
+* Several new `SYSTEM` queries for replicated tables (`RESTART REPLICAS`, `SYNC REPLICA`, `[STOP|START] [MERGES|FETCHES|SENDS REPLICATED|REPLICATION QUEUES]`). +* Added the ability to write to a table with the MySQL engine and the corresponding table function ([sundy-li](https://github.com/yandex/ClickHouse/pull/2294)). +* Added the `url()` table function and the `URL` table engine ([Alexander Sapin](https://github.com/yandex/ClickHouse/pull/2501)). +* Added the `windowFunnel` aggregate function ([sundy-li](https://github.com/yandex/ClickHouse/pull/2352)). +* New `startsWith` and `endsWith` functions for strings ([Vadim Plakhtinsky](https://github.com/yandex/ClickHouse/pull/2429)). +* The `numbers()` table function now allows you to specify the offset ([Winter Zhang](https://github.com/yandex/ClickHouse/pull/2535)). +* The password to `clickhouse-client` can be entered interactively. +* Server logs can now be sent to syslog ([Alexander Krasheninnikov](https://github.com/yandex/ClickHouse/pull/2459)). +* Support for logging in dictionaries with a shared library source ([Alexander Sapin](https://github.com/yandex/ClickHouse/pull/2472)). +* Support for custom CSV delimiters ([Ivan Zhukov](https://github.com/yandex/ClickHouse/pull/2263)). +* Added the `date_time_input_format` setting. If you switch this setting to `'best_effort'`, DateTime values will be read in a wide range of formats. +* Added the `clickhouse-obfuscator` utility for data obfuscation. Usage example: publishing data used in performance tests. + +### Experimental features: + +* Added the ability to calculate `and` arguments only where they are needed ([Anastasia Tsarkova](https://github.com/yandex/ClickHouse/pull/2272)). +* JIT compilation to native code is now available for some expressions ([pyos](https://github.com/yandex/ClickHouse/pull/2277)). + +### Bug fixes: + +* Duplicates no longer appear for a query with `DISTINCT` and `ORDER BY`. 
+* Queries with `ARRAY JOIN` and `arrayFilter` no longer return an incorrect result. +* Fixed an error when reading an array column from a Nested structure ([#2066](https://github.com/yandex/ClickHouse/issues/2066)). +* Fixed an error when analyzing queries with a HAVING section like `HAVING tuple IN (...)`. +* Fixed an error when analyzing queries with recursive aliases. +* Fixed an error when reading from ReplacingMergeTree with a condition in PREWHERE that filters all rows ([#2525](https://github.com/yandex/ClickHouse/issues/2525)). +* User profile settings were not applied when using sessions in the HTTP interface. +* Fixed how settings are applied from the command line parameters in `clickhouse-local`. +* The ZooKeeper client library now uses the session timeout received from the server. +* Fixed a bug in the ZooKeeper client library when the client waited for the server response longer than the timeout. +* Fixed pruning of parts for queries with conditions on partition key columns ([#2342](https://github.com/yandex/ClickHouse/issues/2342)). +* Merges are now possible after `CLEAR COLUMN IN PARTITION` ([#2315](https://github.com/yandex/ClickHouse/issues/2315)). +* Type mapping in the ODBC table function has been fixed ([sundy-li](https://github.com/yandex/ClickHouse/pull/2268)). +* Type comparisons have been fixed for `DateTime` with and without the time zone ([Alexander Bocharov](https://github.com/yandex/ClickHouse/pull/2400)). +* Fixed syntactic parsing and formatting of the `CAST` operator. +* Fixed insertion into a materialized view for the Distributed table engine ([Babacar Diassé](https://github.com/yandex/ClickHouse/pull/2411)). +* Fixed a race condition when writing data from the `Kafka` engine to materialized views ([Yangkuan Liu](https://github.com/yandex/ClickHouse/pull/2448)). +* Fixed SSRF in the `remote()` table function. +* Fixed exit behavior of `clickhouse-client` in multiline mode ([#2510](https://github.com/yandex/ClickHouse/issues/2510)). 
+ +### Improvements: + +* Background tasks in replicated tables are now performed in a thread pool instead of in separate threads ([Silviu Caragea](https://github.com/yandex/ClickHouse/pull/1722)). +* Improved LZ4 compression performance. +* Faster analysis for queries with a large number of JOINs and sub-queries. +* The DNS cache is now updated automatically when there are too many network errors. +* Table inserts no longer occur if the insert into one of the materialized views is not possible because it has too many parts. +* Corrected the discrepancy in the event counters `Query`, `SelectQuery`, and `InsertQuery`. +* Expressions like `tuple IN (SELECT tuple)` are allowed if the tuple types match. +* A server with replicated tables can start even if you haven't configured ZooKeeper. +* When calculating the number of available CPU cores, limits on cgroups are now taken into account ([Atri Sharma](https://github.com/yandex/ClickHouse/pull/2325)). +* Added chown for config directories in the systemd config file ([Mikhail Shiryaev](https://github.com/yandex/ClickHouse/pull/2421)). + +### Build changes: + +* The gcc8 compiler can be used for builds. +* Added the ability to build llvm from a submodule. +* The version of the librdkafka library has been updated to v0.11.4. +* Added the ability to use the system libcpuid library. The library version has been updated to 0.4.0. +* Fixed the build using the vectorclass library ([Babacar Diassé](https://github.com/yandex/ClickHouse/pull/2274)). +* Cmake now generates files for ninja by default (like when using `-G Ninja`). +* Added the ability to use the libtinfo library instead of libtermcap ([Georgy Kondratiev](https://github.com/yandex/ClickHouse/pull/2519)). +* Fixed a header file conflict in Fedora Rawhide ([#2520](https://github.com/yandex/ClickHouse/issues/2520)). + +### Backward incompatible changes: + +* Removed escaping in `Vertical` and `Pretty*` formats and deleted the `VerticalRaw` format. 
+* If servers with version 1.1.54388 (or newer) and servers with older version are used simultaneously in distributed query and the query has `cast(x, 'Type')` expression in the form without `AS` keyword and with `cast` not in uppercase, then the exception with message like `Not found column cast(0, 'UInt8') in block` will be thrown. Solution: update server on all cluster nodes. + +## ClickHouse release 1.1.54385, 2018-06-01 + +### Bug fixes: * Fixed an error that in some cases caused ZooKeeper operations to block. -# ClickHouse release 1.1.54383, 2018-05-22 +## ClickHouse release 1.1.54383, 2018-05-22 -## Bug fixes: +### Bug fixes: * Fixed a slowdown of replication queue if a table has many replicas. -# ClickHouse release 1.1.54381, 2018-05-14 +## ClickHouse release 1.1.54381, 2018-05-14 -## Bug fixes: +### Bug fixes: * Fixed a nodes leak in ZooKeeper when ClickHouse loses connection to ZooKeeper server. -# ClickHouse release 1.1.54380, 2018-04-21 +## ClickHouse release 1.1.54380, 2018-04-21 -## New features: +### New features: * Added table function `file(path, format, structure)`. An example reading bytes from `/dev/urandom`: `ln -s /dev/urandom /var/lib/clickhouse/user_files/random` `clickhouse-client -q "SELECT * FROM file('random', 'RowBinary', 'd UInt8') LIMIT 10"`. -## Improvements: +### Improvements: * Subqueries could be wrapped by `()` braces (to enhance queries readability). For example, `(SELECT 1) UNION ALL (SELECT 1)`. * Simple `SELECT` queries from table `system.processes` are not counted in `max_concurrent_queries` limit. -## Bug fixes: +### Bug fixes: * Fixed incorrect behaviour of `IN` operator when select from `MATERIALIZED VIEW`. * Fixed incorrect filtering by partition index in expressions like `WHERE partition_key_column IN (...)` * Fixed inability to execute `OPTIMIZE` query on non-leader replica if the table was `REANAME`d. @@ -30,11 +229,11 @@ * Fixed freezing of `KILL QUERY` queries. 
* Fixed an error in ZooKeeper client library which led to watches loses, freezing of distributed DDL queue and slowing replication queue if non-empty `chroot` prefix is used in ZooKeeper configuration. -## Backward incompatible changes: +### Backward incompatible changes: * Removed support of expressions like `(a, b) IN (SELECT (a, b))` (instead of them you can use their equivalent `(a, b) IN (SELECT a, b)`). In previous releases, these expressions led to undetermined data filtering or caused errors. -# ClickHouse release 1.1.54378, 2018-04-16 -## New features: +## ClickHouse release 1.1.54378, 2018-04-16 +### New features: * Logging level can be changed without restarting the server. * Added the `SHOW CREATE DATABASE` query. @@ -48,7 +247,7 @@ * Multiple comma-separated `topics` can be specified for the `Kafka` engine (Tobias Adamson). * When a query is stopped by `KILL QUERY` or `replace_running_query`, the client receives the `Query was cancelled` exception instead of an incomplete response. -## Improvements: +### Improvements: * `ALTER TABLE ... DROP/DETACH PARTITION` queries are run at the front of the replication queue. * `SELECT ... FINAL` and `OPTIMIZE ... FINAL` can be used even when the table has a single data part. @@ -59,7 +258,7 @@ * More robust crash recovery for asynchronous insertion into `Distributed` tables. * The return type of the `countEqual` function changed from `UInt32` to `UInt64` (谢磊). -## Bug fixes: +### Bug fixes: * Fixed an error with `IN` when the left side of the expression is `Nullable`. * Correct results are now returned when using tuples with `IN` when some of the tuple components are in the table index. @@ -75,31 +274,31 @@ * `SummingMergeTree` now works correctly for summation of nested data structures with a composite key. * Fixed the possibility of a race condition when choosing the leader for `ReplicatedMergeTree` tables. 
-## Build changes: +### Build changes: * The build supports `ninja` instead of `make` and uses it by default for building releases. * Renamed packages: `clickhouse-server-base` is now `clickhouse-common-static`; `clickhouse-server-common` is now `clickhouse-server`; `clickhouse-common-dbg` is now `clickhouse-common-static-dbg`. To install, use `clickhouse-server clickhouse-client`. Packages with the old names will still load in the repositories for backward compatibility. -## Backward-incompatible changes: +### Backward-incompatible changes: * Removed the special interpretation of an IN expression if an array is specified on the left side. Previously, the expression `arr IN (set)` was interpreted as "at least one `arr` element belongs to the `set`". To get the same behavior in the new version, write `arrayExists(x -> x IN (set), arr)`. * Disabled the incorrect use of the socket option `SO_REUSEPORT`, which was incorrectly enabled by default in the Poco library. Note that on Linux there is no longer any reason to simultaneously specify the addresses `::` and `0.0.0.0` for listen – use just `::`, which allows listening to the connection both over IPv4 and IPv6 (with the default kernel config settings). You can also revert to the behavior from previous versions by specifying `1` in the config. -# ClickHouse release 1.1.54370, 2018-03-16 +## ClickHouse release 1.1.54370, 2018-03-16 -## New features: +### New features: * Added the `system.macros` table and auto updating of macros when the config file is changed. * Added the `SYSTEM RELOAD CONFIG` query. * Added the `maxIntersections(left_col, right_col)` aggregate function, which returns the maximum number of simultaneously intersecting intervals `[left; right]`. The `maxIntersectionsPosition(left, right)` function returns the beginning of the "maximum" interval. ([Michael Furmur](https://github.com/yandex/ClickHouse/pull/2012)). 
-## Improvements: +### Improvements: * When inserting data in a `Replicated` table, fewer requests are made to `ZooKeeper` (and most of the user-level errors have disappeared from the `ZooKeeper` log). * Added the ability to create aliases for sets. Example: `WITH (1, 2, 3) AS set SELECT number IN set FROM system.numbers LIMIT 10`. -## Bug fixes: +### Bug fixes: * Fixed the `Illegal PREWHERE` error when reading from `Merge` tables over `Distributed` tables. * Added fixes that allow you to run `clickhouse-server` in IPv4-only Docker containers. @@ -113,9 +312,9 @@ * Restored the behavior for queries like `SELECT * FROM remote('server2', default.table) WHERE col IN (SELECT col2 FROM default.table)` when the right side argument of the `IN` should use a remote `default.table` instead of a local one. This behavior was broken in version 1.1.54358. * Removed extraneous error-level logging of `Not found column ... in block`. -# ClickHouse release 1.1.54356, 2018-03-06 +## ClickHouse release 1.1.54356, 2018-03-06 -## New features: +### New features: * Aggregation without `GROUP BY` for an empty set (such as `SELECT count(*) FROM table WHERE 0`) now returns a result with one row with null values for aggregate functions, in compliance with the SQL standard. To restore the old behavior (return an empty result), set `empty_result_for_aggregation_by_empty_set` to 1. * Added type conversion for `UNION ALL`. Different alias names are allowed in `SELECT` positions in `UNION ALL`, in compliance with the SQL standard. @@ -150,7 +349,7 @@ * `RENAME TABLE` can be performed for `VIEW`. * Added the `odbc_default_field_size` option, which allows you to extend the maximum size of the value loaded from an ODBC source (by default, it is 1024). -## Improvements: +### Improvements: * Limits and quotas on the result are no longer applied to intermediate data for `INSERT SELECT` queries or for `SELECT` subqueries. 
* Fewer false triggers of `force_restore_data` when checking the status of `Replicated` tables when the server starts. @@ -166,7 +365,7 @@ * `Enum` values can be used in `min`, `max`, `sum` and some other functions. In these cases, it uses the corresponding numeric values. This feature was previously available but was lost in the release 1.1.54337. * Added `max_expanded_ast_elements` to restrict the size of the AST after recursively expanding aliases. -## Bug fixes: +### Bug fixes: * Fixed cases when unnecessary columns were removed from subqueries in error, or not removed from subqueries containing `UNION ALL`. * Fixed a bug in merges for `ReplacingMergeTree` tables. @@ -192,18 +391,18 @@ * Fixed a crash when passing arrays of different sizes to an `arrayReduce` function when using aggregate functions from multiple arguments. * Prohibited the use of queries with `UNION ALL` in a `MATERIALIZED VIEW`. -## Backward incompatible changes: +### Backward incompatible changes: * Removed the `distributed_ddl_allow_replicated_alter` option. This behavior is enabled by default. * Removed the `UnsortedMergeTree` engine. -# ClickHouse release 1.1.54343, 2018-02-05 +## ClickHouse release 1.1.54343, 2018-02-05 * Added macros support for defining cluster names in distributed DDL queries and constructors of Distributed tables: `CREATE TABLE distr ON CLUSTER '{cluster}' (...) ENGINE = Distributed('{cluster}', 'db', 'table')`. * Now the table index is used for conditions like `expr IN (subquery)`. * Improved processing of duplicates when inserting to Replicated tables, so they no longer slow down execution of the replication queue. -# ClickHouse release 1.1.54342, 2018-01-22 +## ClickHouse release 1.1.54342, 2018-01-22 This release contains bug fixes for the previous release 1.1.54337: * Fixed a regression in 1.1.54337: if the default user has readonly access, then the server refuses to start up with the message `Cannot create database in readonly mode`. 
@@ -214,9 +413,9 @@ This release contains bug fixes for the previous release 1.1.54337: * Buffer tables now work correctly when MATERIALIZED columns are present in the destination table (by zhang2014). * Fixed a bug in implementation of NULL. -# ClickHouse release 1.1.54337, 2018-01-18 +## ClickHouse release 1.1.54337, 2018-01-18 -## New features: +### New features: * Added support for storage of multidimensional arrays and tuples (`Tuple` data type) in tables. * Added support for table functions in `DESCRIBE` and `INSERT` queries. Added support for subqueries in `DESCRIBE`. Examples: `DESC TABLE remote('host', default.hits)`; `DESC TABLE (SELECT 1)`; `INSERT INTO TABLE FUNCTION remote('host', default.hits)`. Support for `INSERT INTO TABLE` syntax in addition to `INSERT INTO`. @@ -247,7 +446,7 @@ This release contains bug fixes for the previous release 1.1.54337: * Added the `--silent` option for the `clickhouse-local` tool. It suppresses printing query execution info in stderr. * Added support for reading values of type `Date` from text in a format where the month and/or day of the month is specified using a single digit instead of two digits (Amos Bird). -## Performance optimizations: +### Performance optimizations: * Improved performance of `min`, `max`, `any`, `anyLast`, `anyHeavy`, `argMin`, `argMax` aggregate functions for String arguments. * Improved performance of `isInfinite`, `isFinite`, `isNaN`, `roundToExp2` functions. @@ -256,7 +455,7 @@ This release contains bug fixes for the previous release 1.1.54337: * Lowered memory usage for `JOIN` in the case when the left and right parts have columns with identical names that are not contained in `USING`. * Improved performance of `varSamp`, `varPop`, `stddevSamp`, `stddevPop`, `covarSamp`, `covarPop`, and `corr` aggregate functions by reducing computational stability. 
The old functions are available under the names: `varSampStable`, `varPopStable`, `stddevSampStable`, `stddevPopStable`, `covarSampStable`, `covarPopStable`, `corrStable`. -## Bug fixes: +### Bug fixes: * Fixed data deduplication after running a `DROP PARTITION` query. In the previous version, dropping a partition and INSERTing the same data again was not working because INSERTed blocks were considered duplicates. * Fixed a bug that could lead to incorrect interpretation of the `WHERE` clause for `CREATE MATERIALIZED VIEW` queries with `POPULATE`. @@ -295,7 +494,7 @@ This release contains bug fixes for the previous release 1.1.54337: * Fixed the `SYSTEM DROP DNS CACHE` query: the cache was flushed but addresses of cluster nodes were not updated. * Fixed the behavior of `MATERIALIZED VIEW` after executing `DETACH TABLE` for the table under the view (Marek Vavruša). -## Build improvements: +### Build improvements: * Builds use `pbuilder`. The build process is almost completely independent of the build host environment. * A single build is used for different OS versions. Packages and binaries have been made compatible with a wide range of Linux systems. @@ -309,7 +508,7 @@ This release contains bug fixes for the previous release 1.1.54337: * Removed usage of GNU extensions from the code. Enabled the `-Wextra` option. When building with `clang`, `libc++` is used instead of `libstdc++`. * Extracted `clickhouse_parsers` and `clickhouse_common_io` libraries to speed up builds of various tools. -## Backward incompatible changes: +### Backward incompatible changes: * The format for marks in `Log` type tables that contain `Nullable` columns was changed in a backward incompatible way. If you have these tables, you should convert them to the `TinyLog` type before starting up the new server version. To do this, replace `ENGINE = Log` with `ENGINE = TinyLog` in the corresponding `.sql` file in the `metadata` directory. 
If your table doesn't have `Nullable` columns or if the type of your table is not `Log`, then you don't need to do anything. * Removed the `experimental_allow_extended_storage_definition_syntax` setting. Now this feature is enabled by default. @@ -320,16 +519,16 @@ This release contains bug fixes for the previous release 1.1.54337: * In previous server versions there was an undocumented feature: if an aggregate function depends on parameters, you can still specify it without parameters in the AggregateFunction data type. Example: `AggregateFunction(quantiles, UInt64)` instead of `AggregateFunction(quantiles(0.5, 0.9), UInt64)`. This feature was lost. Although it was undocumented, we plan to support it again in future releases. * Enum data types cannot be used in min/max aggregate functions. The possibility will be returned back in future release. -## Please note when upgrading: +### Please note when upgrading: * When doing a rolling update on a cluster, at the point when some of the replicas are running the old version of ClickHouse and some are running the new version, replication is temporarily stopped and the message `unknown parameter 'shard'` appears in the log. Replication will continue after all replicas of the cluster are updated. * If you have different ClickHouse versions on the cluster, you can get incorrect results for distributed queries with the aggregate functions `varSamp`, `varPop`, `stddevSamp`, `stddevPop`, `covarSamp`, `covarPop`, and `corr`. You should update all cluster nodes. -# ClickHouse release 1.1.54327, 2017-12-21 +## ClickHouse release 1.1.54327, 2017-12-21 This release contains bug fixes for the previous release 1.1.54318: * Fixed bug with possible race condition in replication that could lead to data loss. This issue affects versions 1.1.54310 and 1.1.54318. If you use one of these versions with Replicated tables, the update is strongly recommended. This issue shows in logs in Warning messages like `Part ... 
from own log doesn't exist.` The issue is relevant even if you don't see these messages in logs. -# ClickHouse release 1.1.54318, 2017-11-30 +## ClickHouse release 1.1.54318, 2017-11-30 This release contains bug fixes for the previous release 1.1.54310: * Fixed incorrect row deletions during merges in the SummingMergeTree engine @@ -338,9 +537,9 @@ This release contains bug fixes for the previous release 1.1.54310: * Fixed an issue that was causing the replication queue to stop running * Fixed rotation and archiving of server logs -# ClickHouse release 1.1.54310, 2017-11-01 +## ClickHouse release 1.1.54310, 2017-11-01 -## New features: +### New features: * Custom partitioning key for the MergeTree family of table engines. * [Kafka](https://clickhouse.yandex/docs/en/single/index.html#document-table_engines/kafka) table engine. * Added support for loading [CatBoost](https://catboost.yandex/) models and applying them to data stored in ClickHouse. @@ -356,12 +555,12 @@ This release contains bug fixes for the previous release 1.1.54310: * Added support for the Cap'n Proto input format. * You can now customize compression level when using the zstd algorithm. -## Backward incompatible changes: +### Backward incompatible changes: * Creation of temporary tables with an engine other than Memory is forbidden. * Explicit creation of tables with the View or MaterializedView engine is forbidden. * During table creation, a new check verifies that the sampling key expression is included in the primary key. -## Bug fixes: +### Bug fixes: * Fixed hangups when synchronously inserting into a Distributed table. * Fixed nonatomic adding and removing of parts in Replicated tables. * Data inserted into a materialized view is not subjected to unnecessary deduplication. @@ -371,15 +570,15 @@ This release contains bug fixes for the previous release 1.1.54310: * Fixed hangups when the disk volume containing server logs is full. 
* Fixed an overflow in the `toRelativeWeekNum` function for the first week of the Unix epoch. -## Build improvements: +### Build improvements: * Several third-party libraries (notably Poco) were updated and converted to git submodules. -# ClickHouse release 1.1.54304, 2017-10-19 +## ClickHouse release 1.1.54304, 2017-10-19 -## New features: +### New features: * TLS support in the native protocol (to enable, set `tcp_ssl_port` in `config.xml`) -## Bug fixes: +### Bug fixes: * `ALTER` for replicated tables now tries to start running as soon as possible * Fixed crashing when reading data with the setting `preferred_block_size_bytes=0` * Fixed crashes of `clickhouse-client` when `Page Down` is pressed @@ -392,16 +591,16 @@ This release contains bug fixes for the previous release 1.1.54310: * Users are updated correctly when `users.xml` is invalid * Correct handling when an executable dictionary returns a non-zero response code -# ClickHouse release 1.1.54292, 2017-09-20 +## ClickHouse release 1.1.54292, 2017-09-20 -## New features: +### New features: * Added the `pointInPolygon` function for working with coordinates on a coordinate plane. * Added the `sumMap` aggregate function for calculating the sum of arrays, similar to `SummingMergeTree`. * Added the `trunc` function. Improved performance of the rounding functions (`round`, `floor`, `ceil`, `roundToExp2`) and corrected the logic of how they work. Changed the logic of the `roundToExp2` function for fractions and negative numbers. * The ClickHouse executable file is now less dependent on the libc version. The same ClickHouse executable file can run on a wide variety of Linux systems. Note: There is still a dependency when using compiled queries (with the setting `compile = 1`, which is not used by default). * Reduced the time needed for dynamic compilation of queries. -## Bug fixes: +### Bug fixes: * Fixed an error that sometimes produced `part ... intersects previous part` messages and weakened replica consistency. 
* Fixed an error that caused the server to lock up if ZooKeeper was unavailable during shutdown. * Removed excessive logging when restoring replicas. @@ -409,9 +608,9 @@ This release contains bug fixes for the previous release 1.1.54310: * Fixed an error in the concat function that occurred if the first column in a block has the Array type. * Progress is now displayed correctly in the system.merges table. -# ClickHouse release 1.1.54289, 2017-09-13 +## ClickHouse release 1.1.54289, 2017-09-13 -## New features: +### New features: * `SYSTEM` queries for server administration: `SYSTEM RELOAD DICTIONARY`, `SYSTEM RELOAD DICTIONARIES`, `SYSTEM DROP DNS CACHE`, `SYSTEM SHUTDOWN`, `SYSTEM KILL`. * Added functions for working with arrays: `concat`, `arraySlice`, `arrayPushBack`, `arrayPushFront`, `arrayPopBack`, `arrayPopFront`. * Added the `root` and `identity` parameters for the ZooKeeper configuration. This allows you to isolate individual users on the same ZooKeeper cluster. @@ -426,7 +625,7 @@ This release contains bug fixes for the previous release 1.1.54310: * Option to set `umask` in the config file. * Improved performance for queries with `DISTINCT`. -## Bug fixes: +### Bug fixes: * Improved the process for deleting old nodes in ZooKeeper. Previously, old nodes sometimes didn't get deleted if there were very frequent inserts, which caused the server to be slow to shut down, among other things. * Fixed randomization when choosing hosts for the connection to ZooKeeper. * Fixed the exclusion of lagging replicas in distributed queries if the replica is localhost. @@ -439,28 +638,28 @@ This release contains bug fixes for the previous release 1.1.54310: * Resolved the appearance of zombie processes when using a dictionary with an `executable` source. * Fixed segfault for the HEAD query. -## Improvements to development workflow and ClickHouse build: +### Improvements to development workflow and ClickHouse build: * You can use `pbuilder` to build ClickHouse. 
* You can use `libc++` instead of `libstdc++` for builds on Linux. * Added instructions for using static code analysis tools: `Coverity`, `clang-tidy`, and `cppcheck`. -## Please note when upgrading: +### Please note when upgrading: * There is now a higher default value for the MergeTree setting `max_bytes_to_merge_at_max_space_in_pool` (the maximum total size of data parts to merge, in bytes): it has increased from 100 GiB to 150 GiB. This might result in large merges running after the server upgrade, which could cause an increased load on the disk subsystem. If the free space available on the server is less than twice the total amount of the merges that are running, this will cause all other merges to stop running, including merges of small data parts. As a result, INSERT requests will fail with the message "Merges are processing significantly slower than inserts." Use the `SELECT * FROM system.merges` request to monitor the situation. You can also check the `DiskSpaceReservedForMerge` metric in the `system.metrics` table, or in Graphite. You don't need to do anything to fix this, since the issue will resolve itself once the large merges finish. If you find this unacceptable, you can restore the previous value for the `max_bytes_to_merge_at_max_space_in_pool` setting (to do this, go to the `` section in config.xml, set `107374182400` and restart the server). -# ClickHouse release 1.1.54284, 2017-08-29 +## ClickHouse release 1.1.54284, 2017-08-29 * This is bugfix release for previous 1.1.54282 release. It fixes ZooKeeper nodes leak in `parts/` directory. -# ClickHouse release 1.1.54282, 2017-08-23 +## ClickHouse release 1.1.54282, 2017-08-23 This is a bugfix release. The following bugs were fixed: * `DB::Exception: Assertion violation: !_path.empty()` error when inserting into a Distributed table. * Error when parsing inserted data in RowBinary format if the data begins with ';' character. * Errors during runtime compilation of certain aggregate functions (e.g. 
`groupArray()`). -# ClickHouse release 1.1.54276, 2017-08-16 +## ClickHouse release 1.1.54276, 2017-08-16 -## New features: +### New features: * You can use an optional WITH clause in a SELECT query. Example query: `WITH 1+1 AS a SELECT a, a*a` * INSERT can be performed synchronously in a Distributed table: OK is returned only after all the data is saved on all the shards. This is activated by the setting insert_distributed_sync=1. @@ -471,7 +670,7 @@ This is a bugfix release. The following bugs were fixed: * Added support for non-constant arguments and negative offsets in the function `substring(str, pos, len).` * Added the max_size parameter for the `groupArray(max_size)(column)` aggregate function, and optimized its performance. -## Major changes: +### Major changes: * Improved security: all server files are created with 0640 permissions (can be changed via config parameter). * Improved error messages for queries with invalid syntax. @@ -479,11 +678,11 @@ This is a bugfix release. The following bugs were fixed: * Significantly increased the performance of data merges for the ReplacingMergeTree engine. * Improved performance for asynchronous inserts from a Distributed table by batching multiple source inserts. To enable this functionality, use the setting distributed_directory_monitor_batch_inserts=1. -## Backward incompatible changes: +### Backward incompatible changes: * Changed the binary format of aggregate states of `groupArray(array_column)` functions for arrays. -## Complete list of changes: +### Complete list of changes: * Added the `output_format_json_quote_denormals` setting, which enables outputting nan and inf values in JSON format. * Optimized thread allocation when reading from a Distributed table. @@ -502,7 +701,7 @@ This is a bugfix release. The following bugs were fixed: * It is possible to connect to MySQL through a socket in the file system. * The `system.parts` table has a new column with information about the size of marks, in bytes. 
-## Bug fixes: +### Bug fixes: * Distributed tables using a Merge table now work correctly for a SELECT query with a condition on the _table field. * Fixed a rare race condition in ReplicatedMergeTree when checking data parts. @@ -526,15 +725,15 @@ This is a bugfix release. The following bugs were fixed: * Fixed the "Cannot mremap" error when using arrays in IN and JOIN clauses with more than 2 billion elements. * Fixed the failover for dictionaries with MySQL as the source. -## Improved workflow for developing and assembling ClickHouse: +### Improved workflow for developing and assembling ClickHouse: * Builds can be assembled in Arcadia. * You can use gcc 7 to compile ClickHouse. * Parallel builds using ccache+distcc are faster now. -# ClickHouse release 1.1.54245, 2017-07-04 +## ClickHouse release 1.1.54245, 2017-07-04 -## New features: +### New features: * Distributed DDL (for example, `CREATE TABLE ON CLUSTER`). * The replicated request `ALTER TABLE CLEAR COLUMN IN PARTITION.` @@ -546,16 +745,16 @@ This is a bugfix release. The following bugs were fixed: * Sessions in the HTTP interface. * The OPTIMIZE query for a Replicated table can can run not only on the leader. -## Backward incompatible changes: +### Backward incompatible changes: * Removed SET GLOBAL. -## Minor changes: +### Minor changes: * If an alert is triggered, the full stack trace is printed into the log. * Relaxed the verification of the number of damaged or extra data parts at startup (there were too many false positives). -## Bug fixes: +### Bug fixes: * Fixed a bad connection "sticking" when inserting into a Distributed table. * GLOBAL IN now works for a query from a Merge table that looks at a Distributed table. 
diff --git a/CHANGELOG_RU.md b/CHANGELOG_RU.md index 2fe43529a5a..7988e7850c6 100644 --- a/CHANGELOG_RU.md +++ b/CHANGELOG_RU.md @@ -1,6 +1,129 @@ -# ClickHouse release 1.1.54388, 2018-06-28 +## ClickHouse release 18.6.0, 2018-08-02 -## Новые возможности: +### Новые возможности: +* Добавлена поддержка ON выражений для JOIN ON синтаксиса: +`JOIN ON Expr([table.]column, ...) = Expr([table.]column, ...) [AND Expr([table.]column, ...) = Expr([table.]column, ...) ...]` +Выражение должно представлять из себя цепочку равенств, объединенных оператором AND. Каждая часть равенства может являться произвольным выражением над столбцами одной из таблиц. Поддержана возможность использования fully qualified имен столбцов (`table.name`, `database.table.name`, `table_alias.name`, `subquery_alias.name`) для правой таблицы. [#2742](https://github.com/yandex/ClickHouse/pull/2742) +* Добавлена возможность включить HTTPS для репликации. [#2760](https://github.com/yandex/ClickHouse/pull/2760) + +### Улучшения: +* Сервер передаёт на клиент также patch-компонент своей версии. Данные о patch компоненте версии добавлены в `system.processes` и `query_log`. [#2646](https://github.com/yandex/ClickHouse/pull/2646) + + +## ClickHouse release 18.5.1, 2018-07-31 + +### Новые возможности: +* Добавлена функция хеширования `murmurHash2_32` [#2756](https://github.com/yandex/ClickHouse/pull/2756). + +### Улучшения: +* Добавлена возможность указывать значения в конфигурационных файлах из переменных окружения с помощью атрибута `from_env` [#2741](https://github.com/yandex/ClickHouse/pull/2741). +* Добавлены регистронезависимые версии функций `coalesce`, `ifNull`, `nullIf` [#2752](https://github.com/yandex/ClickHouse/pull/2752). + +### Исправление ошибок: +* Исправлена возможная ошибка при старте реплики [#2759](https://github.com/yandex/ClickHouse/pull/2759). 
+ + +## ClickHouse release 18.4.0, 2018-07-28 + +### Новые возможности: +* Добавлены системные таблицы `formats`, `data_type_families`, `aggregate_function_combinators`, `table_functions`, `table_engines`, `collations` [#2721](https://github.com/yandex/ClickHouse/pull/2721). +* Добавлена возможность использования табличной функции вместо таблицы в качестве аргумента табличной функции `remote` и `cluster` [#2708](https://github.com/yandex/ClickHouse/pull/2708). +* Поддержка `HTTP Basic` аутентификации в протоколе репликации [#2727](https://github.com/yandex/ClickHouse/pull/2727). +* В функции `has` добавлена возможность поиска в массиве значений типа `Enum` по числовому значению [Maxim Khrisanfov](https://github.com/yandex/ClickHouse/pull/2699). +* Поддержка добавления произвольных разделителей сообщений в процессе чтения из `Kafka` [Amos Bird](https://github.com/yandex/ClickHouse/pull/2701). + +### Улучшения: +* Запрос `ALTER TABLE t DELETE WHERE` не перезаписывает куски данных, которые не были затронуты условием WHERE [#2694](https://github.com/yandex/ClickHouse/pull/2694). +* Настройка `use_minimalistic_checksums_in_zookeeper` таблиц семейства `ReplicatedMergeTree` включена по-умолчанию. Эта настройка была добавлена в версии 1.1.54378, 2018-04-16. Установка версий, более старых, чем 1.1.54378, становится невозможной. +* Поддерживается запуск запросов `KILL` и `OPTIMIZE` с указанием `ON CLUSTER` [Winter Zhang](https://github.com/yandex/ClickHouse/pull/2689). + +### Исправление ошибок: +* Исправлена ошибка `Column ... is not under aggregate function and not in GROUP BY` в случае агрегации по выражению с оператором IN. Ошибка появилась в версии 18.1.0. ([bbdd780b](https://github.com/yandex/ClickHouse/commit/bbdd780be0be06a0f336775941cdd536878dd2c2)) +* Исправлена ошибка в агрегатной функции `windowFunnel` [Winter Zhang](https://github.com/yandex/ClickHouse/pull/2735). 
+* Исправлена ошибка в агрегатной функции `anyHeavy` ([a2101df2](https://github.com/yandex/ClickHouse/commit/a2101df25a6a0fba99aa71f8793d762af2b801ee)) +* Исправлено падение сервера при использовании функции `countArray()`. + +### Обратно несовместимые изменения: + +* Список параметров для таблиц `Kafka` был изменён с `Kafka(kafka_broker_list, kafka_topic_list, kafka_group_name, kafka_format[, kafka_schema, kafka_num_consumers])` на `Kafka(kafka_broker_list, kafka_topic_list, kafka_group_name, kafka_format[, kafka_row_delimiter, kafka_schema, kafka_num_consumers])`. Если вы использовали параметры `kafka_schema` или `kafka_num_consumers`, вам необходимо вручную отредактировать файлы с метаданными `path/metadata/database/table.sql`, добавив параметр `kafka_row_delimiter` со значением `''` в соответствующее место. + + +## ClickHouse release 18.1.0, 2018-07-23 + +### Новые возможности: +* Поддержка запроса `ALTER TABLE t DELETE WHERE` для нереплицированных MergeTree-таблиц ([#2634](https://github.com/yandex/ClickHouse/pull/2634)). +* Поддержка произвольных типов для семейства агрегатных функций `uniq*` ([#2010](https://github.com/yandex/ClickHouse/issues/2010)). +* Поддержка произвольных типов в операторах сравнения ([#2026](https://github.com/yandex/ClickHouse/issues/2026)). +* Возможность в `users.xml` указывать маску подсети в формате `10.0.0.1/255.255.255.0`. Это необходимо для использования "дырявых" масок IPv6 сетей ([#2637](https://github.com/yandex/ClickHouse/pull/2637)). +* Добавлена функция `arrayDistinct` ([#2670](https://github.com/yandex/ClickHouse/pull/2670)). +* Движок SummingMergeTree теперь может работать со столбцами типа AggregateFunction ([Constantin S. Pan](https://github.com/yandex/ClickHouse/pull/2566)). + +### Улучшения: +* Изменена схема версионирования релизов. 
Теперь первый компонент содержит год релиза (A.D.; по московскому времени; из номера вычитается 2000), второй - номер крупных изменений (увеличивается для большинства релизов), третий - патч-версия. Релизы по-прежнему обратно совместимы, если другое не указано в changelog. +* Ускорено преобразование чисел с плавающей точкой в строку ([Amos Bird](https://github.com/yandex/ClickHouse/pull/2664)). +* Теперь, если при вставке из-за ошибок парсинга пропущено некоторое количество строк (такое возможно про включённых настройках `input_allow_errors_num`, `input_allow_errors_ratio`), это количество пишется в лог сервера ([Leonardo Cecchi](https://github.com/yandex/ClickHouse/pull/2669)). + +### Исправление ошибок: +* Исправлена работа команды TRUNCATE для временных таблиц ([Amos Bird](https://github.com/yandex/ClickHouse/pull/2624)). +* Исправлен редкий deadlock в клиентской библиотеке ZooKeeper, который возникал при сетевой ошибке во время вычитывания ответа ([c315200](https://github.com/yandex/ClickHouse/commit/c315200e64b87e44bdf740707fc857d1fdf7e947)). +* Исправлена ошибка при CAST в Nullable типы ([#1322](https://github.com/yandex/ClickHouse/issues/1322)). +* Исправлен неправильный результат функции `maxIntersection()` в случае совпадения границ отрезков ([Michael Furmur](https://github.com/yandex/ClickHouse/pull/2657)). +* Исправлено неверное преобразование цепочки OR-выражений в аргументе функции ([chenxing-xc](https://github.com/yandex/ClickHouse/pull/2663)). +* Исправлена деградация производительности запросов, содержащих выражение `IN (подзапрос)` внутри другого подзапроса ([#2571](https://github.com/yandex/ClickHouse/issues/2571)). +* Исправлена несовместимость серверов разных версий при распределённых запросах, использующих функцию `CAST` не в верхнем регистре ([fe8c4d6](https://github.com/yandex/ClickHouse/commit/fe8c4d64e434cacd4ceef34faa9005129f2190a5)). 
+* Добавлено недостающее квотирование идентификаторов при запросах к внешним СУБД ([#2635](https://github.com/yandex/ClickHouse/issues/2635)). + +### Обратно несовместимые изменения: +* Не работает преобразование строки, содержащей число ноль, в DateTime. Пример: `SELECT toDateTime('0')`. По той же причине не работает `DateTime DEFAULT '0'` в таблицах, а также `0` в словарях. Решение: заменить `0` на `0000-00-00 00:00:00`. + + +## ClickHouse release 1.1.54394, 2018-07-12 + +### Новые возможности: +* Добавлена агрегатная функция `histogram` ([Михаил Сурин](https://github.com/yandex/ClickHouse/pull/2521)). +* Возможность использования `OPTIMIZE TABLE ... FINAL` без указания партиции для `ReplicatedMergeTree` ([Amos Bird](https://github.com/yandex/ClickHouse/pull/2600)). + +### Исправление ошибок: +* Исправлена ошибка - выставление слишком маленького таймаута у сокетов (одна секунда) для чтения и записи при отправке и скачивании реплицируемых данных, что приводило к невозможности скачать куски достаточно большого размера при наличии некоторой нагрузки на сеть или диск (попытки скачивания кусков циклически повторяются). Ошибка возникла в версии 1.1.54388. +* Исправлена работа при использовании chroot в ZooKeeper, в случае вставки дублирующихся блоков данных в таблицу. +* Исправлена работа функции `has` для случая массива с Nullable элементами ([#2115](https://github.com/yandex/ClickHouse/issues/2521)). +* Исправлена работа таблицы `system.tables` при её использовании в распределённых запросах; столбцы `metadata_modification_time` и `engine_full` сделаны невиртуальными; исправлена ошибка в случае, если из таблицы были запрошены только эти столбцы. +* Исправлена работа пустой таблицы типа `TinyLog` после вставки в неё пустого блока данных ([#2563](https://github.com/yandex/ClickHouse/issues/2563)). +* Таблица `system.zookeeper` работает в случае, если значение узла в ZooKeeper равно NULL. 
+ + +## ClickHouse release 1.1.54390, 2018-07-06 + +### Новые возможности: +* Возможность отправки запроса в формате `multipart/form-data` (в поле `query`), что полезно, если при этом также отправляются внешние данные для обработки запроса ([Ольга Хвостикова](https://github.com/yandex/ClickHouse/pull/2490)). +* Добавлена возможность включить или отключить обработку одинарных или двойных кавычек при чтении данных в формате CSV. Это задаётся настройками `format_csv_allow_single_quotes` и `format_csv_allow_double_quotes` ([Amos Bird](https://github.com/yandex/ClickHouse/pull/2574)) +* Возможность использования `OPTIMIZE TABLE ... FINAL` без указания партиции для не реплицированных вариантов`MergeTree` ([Amos Bird](https://github.com/yandex/ClickHouse/pull/2599)). + +### Улучшения: +* Увеличена производительность, уменьшено потребление памяти, добавлен корректный учёт потребления памяти, при использовании оператора IN в случае, когда для его работы может использоваться индекс таблицы ([#2584](https://github.com/yandex/ClickHouse/pull/2584)). +* Убраны избыточные проверки чексумм при добавлении куска. Это важно в случае большого количества реплик, так как в этом случае суммарное количество проверок было равно N^2. +* Добавлена поддержка аргументов типа `Array(Tuple(...))` для функции `arrayEnumerateUniq` ([#2573](https://github.com/yandex/ClickHouse/pull/2573)). +* Добавлена поддержка `Nullable` для функции `runningDifference`. ([#2594](https://github.com/yandex/ClickHouse/pull/2594)) +* Увеличена производительность анализа запроса в случае очень большого количества выражений ([#2572](https://github.com/yandex/ClickHouse/pull/2572)). +* Более быстрый выбор кусков для слияния в таблицах типа `ReplicatedMergeTree`. Более быстрое восстановление сессии с ZooKeeper. ([#2597](https://github.com/yandex/ClickHouse/pull/2597)). 
+* Файл `format_version.txt` для таблиц семейства `MergeTree` создаётся заново при его отсутствии, что имеет смысл в случае запуска ClickHouse после копирования структуры директорий без файлов ([Ciprian Hacman](https://github.com/yandex/ClickHouse/pull/2593)). + +### Исправление ошибок: +* Исправлена ошибка при работе с ZooKeeper, которая могла приводить к невозможности восстановления сессии и readonly состояниям таблиц до перезапуска сервера. +* Исправлена ошибка при работе с ZooKeeper, которая могла приводить к неудалению старых узлов при разрыве сессии. +* Исправлена ошибка в функции `quantileTDigest` для Float аргументов (ошибка появилась в версии 1.1.54388) ([Михаил Сурин](https://github.com/yandex/ClickHouse/pull/2553)). +* Исправлена ошибка работы индекса таблиц типа MergeTree, если в условии, столбец первичного ключа расположен внутри функции преобразования типов между знаковым и беззнаковым целым одного размера ([#2603](https://github.com/yandex/ClickHouse/pull/2603)). +* Исправлен segfault, если в конфигурационном файле нет `macros`, но они используются ([#2570](https://github.com/yandex/ClickHouse/pull/2570)). +* Исправлено переключение на базу данных по-умолчанию при переподключении клиента ([#2583](https://github.com/yandex/ClickHouse/pull/2583)). +* Исправлена ошибка в случае отключенной настройки `use_index_for_in_with_subqueries`. + +### Исправления безопасности: +* При соединениях с MySQL удалена возможность отправки файлов (`LOAD DATA LOCAL INFILE`). + + +## ClickHouse release 1.1.54388, 2018-06-28 + +### Новые возможности: * Добавлена поддержка запроса `ALTER TABLE t DELETE WHERE` для реплицированных таблиц и таблица `system.mutations`. * Добавлена поддержка запроса `ALTER TABLE t [REPLACE|ATTACH] PARTITION` для *MergeTree-таблиц. * Добавлена поддержка запроса `TRUNCATE TABLE` ([Winter Zhang](https://github.com/yandex/ClickHouse/pull/2260)) @@ -17,11 +140,11 @@ * Добавлена настройка `date_time_input_format`. 
Если переключить эту настройку в значение `'best_effort'`, значения DateTime будут читаться в широком диапазоне форматов. * Добавлена утилита `clickhouse-obfuscator` для обфускации данных. Пример использования: публикация данных, используемых в тестах производительности. -## Экспериментальные возможности: +### Экспериментальные возможности: * Добавлена возможность вычислять аргументы функции `and` только там, где они нужны ([Анастасия Царькова](https://github.com/yandex/ClickHouse/pull/2272)) * Добавлена возможность JIT-компиляции в нативный код некоторых выражений ([pyos](https://github.com/yandex/ClickHouse/pull/2277)). -## Исправление ошибок: +### Исправление ошибок: * Исправлено появление дублей в запросе с `DISTINCT` и `ORDER BY`. * Запросы с `ARRAY JOIN` и `arrayFilter` раньше возвращали некорректный результат. * Исправлена ошибка при чтении столбца-массива из Nested-структуры ([#2066](https://github.com/yandex/ClickHouse/issues/2066)). @@ -42,7 +165,7 @@ * Исправлена SSRF в табличной функции remote(). * Исправлен выход из `clickhouse-client` в multiline-режиме ([#2510](https://github.com/yandex/ClickHouse/issues/2510)). -## Улучшения: +### Улучшения: * Фоновые задачи в реплицированных таблицах теперь выполняются не в отдельных потоках, а в пуле потоков ([Silviu Caragea](https://github.com/yandex/ClickHouse/pull/1722)) * Улучшена производительность разжатия LZ4. * Ускорен анализ запроса с большим числом JOIN-ов и подзапросов. @@ -54,7 +177,7 @@ * При расчёте количества доступных ядер CPU теперь учитываются ограничения cgroups ([Atri Sharma](https://github.com/yandex/ClickHouse/pull/2325)). * Добавлен chown директорий конфигов в конфигурационном файле systemd ([Михаил Ширяев](https://github.com/yandex/ClickHouse/pull/2421)). -## Изменения сборки: +### Изменения сборки: * Добавлена возможность сборки компилятором gcc8. * Добавлена возможность сборки llvm из submodule. * Используемая версия библиотеки librdkafka обновлена до v0.11.4. 
@@ -64,33 +187,34 @@ * Добавлена возможность использования библиотеки libtinfo вместо libtermcap ([Георгий Кондратьев](https://github.com/yandex/ClickHouse/pull/2519)). * Исправлен конфликт заголовочных файлов в Fedora Rawhide ([#2520](https://github.com/yandex/ClickHouse/issues/2520)). -## Обратно несовместимые изменения: +### Обратно несовместимые изменения: * Убран escaping в форматах `Vertical` и `Pretty*`, удалён формат `VerticalRaw`. +* Если в распределённых запросах одновременно участвуют серверы версии 1.1.54388 или новее и более старые, то при использовании выражения `cast(x, 'Type')`, записанного без указания `AS`, если слово `cast` указано не в верхнем регистре, возникает ошибка вида `Not found column cast(0, 'UInt8') in block`. Решение: обновить сервер на всём кластере. -# ClickHouse release 1.1.54385, 2018-06-01 -## Исправление ошибок: +## ClickHouse release 1.1.54385, 2018-06-01 +### Исправление ошибок: * Исправлена ошибка, которая в некоторых случаях приводила к блокировке операций с ZooKeeper. -# ClickHouse release 1.1.54383, 2018-05-22 -## Исправление ошибок: +## ClickHouse release 1.1.54383, 2018-05-22 +### Исправление ошибок: * Исправлена деградация скорости выполнения очереди репликации при большом количестве реплик -# ClickHouse release 1.1.54381, 2018-05-14 +## ClickHouse release 1.1.54381, 2018-05-14 -## Исправление ошибок: +### Исправление ошибок: * Исправлена ошибка, приводящая к "утеканию" метаданных в ZooKeeper при потере соединения с сервером ZooKeeper. -# ClickHouse release 1.1.54380, 2018-04-21 +## ClickHouse release 1.1.54380, 2018-04-21 -## Новые возможности: +### Новые возможности: * Добавлена табличная функция `file(path, format, structure)`. Пример, читающий байты из `/dev/urandom`: `ln -s /dev/urandom /var/lib/clickhouse/user_files/random` `clickhouse-client -q "SELECT * FROM file('random', 'RowBinary', 'd UInt8') LIMIT 10"`. 
-## Улучшения: +### Улучшения: * Добавлена возможность оборачивать подзапросы скобками `()` для повышения читаемости запросов. Например: `(SELECT 1) UNION ALL (SELECT 1)`. * Простые запросы `SELECT` из таблицы `system.processes` не учитываются в ограничении `max_concurrent_queries`. -## Исправление ошибок: +### Исправление ошибок: * Исправлена неправильная работа оператора `IN` в `MATERIALIZED VIEW`. * Исправлена неправильная работа индекса по ключу партиционирования в выражениях типа `partition_key_column IN (...)`. * Исправлена невозможность выполнить `OPTIMIZE` запрос на лидирующей реплике после выполнения `RENAME` таблицы. @@ -98,13 +222,13 @@ * Исправлены зависания запросов `KILL QUERY`. * Исправлена ошибка в клиентской библиотеке ZooKeeper, которая при использовании непустого префикса `chroot` в конфигурации приводила к потере watch'ей, остановке очереди distributed DDL запросов и замедлению репликации. -## Обратно несовместимые изменения: +### Обратно несовместимые изменения: * Убрана поддержка выражений типа `(a, b) IN (SELECT (a, b))` (можно использовать эквивалентное выражение `(a, b) IN (SELECT a, b)`). Раньше такие запросы могли приводить к недетерминированной фильтрации в `WHERE`. -# ClickHouse release 1.1.54378, 2018-04-16 +## ClickHouse release 1.1.54378, 2018-04-16 -## Новые возможности: +### Новые возможности: * Возможность изменения уровня логгирования без перезагрузки сервера. * Добавлен запрос `SHOW CREATE DATABASE`. @@ -118,7 +242,7 @@ * Возможность указания нескольких `topics` через запятую для движка `Kafka` (Tobias Adamson) * При остановке запроса по причине `KILL QUERY` или `replace_running_query`, клиент получает исключение `Query was cancelled` вместо неполного результата. -## Улучшения: +### Улучшения: * Запросы вида `ALTER TABLE ... DROP/DETACH PARTITION` выполняются впереди очереди репликации. * Возможность использовать `SELECT ... FINAL` и `OPTIMIZE ... FINAL` даже в случае, если данные в таблице представлены одним куском. 
@@ -129,7 +253,7 @@ * Более надёжное восстановление после сбоев при асинхронной вставке в `Distributed` таблицы. * Возвращаемый тип функции `countEqual` изменён с `UInt32` на `UInt64` (谢磊) -## Исправление ошибок: +### Исправление ошибок: * Исправлена ошибка c `IN` где левая часть выражения `Nullable`. * Исправлен неправильный результат при использовании кортежей с `IN` в случае, если часть компонентов кортежа есть в индексе таблицы. @@ -145,31 +269,31 @@ * Исправлена работа `SummingMergeTree` в случае суммирования вложенных структур данных с составным ключом. * Исправлена возможность возникновения race condition при выборе лидера таблиц `ReplicatedMergeTree`. -## Изменения сборки: +### Изменения сборки: * Поддержка `ninja` вместо `make` при сборке. `ninja` используется по-умолчанию при сборке релизов. * Переименованы пакеты `clickhouse-server-base` в `clickhouse-common-static`; `clickhouse-server-common` в `clickhouse-server`; `clickhouse-common-dbg` в `clickhouse-common-static-dbg`. Для установки используйте `clickhouse-server clickhouse-client`. Для совместимости, пакеты со старыми именами продолжают загружаться в репозиторий. -## Обратно несовместимые изменения: +### Обратно несовместимые изменения: * Удалена специальная интерпретация выражения IN, если слева указан массив. Ранее выражение вида `arr IN (set)` воспринималось как "хотя бы один элемент `arr` принадлежит множеству `set`". Для получения такого же поведения в новой версии, напишите `arrayExists(x -> x IN (set), arr)`. * Отключено ошибочное использование опции сокета `SO_REUSEPORT` (которая по ошибке включена по-умолчанию в библиотеке Poco). Стоит обратить внимание, что на Linux системах теперь не имеет смысла указывать одновременно адреса `::` и `0.0.0.0` для listen - следует использовать лишь адрес `::`, который (с настройками ядра по-умолчанию) позволяет слушать соединения как по IPv4 так и по IPv6. Также вы можете вернуть поведение старых версий, указав в конфиге `<listen_reuse_port>1</listen_reuse_port>`. 
-# ClickHouse release 1.1.54370, 2018-03-16 +## ClickHouse release 1.1.54370, 2018-03-16 -## Новые возможности: +### Новые возможности: * Добавлена системная таблица `system.macros` и автоматическое обновление макросов при изменении конфигурационного файла. * Добавлен запрос `SYSTEM RELOAD CONFIG`. * Добавлена агрегатная функция `maxIntersections(left_col, right_col)`, возвращающая максимальное количество одновременно пересекающихся интервалов `[left; right]`. Функция `maxIntersectionsPosition(left, right)` возвращает начало такого "максимального" интервала. ([Michael Furmur](https://github.com/yandex/ClickHouse/pull/2012)). -## Улучшения: +### Улучшения: * При вставке данных в `Replicated`-таблицу делается меньше обращений к `ZooKeeper` (также из лога `ZooKeeper` исчезло большинство user-level ошибок). * Добавлена возможность создавать алиасы для множеств. Пример: `WITH (1, 2, 3) AS set SELECT number IN set FROM system.numbers LIMIT 10`. -## Исправление ошибок: +### Исправление ошибок: * Исправлена ошибка `Illegal PREWHERE` при чтении из Merge-таблицы над `Distributed`-таблицами. * Добавлены исправления, позволяющие запускать clickhouse-server в IPv4-only docker-контейнерах. @@ -184,9 +308,9 @@ * Устранено ненужное Error-level логирование `Not found column ... in block`. -# Релиз ClickHouse 1.1.54362, 2018-03-11 +## Релиз ClickHouse 1.1.54362, 2018-03-11 -## Новые возможности: +### Новые возможности: * Агрегация без `GROUP BY` по пустому множеству (как например, `SELECT count(*) FROM table WHERE 0`) теперь возвращает результат из одной строки с нулевыми значениями агрегатных функций, в соответствии со стандартом SQL. Вы можете вернуть старое поведение (возвращать пустой результат), выставив настройку `empty_result_for_aggregation_by_empty_set` в значение 1. * Добавлено приведение типов при `UNION ALL`. Допустимо использование столбцов с разными алиасами в соответствующих позициях `SELECT` в `UNION ALL`, что соответствует стандарту SQL. 
@@ -224,7 +348,7 @@ * Добавлена настройка `odbc_default_field_size`, позволяющая расширить максимальный размер значения, загружаемого из ODBC источника (по-умолчанию - 1024). * В таблицу `system.processes` и в `SHOW PROCESSLIST` добавлены столбцы `is_cancelled` и `peak_memory_usage`. -## Улучшения: +### Улучшения: * Ограничения на результат и квоты на результат теперь не применяются к промежуточным данным для запросов `INSERT SELECT` и для подзапросов в `SELECT`. * Уменьшено количество ложных срабатываний при проверке состояния `Replicated` таблиц при запуске сервера, приводивших к необходимости выставления флага `force_restore_data`. @@ -240,7 +364,7 @@ * Значения типа `Enum` можно использовать в функциях `min`, `max`, `sum` и некоторых других - в этих случаях используются соответствующие числовые значения. Эта возможность присутствовала ранее, но была потеряна в релизе 1.1.54337. * Добавлено ограничение `max_expanded_ast_elements` действующее на размер AST после рекурсивного раскрытия алиасов. -## Исправление ошибок: +### Исправление ошибок: * Исправлены случаи ошибочного удаления ненужных столбцов из подзапросов, а также отсутствие удаления ненужных столбцов из подзапросов, содержащих `UNION ALL`. * Исправлена ошибка в слияниях для таблиц типа `ReplacingMergeTree`. @@ -268,19 +392,19 @@ * Запрещено использование запросов с `UNION ALL` в `MATERIALIZED VIEW`. * Исправлена ошибка, которая может возникать при инициализации системной таблицы `part_log` при старте сервера (по-умолчанию `part_log` выключен). -## Обратно несовместимые изменения: +### Обратно несовместимые изменения: * Удалена настройка `distributed_ddl_allow_replicated_alter`. Соответствующее поведение включено по-умолчанию. * Удалена настройка `strict_insert_defaults`. Если вы использовали эту функциональность, напишите на `clickhouse-feedback@yandex-team.com`. * Удалён движок таблиц `UnsortedMergeTree`. 
-# Релиз ClickHouse 1.1.54343, 2018-02-05 +## Релиз ClickHouse 1.1.54343, 2018-02-05 * Добавлена возможность использовать макросы при задании имени кластера в распределенных DDL запросах и создании Distributed-таблиц: `CREATE TABLE distr ON CLUSTER '{cluster}' (...) ENGINE = Distributed('{cluster}', 'db', 'table')`. * Теперь при вычислении запросов вида `SELECT ... FROM table WHERE expr IN (subquery)` используется индекс таблицы `table`. * Улучшена обработка дубликатов при вставке в Replicated-таблицы, теперь они не приводят к излишнему замедлению выполнения очереди репликации. -# Релиз ClickHouse 1.1.54342, 2018-01-22 +## Релиз ClickHouse 1.1.54342, 2018-01-22 Релиз содержит исправление к предыдущему релизу 1.1.54337: * Исправлена регрессия в версии 1.1.54337: если пользователь по-умолчанию имеет readonly доступ, то сервер отказывался стартовать с сообщением `Cannot create database in readonly mode`. @@ -291,9 +415,9 @@ * Таблицы типа Buffer теперь работают при наличии MATERIALIZED столбцов в таблице назначения (by zhang2014). * Исправлена одна из ошибок в реализации NULL. -# Релиз ClickHouse 1.1.54337, 2018-01-18 +## Релиз ClickHouse 1.1.54337, 2018-01-18 -## Новые возможности: +### Новые возможности: * Добавлена поддержка хранения многомерных массивов и кортежей (тип данных `Tuple`) в таблицах. * Поддержка табличных функций для запросов `DESCRIBE` и `INSERT`. Поддержка подзапроса в запросе `DESCRIBE`. Примеры: `DESC TABLE remote('host', default.hits)`; `DESC TABLE (SELECT 1)`; `INSERT INTO TABLE FUNCTION remote('host', default.hits)`. Возможность писать `INSERT INTO TABLE` вместо `INSERT INTO`. @@ -322,9 +446,9 @@ * Добавлена поддержка `ALTER` для таблиц типа `Null` (Anastasiya Tsarkova). * Функция `reinterpretAsString` расширена на все типы данных, значения которых хранятся в памяти непрерывно. * Для программы `clickhouse-local` добавлена опция `--silent` для подавления вывода информации о выполнении запроса в stderr. 
-* Добавлена поддержка чтения `Date` в текстовом виде в формате, где месяц и день месяца могут быть указаны одной цифрой вместо двух (Amos Bird). +* Добавлена поддержка чтения `Date` в текстовом виде в формате, где месяц и день месяца могут быть указаны одной цифрой вместо двух (Amos Bird). -## Увеличение производительности: +### Увеличение производительности: * Увеличена производительность агрегатных функций `min`, `max`, `any`, `anyLast`, `anyHeavy`, `argMin`, `argMax` от строковых аргументов. * Увеличена производительность функций `isInfinite`, `isFinite`, `isNaN`, `roundToExp2`. @@ -333,7 +457,7 @@ * Уменьшено потребление памяти при `JOIN`, если левая и правая часть содержали столбцы с одинаковым именем, не входящие в `USING`. * Увеличена производительность агрегатных функций `varSamp`, `varPop`, `stddevSamp`, `stddevPop`, `covarSamp`, `covarPop`, `corr` за счёт уменьшения стойкости к вычислительной погрешности. Старые версии функций добавлены под именами `varSampStable`, `varPopStable`, `stddevSampStable`, `stddevPopStable`, `covarSampStable`, `covarPopStable`, `corrStable`. -## Исправления ошибок: +### Исправления ошибок: * Исправлена работа дедупликации блоков после `DROP` или `DETACH PARTITION`. Раньше удаление партиции и вставка тех же самых данных заново не работала, так как вставленные заново блоки считались дубликатами. * Исправлена ошибка, в связи с которой может неправильно обрабатываться `WHERE` для запросов на создание `MATERIALIZED VIEW` с указанием `POPULATE`. @@ -343,7 +467,7 @@ * Добавлена недостающая поддержка типа данных `UUID` для `DISTINCT`, `JOIN`, в агрегатных функциях `uniq` и во внешних словарях (Иванов Евгений). Поддержка `UUID` всё ещё остаётся не полной. * Исправлено поведение `SummingMergeTree` для строк, в которых все значения после суммирования равны нулю. * Многочисленные доработки для движка таблиц `Kafka` (Marek Vavruša). -* Исправлена некорректная работа движка таблиц `Join` (Amos Bird). 
+* Исправлена некорректная работа движка таблиц `Join` (Amos Bird). * Исправлена работа аллокатора под FreeBSD и OS X. * Функция `extractAll` теперь может доставать пустые вхождения. * Исправлена ошибка, не позволяющая подключить при сборке `libressl` вместо `openssl`. @@ -367,12 +491,12 @@ * Исправлена работа `DISTINCT` при условии, что все столбцы константные. * Исправлено форматирование запроса в случае наличия функции `tupleElement` со сложным константным выражением в качестве номера элемента. * Исправлена работа `Dictionary` таблиц для словарей типа `range_hashed`. -* Исправлена ошибка, приводящая к появлению лишних строк при `FULL` и `RIGHT JOIN` (Amos Bird). +* Исправлена ошибка, приводящая к появлению лишних строк при `FULL` и `RIGHT JOIN` (Amos Bird). * Исправлено падение сервера в случае создания и удаления временных файлов в `config.d` директориях в момент перечитывания конфигурации. * Исправлена работа запроса `SYSTEM DROP DNS CACHE`: ранее сброс DNS кэша не приводил к повторному резолвингу имён хостов кластера. * Исправлено поведение `MATERIALIZED VIEW` после `DETACH TABLE` таблицы, на которую он смотрит (Marek Vavruša). -## Улучшения сборки: +### Улучшения сборки: * Для сборки используется `pbuilder`. Сборка максимально независима от окружения на сборочной машине. * Для разных версий систем выкладывается один и тот же пакет, который совместим с широким диапазоном Linux систем. @@ -386,27 +510,27 @@ * Удалено использование расширений GNU из кода и включена опция `-Wextra`. При сборке с помощью `clang` по-умолчанию используется `libc++` вместо `libstdc++`. * Выделены библиотеки `clickhouse_parsers` и `clickhouse_common_io` для более быстрой сборки утилит. -## Обратно несовместимые изменения: +### Обратно несовместимые изменения: * Формат засечек (marks) для таблиц типа `Log`, содержащих `Nullable` столбцы, изменён обратно-несовместимым образом. В случае наличия таких таблиц, вы можете преобразовать их в `TinyLog` до запуска новой версии сервера. 
Для этого в соответствующем таблице файле `.sql` в директории `metadata`, замените `ENGINE = Log` на `ENGINE = TinyLog`. Если в таблице нет `Nullable` столбцов или тип таблицы не `Log`, то ничего делать не нужно. * Удалена настройка `experimental_allow_extended_storage_definition_syntax`. Соответствующая функциональность включена по-умолчанию. * Функция `runningIncome` переименована в `runningDifferenceStartingWithFirstValue` во избежание путаницы. -* Удалена возможность написания `FROM ARRAY JOIN arr` без указания таблицы после FROM (Amos Bird). +* Удалена возможность написания `FROM ARRAY JOIN arr` без указания таблицы после FROM (Amos Bird). * Удалён формат `BlockTabSeparated`, использовавшийся лишь для демонстрационных целей. * Изменён формат состояния агрегатных функций `varSamp`, `varPop`, `stddevSamp`, `stddevPop`, `covarSamp`, `covarPop`, `corr`. Если вы использовали эти состояния для хранения в таблицах (тип данных `AggregateFunction` от этих функций или материализованные представления, хранящие эти состояния), напишите на clickhouse-feedback@yandex-team.com. * В предыдущих версиях существовала недокументированная возможность: в типе данных AggregateFunction можно было не указывать параметры для агрегатной функции, которая зависит от параметров. Пример: `AggregateFunction(quantiles, UInt64)` вместо `AggregateFunction(quantiles(0.5, 0.9), UInt64)`. Эта возможность потеряна. Не смотря на то, что возможность не документирована, мы собираемся вернуть её в ближайших релизах. * Значения типа данных Enum не могут быть переданы в агрегатные функции min/max. Возможность будет возвращена обратно в следующем релизе. -## На что обратить внимание при обновлении: +### На что обратить внимание при обновлении: * При обновлении кластера, на время, когда на одних репликах работает новая версия сервера, а на других - старая, репликация будет приостановлена и в логе появятся сообщения вида `unknown parameter 'shard'`. 
Репликация продолжится после обновления всех реплик кластера. * Если на серверах кластера работают разные версии ClickHouse, то возможен неправильный результат распределённых запросов, использующих функции `varSamp`, `varPop`, `stddevSamp`, `stddevPop`, `covarSamp`, `covarPop`, `corr`. Необходимо обновить все серверы кластера. -# Релиз ClickHouse 1.1.54327, 2017-12-21 +## Релиз ClickHouse 1.1.54327, 2017-12-21 Релиз содержит исправление к предыдущему релизу 1.1.54318: * Исправлена проблема с возможным race condition при репликации, которая может приводить к потере данных. Проблеме подвержены версии 1.1.54310 и 1.1.54318. Если вы их используете и у вас есть Replicated таблицы, то обновление обязательно. Понять, что эта проблема существует, можно по сообщениям в логе Warning вида `Part ... from own log doesn't exist.` Даже если таких сообщений нет, проблема всё-равно актуальна. -# Релиз ClickHouse 1.1.54318, 2017-11-30 +## Релиз ClickHouse 1.1.54318, 2017-11-30 Релиз содержит изменения к предыдущему релизу 1.1.54310 с исправлением следующих багов: * Исправлено некорректное удаление строк при слияниях в движке SummingMergeTree @@ -415,9 +539,9 @@ * Исправлена проблема, приводящая к остановке выполнения очереди репликации * Исправлено ротирование и архивация логов сервера -# Релиз ClickHouse 1.1.54310, 2017-11-01 +## Релиз ClickHouse 1.1.54310, 2017-11-01 -## Новые возможности: +### Новые возможности: * Произвольный ключ партиционирования для таблиц семейства MergeTree. * Движок таблиц [Kafka](https://clickhouse.yandex/docs/en/single/index.html#document-table_engines/kafka). * Возможность загружать модели [CatBoost](https://catboost.yandex/) и применять их к данным, хранящимся в ClickHouse. @@ -433,12 +557,12 @@ * Поддержка входного формата Cap’n Proto. * Возможность задавать уровень сжатия при использовании алгоритма zstd. -## Обратно несовместимые изменения: +### Обратно несовместимые изменения: * Запрещено создание временных таблиц с движком, отличным от Memory. 
* Запрещено явное создание таблиц с движком View и MaterializedView. * При создании таблицы теперь проверяется, что ключ сэмплирования входит в первичный ключ. -## Исправления ошибок: +### Исправления ошибок: * Исправлено зависание при синхронной вставке в Distributed таблицу. * Исправлена неатомарность при добавлении/удалении кусков в реплицированных таблицах. * Данные, вставляемые в материализованное представление, теперь не подвергаются излишней дедупликации. @@ -448,14 +572,14 @@ * Исправлено зависание при недостатке места на диске в разделе с логами. * Исправлено переполнение в функции toRelativeWeekNum для первой недели Unix-эпохи. -## Улучшения сборки: +### Улучшения сборки: * Несколько сторонних библиотек (в частности, Poco) обновлены и переведены на git submodules. -# Релиз ClickHouse 1.1.54304, 2017-10-19 -## Новые возможности: +## Релиз ClickHouse 1.1.54304, 2017-10-19 +### Новые возможности: * Добавлена поддержка TLS в нативном протоколе (включается заданием `tcp_ssl_port` в `config.xml`) -## Исправления ошибок: +### Исправления ошибок: * `ALTER` для реплицированных таблиц теперь пытается начать выполнение как можно быстрее * Исправлены падения при чтении данных с настройкой `preferred_block_size_bytes=0` * Исправлено падение `clickhouse-client` при нажатии `Page Down` @@ -468,16 +592,16 @@ * Корректное обновление пользователей при невалидном `users.xml` * Корректная обработка случаев, когда executable-словарь возвращает ненулевой код ответа -# Релиз ClickHouse 1.1.54292, 2017-09-20 +## Релиз ClickHouse 1.1.54292, 2017-09-20 -## Новые возможности: +### Новые возможности: * Добавлена функция `pointInPolygon` для работы с координатами на плоскости. * Добавлена агрегатная функция `sumMap`, обеспечивающая суммирование массивов аналогично `SummingMergeTree`. * Добавлена функция `trunc`. Увеличена производительность функций округления `round`, `floor`, `ceil`, `roundToExp2`. Исправлена логика работы функций округления. 
Изменена логика работы функции `roundToExp2` для дробных и отрицательных чисел. * Ослаблена зависимость исполняемого файла ClickHouse от версии libc. Один и тот же исполняемый файл ClickHouse может запускаться и работать на широком множестве Linux систем. Замечание: зависимость всё ещё присутствует при использовании скомпилированных запросов (настройка `compile = 1`, по-умолчанию не используется). * Уменьшено время динамической компиляции запросов. -## Исправления ошибок: +### Исправления ошибок: * Исправлена ошибка, которая могла приводить к сообщениям `part ... intersects previous part` и нарушению консистентности реплик. * Исправлена ошибка, приводящая к блокировке при завершении работы сервера, если в это время ZooKeeper недоступен. * Удалено избыточное логгирование при восстановлении реплик. @@ -485,9 +609,9 @@ * Исправлена ошибка в функции concat, возникающая в случае, если первый столбец блока имеет тип Array. * Исправлено отображение прогресса в таблице system.merges. -# Релиз ClickHouse 1.1.54289, 2017-09-13 +## Релиз ClickHouse 1.1.54289, 2017-09-13 -## Новые возможности: +### Новые возможности: * Запросы `SYSTEM` для административных действий с сервером: `SYSTEM RELOAD DICTIONARY`, `SYSTEM RELOAD DICTIONARIES`, `SYSTEM DROP DNS CACHE`, `SYSTEM SHUTDOWN`, `SYSTEM KILL`. * Добавлены функции для работы с массивами: `concat`, `arraySlice`, `arrayPushBack`, `arrayPushFront`, `arrayPopBack`, `arrayPopFront`. * Добавлены параметры `root` и `identity` для конфигурации ZooKeeper. Это позволяет изолировать разных пользователей одного ZooKeeper кластера. @@ -502,7 +626,7 @@ * Возможность задать `umask` в конфигурационном файле. * Увеличена производительность запросов с `DISTINCT`. -## Исправления ошибок: +### Исправления ошибок: * Более оптимальная процедура удаления старых нод в ZooKeeper. Ранее в случае очень частых вставок, старые ноды могли не успевать удаляться, что приводило, в том числе, к очень долгому завершению сервера. 
* Исправлена рандомизация при выборе хостов для соединения с ZooKeeper. * Исправлено отключение отстающей реплики при распределённых запросах, если реплика является localhost. @@ -515,28 +639,28 @@ * Исправлено появление zombie процессов при работе со словарём с источником `executable`. * Исправлен segfault при запросе HEAD. -## Улучшения процесса разработки и сборки ClickHouse: +### Улучшения процесса разработки и сборки ClickHouse: * Возможность сборки с помощью `pbuilder`. * Возможность сборки с использованием `libc++` вместо `libstdc++` под Linux. * Добавлены инструкции для использования статических анализаторов кода `Coverity`, `clang-tidy`, `cppcheck`. -## На что обратить внимание при обновлении: +### На что обратить внимание при обновлении: * Увеличено значение по-умолчанию для настройки MergeTree `max_bytes_to_merge_at_max_space_in_pool` (максимальный суммарный размер кусков в байтах для мержа) со 100 GiB до 150 GiB. Это может привести к запуску больших мержей после обновления сервера, что может вызвать повышенную нагрузку на дисковую подсистему. Если же на серверах, где это происходит, количество свободного места менее чем в два раза больше суммарного объёма выполняющихся мержей, то в связи с этим перестанут выполняться какие-либо другие мержи, включая мержи мелких кусков. Это приведёт к тому, что INSERT-ы будут отклоняться с сообщением "Merges are processing significantly slower than inserts". Для наблюдения, используйте запрос `SELECT * FROM system.merges`. Вы также можете смотреть на метрику `DiskSpaceReservedForMerge` в таблице `system.metrics` или в Graphite. Для исправления этой ситуации можно ничего не делать, так как она нормализуется сама после завершения больших мержей. Если же вас это не устраивает, вы можете вернуть настройку `max_bytes_to_merge_at_max_space_in_pool` в старое значение, прописав в config.xml в секции `<merge_tree>` `<max_bytes_to_merge_at_max_space_in_pool>107374182400</max_bytes_to_merge_at_max_space_in_pool>` и перезапустить сервер. 
-# Релиз ClickHouse 1.1.54284, 2017-08-29 +## Релиз ClickHouse 1.1.54284, 2017-08-29 * Релиз содержит изменения к предыдущему релизу 1.1.54282, которые исправляют утечку записей о кусках в ZooKeeper -# Релиз ClickHouse 1.1.54282, 2017-08-23 +## Релиз ClickHouse 1.1.54282, 2017-08-23 Релиз содержит исправления к предыдущему релизу 1.1.54276: * Исправлена ошибка `DB::Exception: Assertion violation: !_path.empty()` при вставке в Distributed таблицу. * Исправлен парсинг при вставке в формате RowBinary, если входные данные начинаются с ';'. * Исправлена ошибка при рантайм-компиляции некоторых агрегатных функций (например, `groupArray()`). -# Релиз ClickHouse 1.1.54276, 2017-08-16 +## Релиз ClickHouse 1.1.54276, 2017-08-16 -## Новые возможности: +### Новые возможности: * Добавлена опциональная секция WITH запроса SELECT. Пример запроса: `WITH 1+1 AS a SELECT a, a*a` * Добавлена возможность синхронной вставки в Distributed таблицу: выдается Ok только после того как все данные записались на все шарды. Активируется настройкой insert_distributed_sync=1 * Добавлен тип данных UUID для работы с 16-байтовыми идентификаторами @@ -546,17 +670,17 @@ * Добавлена поддержка неконстантных аргументов и отрицательных смещений в функции `substring(str, pos, len)` * Добавлен параметр max_size для агрегатной функции `groupArray(max_size)(column)`, и оптимизирована её производительность -## Основные изменения: +### Основные изменения: * Улучшение безопасности: все файлы сервера создаются с правами 0640 (можно поменять, через параметр в конфиге). * Улучшены сообщения об ошибках в случае синтаксически неверных запросов * Значительно уменьшен расход оперативной памяти и улучшена производительность слияний больших MergeTree-кусков данных * Значительно увеличена производительность слияний данных для движка ReplacingMergeTree * Улучшена производительность асинхронных вставок из Distributed таблицы за счет объединения нескольких исходных вставок. 
Функциональность включается настройкой distributed_directory_monitor_batch_inserts=1. -## Обратно несовместимые изменения: +### Обратно несовместимые изменения: * Изменился бинарный формат агрегатных состояний функции `groupArray(array_column)` для массивов -## Полный список изменений: +### Полный список изменений: * Добавлена настройка `output_format_json_quote_denormals`, включающая вывод nan и inf значений в формате JSON * Более оптимальное выделение потоков при чтении из Distributed таблиц * Разрешено задавать настройки в режиме readonly, если их значение не изменяется @@ -574,7 +698,7 @@ * Возможность подключения к MySQL через сокет на файловой системе * В таблицу system.parts добавлен столбец с информацией о размере marks в байтах -## Исправления багов: +### Исправления багов: * Исправлена некорректная работа Distributed таблиц, использующих Merge таблицы, при SELECT с условием на поле _table * Исправлен редкий race condition в ReplicatedMergeTree при проверке кусков данных * Исправлено возможное зависание процедуры leader election при старте сервера @@ -597,15 +721,15 @@ * Исправлена ошибка "Cannot mremap" при использовании множеств в секциях IN, JOIN, содержащих более 2 млрд. 
элементов * Исправлен failover для словарей с источником MySQL -## Улучшения процесса разработки и сборки ClickHouse: +### Улучшения процесса разработки и сборки ClickHouse: * Добавлена возможность сборки в Arcadia * Добавлена возможность сборки с помощью gcc 7 * Ускорена параллельная сборка с помощью ccache+distcc -# Релиз ClickHouse 1.1.54245, 2017-07-04 +## Релиз ClickHouse 1.1.54245, 2017-07-04 -## Новые возможности: +### Новые возможности: * Распределённые DDL (например, `CREATE TABLE ON CLUSTER`) * Реплицируемый запрос `ALTER TABLE CLEAR COLUMN IN PARTITION` * Движок таблиц Dictionary (доступ к данным словаря в виде таблицы) @@ -616,14 +740,14 @@ * Сессии в HTTP интерфейсе * Запрос OPTIMIZE для Replicated таблицы теперь можно выполнять не только на лидере -## Обратно несовместимые изменения: +### Обратно несовместимые изменения: * Убрана команда SET GLOBAL -## Мелкие изменения: +### Мелкие изменения: * Теперь после получения сигнала в лог печатается полный стектрейс * Ослаблена проверка на количество повреждённых/лишних кусков при старте (было слишком много ложных срабатываний) -## Исправления багов: +### Исправления багов: * Исправлено залипание плохого соединения при вставке в Distributed таблицу * GLOBAL IN теперь работает при запросе из таблицы Merge, смотрящей в Distributed * Теперь правильно определяется количество ядер на виртуалках Google Compute Engine diff --git a/CMakeLists.txt b/CMakeLists.txt index d193861d118..8fff4641e24 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -34,16 +34,16 @@ endif () string(TOUPPER ${CMAKE_BUILD_TYPE} CMAKE_BUILD_TYPE_UC) message (STATUS "CMAKE_BUILD_TYPE: " ${CMAKE_BUILD_TYPE} ) -# ASan - build type with address sanitizer -# UBSan - build type with undefined behaviour sanitizer -# TSan is not supported due to false positive errors in libstdc++ and necessity to rebuild libstdc++ with TSan -set (CMAKE_CONFIGURATION_TYPES "RelWithDebInfo;Debug;Release;MinSizeRel;ASan;UBSan" CACHE STRING "" FORCE) +set 
(CMAKE_CONFIGURATION_TYPES "RelWithDebInfo;Debug;Release;MinSizeRel" CACHE STRING "" FORCE) + +include (cmake/sanitize.cmake) include (cmake/arch.cmake) if (CMAKE_GENERATOR STREQUAL "Ninja") # Turn on colored output. https://github.com/ninja-build/ninja/wiki/FAQ - set (COMPILER_FLAGS "${COMPILER_FLAGS} -fdiagnostics-color=always") + set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fdiagnostics-color=always") + set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fdiagnostics-color=always") endif () if (NOT MSVC) @@ -60,12 +60,8 @@ if (CMAKE_CXX_COMPILER_ID STREQUAL "Clang") set (COMMON_WARNING_FLAGS "${COMMON_WARNING_FLAGS} -Wno-unused-command-line-argument") endif () -if (ARCH_LINUX) - set (CXX11_ABI "ENABLE" CACHE STRING "Use C++11 ABI: DEFAULT, ENABLE, DISABLE") -endif () - option (TEST_COVERAGE "Enables flags for test coverage" OFF) -option (ENABLE_TESTS "Enables tests" ${NOT_MSVC}) +option (ENABLE_TESTS "Enables tests" ON) option (USE_STATIC_LIBRARIES "Set to FALSE to use shared libraries" ON) option (MAKE_STATIC_LIBRARIES "Set to FALSE to make shared libraries" ${USE_STATIC_LIBRARIES}) @@ -85,7 +81,7 @@ endif () if (CMAKE_LIBRARY_ARCHITECTURE MATCHES "amd64.*|x86_64.*|AMD64.*") option (USE_INTERNAL_MEMCPY "Use internal implementation of 'memcpy' function instead of provided by libc. Only for x86_64." ON) - if (ARCH_LINUX) + if (OS_LINUX) option (GLIBC_COMPATIBILITY "Set to TRUE to enable compatibility with older glibc libraries. Only for x86_64, Linux. Implies USE_INTERNAL_MEMCPY." 
ON) endif() endif () @@ -94,15 +90,7 @@ if (GLIBC_COMPATIBILITY) set (USE_INTERNAL_MEMCPY ON) endif () -if (CXX11_ABI STREQUAL ENABLE) - set (CXX11_ABI_FLAGS "-D_GLIBCXX_USE_CXX11_ABI=1") -elseif (CXX11_ABI STREQUAL DISABLE) - set (CXX11_ABI_FLAGS "-D_GLIBCXX_USE_CXX11_ABI=0") -else () - set (CXX11_ABI_FLAGS "") -endif () - -set (COMPILER_FLAGS "${COMPILER_FLAGS} ${CXX11_ABI_FLAGS}") +set (COMPILER_FLAGS "${COMPILER_FLAGS}") string(REGEX MATCH "-?[0-9]+(.[0-9]+)?$" COMPILER_POSTFIX ${CMAKE_CXX_COMPILER}) @@ -149,26 +137,29 @@ else () endif () set (CMAKE_BUILD_COLOR_MAKEFILE ON) -set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COMPILER_FLAGS} ${PLATFORM_EXTRA_CXX_FLAG} -fno-omit-frame-pointer ${COMMON_WARNING_FLAGS} ${CXX_WARNING_FLAGS} ${GLIBC_COMPATIBILITY_COMPILE_FLAGS}") +set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COMPILER_FLAGS} ${PLATFORM_EXTRA_CXX_FLAG} -fno-omit-frame-pointer ${COMMON_WARNING_FLAGS} ${CXX_WARNING_FLAGS}") #set (CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} ${CMAKE_CXX_FLAGS_ADD}") set (CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -O3 ${CMAKE_CXX_FLAGS_ADD}") set (CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -O0 -g3 -ggdb3 -fno-inline ${CMAKE_CXX_FLAGS_ADD}") -set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${COMPILER_FLAGS} -fno-omit-frame-pointer ${COMMON_WARNING_FLAGS} ${GLIBC_COMPATIBILITY_COMPILE_FLAGS} ${CMAKE_C_FLAGS_ADD}") +set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${COMPILER_FLAGS} -fno-omit-frame-pointer ${COMMON_WARNING_FLAGS} ${CMAKE_C_FLAGS_ADD}") #set (CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} ${CMAKE_C_FLAGS_ADD}") set (CMAKE_C_FLAGS_RELWITHDEBINFO "${CMAKE_C_FLAGS_RELWITHDEBINFO} -O3 ${CMAKE_C_FLAGS_ADD}") set (CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -O0 -g3 -ggdb3 -fno-inline ${CMAKE_C_FLAGS_ADD}") -if (MAKE_STATIC_LIBRARIES AND NOT APPLE AND NOT (CMAKE_CXX_COMPILER_ID STREQUAL "Clang" AND ARCH_FREEBSD)) +if (MAKE_STATIC_LIBRARIES AND NOT APPLE AND NOT (CMAKE_CXX_COMPILER_ID STREQUAL "Clang" AND 
OS_FREEBSD)) set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -static-libgcc -static-libstdc++") + + # Along with executables, we also build example of shared library for "library dictionary source"; and it also should be self-contained. + set (CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -static-libgcc -static-libstdc++") endif () set(THREADS_PREFER_PTHREAD_FLAG ON) include (cmake/test_compiler.cmake) -if (ARCH_LINUX AND CMAKE_CXX_COMPILER_ID STREQUAL "Clang") - set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${GLIBC_COMPATIBILITY_LINK_FLAGS} ${CXX11_ABI_FLAGS}") +if (OS_LINUX AND CMAKE_CXX_COMPILER_ID STREQUAL "Clang") + set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS}") option (USE_LIBCXX "Use libc++ and libc++abi instead of libstdc++ (only make sense on Linux with Clang)" ${HAVE_LIBCXX}) set (LIBCXX_PATH "" CACHE STRING "Use custom path for libc++. It should be used for MSan.") @@ -183,7 +174,7 @@ if (ARCH_LINUX AND CMAKE_CXX_COMPILER_ID STREQUAL "Clang") endif () if (LIBCXX_PATH) -# include_directories (BEFORE SYSTEM "${LIBCXX_PATH}/include" "${LIBCXX_PATH}/include/c++/v1") +# include_directories (SYSTEM BEFORE "${LIBCXX_PATH}/include" "${LIBCXX_PATH}/include/c++/v1") link_directories ("${LIBCXX_PATH}/lib") endif () endif () @@ -198,8 +189,6 @@ if (NOT MAKE_STATIC_LIBRARIES) set(CMAKE_POSITION_INDEPENDENT_CODE ON) endif () -include (cmake/sanitize.cmake) - # Using "include-what-you-use" tool. option (USE_INCLUDE_WHAT_YOU_USE "Use 'include-what-you-use' tool" OFF) if (USE_INCLUDE_WHAT_YOU_USE) @@ -236,7 +225,7 @@ else () set(NOT_UNBUNDLED 1) endif () # Using system libs can cause lot of warnings in includes. 
-if (UNBUNDLED OR NOT (ARCH_LINUX OR APPLE) OR ARCH_32) +if (UNBUNDLED OR NOT (OS_LINUX OR APPLE) OR ARCH_32) option (NO_WERROR "Disable -Werror compiler option" ON) endif () @@ -245,24 +234,15 @@ message (STATUS "Building for: ${CMAKE_SYSTEM} ${CMAKE_SYSTEM_PROCESSOR} ${CMAKE include(GNUInstallDirs) include (cmake/find_ssl.cmake) -if (NOT OPENSSL_FOUND) - message (FATAL_ERROR "Need openssl for build. debian tip: sudo apt install libssl-dev") -endif () - include (cmake/lib_name.cmake) include (cmake/find_icu4c.cmake) include (cmake/find_boost.cmake) -# openssl, zlib before poco include (cmake/find_zlib.cmake) include (cmake/find_zstd.cmake) include (cmake/find_ltdl.cmake) # for odbc include (cmake/find_termcap.cmake) -if (EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/contrib/poco/cmake/FindODBC.cmake) - include (${CMAKE_CURRENT_SOURCE_DIR}/contrib/poco/cmake/FindODBC.cmake) # for poco -else () - include (cmake/find_odbc.cmake) -endif () -message (STATUS "Using odbc: ${ODBC_INCLUDE_DIRECTORIES} : ${ODBC_LIBRARIES}") +include (cmake/find_odbc.cmake) +# openssl, zlib, odbc before poco include (cmake/find_poco.cmake) include (cmake/find_lz4.cmake) include (cmake/find_sparsehash.cmake) @@ -274,6 +254,9 @@ include (cmake/find_rdkafka.cmake) include (cmake/find_capnp.cmake) include (cmake/find_llvm.cmake) include (cmake/find_cpuid.cmake) +if (ENABLE_TESTS) + include (cmake/find_gtest.cmake) +endif () include (cmake/find_contrib_lib.cmake) find_contrib_lib(cityhash) diff --git a/MacOS.md b/MacOS.md deleted file mode 100644 index 93c04d7c2de..00000000000 --- a/MacOS.md +++ /dev/null @@ -1,39 +0,0 @@ -## How to increase maxfiles on macOS - -To increase maxfiles on macOS, create the following file: - -(Note: you'll need to use sudo) - -/Library/LaunchDaemons/limit.maxfiles.plist: -``` - - - - - Label - limit.maxfiles - ProgramArguments - - launchctl - limit - maxfiles - 524288 - 524288 - - RunAtLoad - - ServiceIPC - - - -``` - -Execute the following command: -``` -sudo chown root:wheel 
/Library/LaunchDaemons/limit.maxfiles.plist -``` - -Reboot. - -To check if it's working, you can use `ulimit -n` command. diff --git a/README.md b/README.md index e5d998b54e7..8cb9fa3379e 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,13 @@ # ClickHouse + ClickHouse is an open-source column-oriented database management system that allows generating analytical data reports in real time. -Learn more about ClickHouse at [https://clickhouse.yandex/](https://clickhouse.yandex/) - [![Build Status](https://travis-ci.org/yandex/ClickHouse.svg?branch=master)](https://travis-ci.org/yandex/ClickHouse) + +## Useful links + +* [Official website](https://clickhouse.yandex/) has quick high-level overview of ClickHouse on main page. +* [Tutorial](https://clickhouse.yandex/tutorial.html) shows how to set up and query small ClickHouse cluster. +* [Documentation](https://clickhouse.yandex/docs/en/) provides more in-depth information. +* [Contacts](https://clickhouse.yandex/#contacts) can help to get your questions answered if there are any. 
+ diff --git a/ci/install-libraries.sh b/ci/install-libraries.sh index 4868221b342..d7fb856dbed 100755 --- a/ci/install-libraries.sh +++ b/ci/install-libraries.sh @@ -3,11 +3,8 @@ set -e -x source default-config -./install-os-packages.sh libssl-dev ./install-os-packages.sh libicu-dev ./install-os-packages.sh libreadline-dev -./install-os-packages.sh libmariadbclient-dev -./install-os-packages.sh libunixodbc-dev if [[ "$ENABLE_EMBEDDED_COMPILER" == 1 && "$USE_LLVM_LIBRARIES_FROM_SYSTEM" == 1 ]]; then ./install-os-packages.sh llvm-libs-5.0 diff --git a/ci/install-os-packages.sh b/ci/install-os-packages.sh index 4aae6268aa1..fe5b4f84833 100755 --- a/ci/install-os-packages.sh +++ b/ci/install-os-packages.sh @@ -43,21 +43,12 @@ case $PACKAGE_MANAGER in jq) $SUDO apt-get install -y jq ;; - libssl-dev) - $SUDO apt-get install -y libssl-dev - ;; libicu-dev) $SUDO apt-get install -y libicu-dev ;; libreadline-dev) $SUDO apt-get install -y libreadline-dev ;; - libunixodbc-dev) - $SUDO apt-get install -y unixodbc-dev - ;; - libmariadbclient-dev) - $SUDO apt-get install -y libmariadbclient-dev - ;; llvm-libs*) $SUDO apt-get install -y ${WHAT/llvm-libs/liblld}-dev ${WHAT/llvm-libs/libclang}-dev ;; @@ -97,22 +88,12 @@ case $PACKAGE_MANAGER in jq) $SUDO yum install -y jq ;; - libssl-dev) - $SUDO yum install -y openssl-devel - ;; libicu-dev) $SUDO yum install -y libicu-devel ;; libreadline-dev) $SUDO yum install -y readline-devel ;; - libunixodbc-dev) - $SUDO yum install -y unixODBC-devel libtool-ltdl-devel - ;; - libmariadbclient-dev) - echo "There is no package with static mysqlclient library"; echo 1; - #$SUDO yum install -y mariadb-connector-c-devel - ;; *) echo "Unknown package"; exit 1; ;; @@ -146,21 +127,12 @@ case $PACKAGE_MANAGER in jq) $SUDO pkg install -y jq ;; - libssl-dev) - $SUDO pkg install -y openssl - ;; libicu-dev) $SUDO pkg install -y icu ;; libreadline-dev) $SUDO pkg install -y readline ;; - libunixodbc-dev) - $SUDO pkg install -y unixODBC libltdl - ;; - 
libmariadbclient-dev) - $SUDO pkg install -y mariadb102-client - ;; *) echo "Unknown package"; exit 1; ;; diff --git a/ci/jobs/quick-build/run.sh b/ci/jobs/quick-build/run.sh index 5fe57457645..6a948c560ee 100755 --- a/ci/jobs/quick-build/run.sh +++ b/ci/jobs/quick-build/run.sh @@ -21,7 +21,7 @@ BUILD_TARGETS=clickhouse BUILD_TYPE=Debug ENABLE_EMBEDDED_COMPILER=0 -CMAKE_FLAGS="-D CMAKE_C_FLAGS_ADD=-g0 -D CMAKE_CXX_FLAGS_ADD=-g0 -D ENABLE_TCMALLOC=0 -D ENABLE_CAPNP=0 -D ENABLE_RDKAFKA=0 -D ENABLE_UNWIND=0 -D ENABLE_ICU=0 -D ENABLE_POCO_MONGODB=0 -D ENABLE_POCO_NETSSL=0 -D ENABLE_POCO_ODBC=0 -D ENABLE_MYSQL=0" +CMAKE_FLAGS="-D CMAKE_C_FLAGS_ADD=-g0 -D CMAKE_CXX_FLAGS_ADD=-g0 -D ENABLE_JEMALLOC=0 -D ENABLE_CAPNP=0 -D ENABLE_RDKAFKA=0 -D ENABLE_UNWIND=0 -D ENABLE_ICU=0 -D ENABLE_POCO_MONGODB=0 -D ENABLE_POCO_NETSSL=0 -D ENABLE_POCO_ODBC=0 -D ENABLE_ODBC=0 -D ENABLE_MYSQL=0" [[ $(uname) == "FreeBSD" ]] && COMPILER_PACKAGE_VERSION=devel && export COMPILER_PATH=/usr/local/bin diff --git a/cmake/Modules/FindODBC.cmake b/cmake/Modules/FindODBC.cmake new file mode 100644 index 00000000000..66d43e93d2d --- /dev/null +++ b/cmake/Modules/FindODBC.cmake @@ -0,0 +1,88 @@ +# This file copied from contrib/poco/cmake/FindODBC.cmake to allow build without submodules + +# +# Find the ODBC driver manager includes and library. +# +# ODBC is an open standard for connecting to different databases in a +# semi-vendor-independent fashion. First you install the ODBC driver +# manager. Then you need a driver for each separate database you want +# to connect to (unless a generic one works). VTK includes neither +# the driver manager nor the vendor-specific drivers: you have to find +# those yourself. +# +# This module defines +# ODBC_INCLUDE_DIRECTORIES, where to find sql.h +# ODBC_LIBRARIES, the libraries to link against to use ODBC +# ODBC_FOUND. If false, you cannot build anything that requires ODBC. 
+ +option (ENABLE_ODBC "Enable ODBC" ${OS_LINUX}) +if (OS_LINUX) + option (USE_INTERNAL_ODBC_LIBRARY "Set to FALSE to use system odbc library instead of bundled" ${NOT_UNBUNDLED}) +else () + option (USE_INTERNAL_ODBC_LIBRARY "Set to FALSE to use system odbc library instead of bundled" OFF) +endif () + +if (USE_INTERNAL_ODBC_LIBRARY AND NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/unixodbc/README") + message (WARNING "submodule contrib/unixodbc is missing. to fix try run: \n git submodule update --init --recursive") + set (USE_INTERNAL_ODBC_LIBRARY 0) +endif () + +if (ENABLE_ODBC) + if (USE_INTERNAL_ODBC_LIBRARY) + set (ODBC_LIBRARIES unixodbc) + set (ODBC_INCLUDE_DIRECTORIES ${CMAKE_SOURCE_DIR}/contrib/unixodbc/include) + set (ODBC_FOUND 1) + set (USE_ODBC 1) + else () + find_path(ODBC_INCLUDE_DIRECTORIES + NAMES sql.h + HINTS + /usr/include + /usr/include/iodbc + /usr/include/odbc + /usr/local/include + /usr/local/include/iodbc + /usr/local/include/odbc + /usr/local/iodbc/include + /usr/local/odbc/include + "C:/Program Files/ODBC/include" + "C:/Program Files/Microsoft SDKs/Windows/v7.0/include" + "C:/Program Files/Microsoft SDKs/Windows/v6.0a/include" + "C:/ODBC/include" + DOC "Specify the directory containing sql.h." + ) + + find_library(ODBC_LIBRARIES + NAMES iodbc odbc iodbcinst odbcinst odbc32 + HINTS + /usr/lib + /usr/lib/iodbc + /usr/lib/odbc + /usr/local/lib + /usr/local/lib/iodbc + /usr/local/lib/odbc + /usr/local/iodbc/lib + /usr/local/odbc/lib + "C:/Program Files/ODBC/lib" + "C:/ODBC/lib/debug" + "C:/Program Files (x86)/Microsoft SDKs/Windows/v7.0A/Lib" + DOC "Specify the ODBC driver manager library here." 
+ ) + + # MinGW find usually fails + if(MINGW) + set(ODBC_INCLUDE_DIRECTORIES ".") + set(ODBC_LIBRARIES odbc32) + endif() + + include(FindPackageHandleStandardArgs) + find_package_handle_standard_args(ODBC + DEFAULT_MSG + ODBC_INCLUDE_DIRECTORIES + ODBC_LIBRARIES) + + mark_as_advanced(ODBC_FOUND ODBC_LIBRARIES ODBC_INCLUDE_DIRECTORIES) + endif () +endif () + +message (STATUS "Using odbc: ${ODBC_INCLUDE_DIRECTORIES} : ${ODBC_LIBRARIES}") diff --git a/cmake/arch.cmake b/cmake/arch.cmake index ba446d95676..46d2e9f3ed6 100644 --- a/cmake/arch.cmake +++ b/cmake/arch.cmake @@ -11,19 +11,12 @@ if ( ( ARCH_ARM AND NOT ARCH_AARCH64 ) OR ARCH_I386) set (ARCH_32 1) message (WARNING "Support for 32bit platforms is highly experimental") endif () + if (CMAKE_SYSTEM MATCHES "Linux") - set (ARCH_LINUX 1) + set (OS_LINUX 1) endif () if (CMAKE_SYSTEM MATCHES "FreeBSD") - set (ARCH_FREEBSD 1) -endif () - -if (NOT MSVC) - set (NOT_MSVC 1) -endif () - -if (NOT APPLE) - set (NOT_APPLE 1) + set (OS_FREEBSD 1) endif () if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU") @@ -31,3 +24,7 @@ if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU") elseif (CMAKE_CXX_COMPILER_ID STREQUAL "Clang") set (COMPILER_CLANG 1) endif () + +if (OS_LINUX AND CMAKE_SYSTEM_PROCESSOR STREQUAL "amd64") + set (OS_LINUX_X86_64 1) +endif () diff --git a/cmake/find_capnp.cmake b/cmake/find_capnp.cmake index 03ecadda6a1..6c064112686 100644 --- a/cmake/find_capnp.cmake +++ b/cmake/find_capnp.cmake @@ -1,4 +1,4 @@ -option (ENABLE_CAPNP "Enable Cap'n Proto" ${NOT_MSVC}) +option (ENABLE_CAPNP "Enable Cap'n Proto" ON) if (ENABLE_CAPNP) # cmake 3.5.1 bug: diff --git a/cmake/find_cpuid.cmake b/cmake/find_cpuid.cmake index d486e0fb2a3..d02336021bb 100644 --- a/cmake/find_cpuid.cmake +++ b/cmake/find_cpuid.cmake @@ -1,4 +1,14 @@ -option (USE_INTERNAL_CPUID_LIBRARY "Set to FALSE to use system cpuid library instead of bundled" ${NOT_UNBUNDLED}) +# Freebsd: /usr/local/include/libcpuid/libcpuid_types.h:61:29: error: conflicting declaration 'typedef 
long long int int64_t' +# TODO: test new libcpuid - maybe already fixed + +if (NOT ARCH_ARM) + if (OS_FREEBSD) + set (DEFAULT_USE_INTERNAL_CPUID_LIBRARY 1) + else () + set (DEFAULT_USE_INTERNAL_CPUID_LIBRARY ${NOT_UNBUNDLED}) + endif () + option (USE_INTERNAL_CPUID_LIBRARY "Set to FALSE to use system cpuid library instead of bundled" ${DEFAULT_USE_INTERNAL_CPUID_LIBRARY}) +endif () #if (USE_INTERNAL_CPUID_LIBRARY AND NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/libcpuid/include/cpuid/libcpuid.h") # message (WARNING "submodule contrib/libcpuid is missing. to fix try run: \n git submodule update --init --recursive") diff --git a/cmake/find_execinfo.cmake b/cmake/find_execinfo.cmake index 05dd72dbb3d..650d279983c 100644 --- a/cmake/find_execinfo.cmake +++ b/cmake/find_execinfo.cmake @@ -1,4 +1,4 @@ -if (ARCH_FREEBSD) +if (OS_FREEBSD) find_library (EXECINFO_LIBRARY execinfo) find_library (ELF_LIBRARY elf) message (STATUS "Using execinfo: ${EXECINFO_LIBRARY}") diff --git a/cmake/find_llvm.cmake b/cmake/find_llvm.cmake index 6e45f715552..d9805b03303 100644 --- a/cmake/find_llvm.cmake +++ b/cmake/find_llvm.cmake @@ -24,6 +24,15 @@ if (ENABLE_EMBEDDED_COMPILER) endif () endif () + if (LLVM_FOUND) + find_library (LLD_LIBRARY_TEST lldCore PATHS ${LLVM_LIBRARY_DIRS}) + find_path (LLD_INCLUDE_DIR_TEST NAMES lld/Core/AbsoluteAtom.h PATHS ${LLVM_INCLUDE_DIRS}) + if (NOT LLD_LIBRARY_TEST OR NOT LLD_INCLUDE_DIR_TEST) + set (LLVM_FOUND 0) + message(WARNING "liblld (${LLD_LIBRARY_TEST}, ${LLD_INCLUDE_DIR_TEST}) not found in ${LLVM_INCLUDE_DIRS} ${LLVM_LIBRARY_DIRS}. 
Disabling internal compiler.") + endif () + endif () + if (LLVM_FOUND) # Remove dynamically-linked zlib and libedit from LLVM's dependencies: set_target_properties(LLVMSupport PROPERTIES INTERFACE_LINK_LIBRARIES "-lpthread;LLVMDemangle;${ZLIB_LIBRARIES}") @@ -34,6 +43,12 @@ if (ENABLE_EMBEDDED_COMPILER) else() set (USE_EMBEDDED_COMPILER 0) endif() + + if (LLVM_FOUND AND OS_LINUX AND USE_LIBCXX) + message(WARNING "Option USE_INTERNAL_LLVM_LIBRARY is not set but the LLVM library from OS packages in Linux is incompatible with libc++ ABI. LLVM Will be disabled.") + set (LLVM_FOUND 0) + set (USE_EMBEDDED_COMPILER 0) + endif () else() set (LLVM_FOUND 1) set (USE_EMBEDDED_COMPILER 1) diff --git a/cmake/find_ltdl.cmake b/cmake/find_ltdl.cmake index 935de0d4124..18003618dbd 100644 --- a/cmake/find_ltdl.cmake +++ b/cmake/find_ltdl.cmake @@ -1,3 +1,5 @@ -set (LTDL_PATHS "/usr/local/opt/libtool/lib") -find_library (LTDL_LIBRARY ltdl PATHS ${LTDL_PATHS}) -message (STATUS "Using ltdl: ${LTDL_LIBRARY}") +if (ENABLE_ODBC AND NOT USE_INTERNAL_ODBC_LIBRARY) + set (LTDL_PATHS "/usr/local/opt/libtool/lib") + find_library (LTDL_LIBRARY ltdl PATHS ${LTDL_PATHS}) + message (STATUS "Using ltdl: ${LTDL_LIBRARY}") +endif () diff --git a/cmake/find_odbc.cmake b/cmake/find_odbc.cmake index 338108910bf..95acf40b2b4 100644 --- a/cmake/find_odbc.cmake +++ b/cmake/find_odbc.cmake @@ -13,54 +13,77 @@ # This module defines # ODBC_INCLUDE_DIRECTORIES, where to find sql.h # ODBC_LIBRARIES, the libraries to link against to use ODBC -# ODBC_FOUND. If false, you cannot build anything that requires MySQL. +# ODBC_FOUND. If false, you cannot build anything that requires ODBC. 
-find_path(ODBC_INCLUDE_DIRECTORIES - NAMES sql.h - HINTS - /usr/include - /usr/include/odbc - /usr/include/iodbc - /usr/local/include - /usr/local/include/odbc - /usr/local/include/iodbc - /usr/local/odbc/include - /usr/local/iodbc/include - "C:/Program Files/ODBC/include" - "C:/Program Files/Microsoft SDKs/Windows/v7.0/include" - "C:/Program Files/Microsoft SDKs/Windows/v6.0a/include" - "C:/ODBC/include" - DOC "Specify the directory containing sql.h." -) +option (ENABLE_ODBC "Enable ODBC" ${OS_LINUX}) +if (OS_LINUX) + option (USE_INTERNAL_ODBC_LIBRARY "Set to FALSE to use system odbc library instead of bundled" ${NOT_UNBUNDLED}) +else () + option (USE_INTERNAL_ODBC_LIBRARY "Set to FALSE to use system odbc library instead of bundled" OFF) +endif () -find_library(ODBC_LIBRARIES - NAMES iodbc odbc iodbcinst odbcinst odbc32 - HINTS - /usr/lib - /usr/lib/odbc - /usr/lib/iodbc - /usr/local/lib - /usr/local/lib/odbc - /usr/local/lib/iodbc - /usr/local/odbc/lib - /usr/local/iodbc/lib - "C:/Program Files/ODBC/lib" - "C:/ODBC/lib/debug" - "C:/Program Files (x86)/Microsoft SDKs/Windows/v7.0A/Lib" - DOC "Specify the ODBC driver manager library here." -) +if (USE_INTERNAL_ODBC_LIBRARY AND NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/unixodbc/README") + message (WARNING "submodule contrib/unixodbc is missing. to fix try run: \n git submodule update --init --recursive") + set (USE_INTERNAL_ODBC_LIBRARY 0) +endif () -# MinGW find usually fails -if(MINGW) - set(ODBC_INCLUDE_DIRECTORIES ".") - set(ODBC_LIBRARIES odbc32) -endif() - -include(FindPackageHandleStandardArgs) -find_package_handle_standard_args(ODBC - DEFAULT_MSG - ODBC_INCLUDE_DIRECTORIES - ODBC_LIBRARIES - ) +set (ODBC_INCLUDE_DIRECTORIES ) # Include directories will be either used automatically by target_include_directories or set later. 
-mark_as_advanced(ODBC_FOUND ODBC_LIBRARIES ODBC_INCLUDE_DIRECTORIES) +if (ENABLE_ODBC) + if (USE_INTERNAL_ODBC_LIBRARY) + set (ODBC_LIBRARIES unixodbc) + set (ODBC_FOUND 1) + set (USE_ODBC 1) + else () + find_path(ODBC_INCLUDE_DIRECTORIES + NAMES sql.h + HINTS + /usr/include + /usr/include/iodbc + /usr/include/odbc + /usr/local/include + /usr/local/include/iodbc + /usr/local/include/odbc + /usr/local/iodbc/include + /usr/local/odbc/include + "C:/Program Files/ODBC/include" + "C:/Program Files/Microsoft SDKs/Windows/v7.0/include" + "C:/Program Files/Microsoft SDKs/Windows/v6.0a/include" + "C:/ODBC/include" + DOC "Specify the directory containing sql.h." + ) + + find_library(ODBC_LIBRARIES + NAMES iodbc odbc iodbcinst odbcinst odbc32 + HINTS + /usr/lib + /usr/lib/iodbc + /usr/lib/odbc + /usr/local/lib + /usr/local/lib/iodbc + /usr/local/lib/odbc + /usr/local/iodbc/lib + /usr/local/odbc/lib + "C:/Program Files/ODBC/lib" + "C:/ODBC/lib/debug" + "C:/Program Files (x86)/Microsoft SDKs/Windows/v7.0A/Lib" + DOC "Specify the ODBC driver manager library here." + ) + + # MinGW find usually fails + if(MINGW) + set(ODBC_INCLUDE_DIRECTORIES ".") + set(ODBC_LIBRARIES odbc32) + endif() + + include(FindPackageHandleStandardArgs) + find_package_handle_standard_args(ODBC + DEFAULT_MSG + ODBC_INCLUDE_DIRECTORIES + ODBC_LIBRARIES) + + mark_as_advanced(ODBC_FOUND ODBC_LIBRARIES ODBC_INCLUDE_DIRECTORIES) + endif () +endif () + +message (STATUS "Using odbc: ${ODBC_INCLUDE_DIRECTORIES} : ${ODBC_LIBRARIES}") diff --git a/cmake/find_poco.cmake b/cmake/find_poco.cmake index 947d31951c9..f0bc535f614 100644 --- a/cmake/find_poco.cmake +++ b/cmake/find_poco.cmake @@ -92,8 +92,7 @@ elseif (NOT MISSING_INTERNAL_POCO_LIBRARY) endif () endif () - # TODO! 
fix internal ssl - if (OPENSSL_FOUND AND NOT USE_INTERNAL_SSL_LIBRARY AND (NOT DEFINED ENABLE_POCO_NETSSL OR ENABLE_POCO_NETSSL)) + if (OPENSSL_FOUND AND (NOT DEFINED ENABLE_POCO_NETSSL OR ENABLE_POCO_NETSSL)) set (Poco_NetSSL_LIBRARY PocoNetSSL) set (Poco_Crypto_LIBRARY PocoCrypto) endif () diff --git a/cmake/find_rdkafka.cmake b/cmake/find_rdkafka.cmake index 396be18cd1c..447e93e28a6 100644 --- a/cmake/find_rdkafka.cmake +++ b/cmake/find_rdkafka.cmake @@ -1,8 +1,10 @@ -option (ENABLE_RDKAFKA "Enable kafka" ${NOT_MSVC}) +option (ENABLE_RDKAFKA "Enable kafka" ON) if (ENABLE_RDKAFKA) -option (USE_INTERNAL_RDKAFKA_LIBRARY "Set to FALSE to use system librdkafka instead of the bundled" ${NOT_UNBUNDLED}) +if (OS_LINUX_X86_64) + option (USE_INTERNAL_RDKAFKA_LIBRARY "Set to FALSE to use system librdkafka instead of the bundled" ${NOT_UNBUNDLED}) +endif () if (USE_INTERNAL_RDKAFKA_LIBRARY AND NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/librdkafka/CMakeLists.txt") message (WARNING "submodule contrib/librdkafka is missing. 
to fix try run: \n git submodule update --init --recursive") @@ -13,7 +15,7 @@ endif () if (NOT USE_INTERNAL_RDKAFKA_LIBRARY) find_library (RDKAFKA_LIB rdkafka) find_path (RDKAFKA_INCLUDE_DIR NAMES librdkafka/rdkafka.h PATHS ${RDKAFKA_INCLUDE_PATHS}) - if (USE_STATIC_LIBRARIES AND NOT ARCH_FREEBSD) + if (USE_STATIC_LIBRARIES AND NOT OS_FREEBSD) find_library (SASL2_LIBRARY sasl2) endif () endif () diff --git a/cmake/find_rt.cmake b/cmake/find_rt.cmake index 82ec314d195..25614fe55eb 100644 --- a/cmake/find_rt.cmake +++ b/cmake/find_rt.cmake @@ -1,7 +1,7 @@ if (APPLE) # lib from libs/libcommon set (RT_LIBRARY "apple_rt") -elseif (ARCH_FREEBSD) +elseif (OS_FREEBSD) find_library (RT_LIBRARY rt) else () set (RT_LIBRARY "") diff --git a/cmake/find_ssl.cmake b/cmake/find_ssl.cmake index ec40e498da1..51e869f86ea 100644 --- a/cmake/find_ssl.cmake +++ b/cmake/find_ssl.cmake @@ -1,4 +1,4 @@ -option (USE_INTERNAL_SSL_LIBRARY "Set to FALSE to use system *ssl library instead of bundled" ${MSVC}) +option (USE_INTERNAL_SSL_LIBRARY "Set to FALSE to use system *ssl library instead of bundled" ${OS_LINUX}) set (OPENSSL_USE_STATIC_LIBS ${USE_STATIC_LIBRARIES}) diff --git a/cmake/find_zlib.cmake b/cmake/find_zlib.cmake index 17350f9fd58..501a50f688b 100644 --- a/cmake/find_zlib.cmake +++ b/cmake/find_zlib.cmake @@ -1,4 +1,6 @@ -option (USE_INTERNAL_ZLIB_LIBRARY "Set to FALSE to use system zlib library instead of bundled" ${NOT_UNBUNDLED}) +if (NOT OS_FREEBSD) + option (USE_INTERNAL_ZLIB_LIBRARY "Set to FALSE to use system zlib library instead of bundled" ${NOT_UNBUNDLED}) +endif () if (NOT USE_INTERNAL_ZLIB_LIBRARY) find_package (ZLIB) @@ -17,7 +19,7 @@ if (NOT ZLIB_FOUND) set (USE_INTERNAL_ZLIB_LIBRARY 1) set (ZLIB_COMPAT 1) # for zlib-ng, also enables WITH_GZFILEOP set (WITH_NATIVE_INSTRUCTIONS ${ARCHNATIVE}) - if (ARCH_FREEBSD OR ARCH_I386) + if (OS_FREEBSD OR ARCH_I386) set (WITH_OPTIM 0 CACHE INTERNAL "") # Bug in assembler endif () if (ARCH_AARCH64) diff --git 
a/cmake/sanitize.cmake b/cmake/sanitize.cmake index bac27578663..a90533345e6 100644 --- a/cmake/sanitize.cmake +++ b/cmake/sanitize.cmake @@ -1,27 +1,37 @@ +option (SANITIZE "Enable sanitizer: address, memory, thread, undefined" "") + set (SAN_FLAGS "${SAN_FLAGS} -g -fno-omit-frame-pointer -DSANITIZER") -if (SAN_DEBUG) - set (SAN_FLAGS "${SAN_FLAGS} -O0") -else () - set (SAN_FLAGS "${SAN_FLAGS} -O3") -endif () -set (CMAKE_CXX_FLAGS_ASAN "${CMAKE_CXX_FLAGS_ASAN} ${SAN_FLAGS} -fsanitize=address") -set (CMAKE_C_FLAGS_ASAN "${CMAKE_C_FLAGS_ASAN} ${SAN_FLAGS} -fsanitize=address") -set (CMAKE_EXE_LINKER_FLAGS_ASAN "${CMAKE_EXE_LINKER_FLAGS_ASAN} -fsanitize=address") -set (CMAKE_CXX_FLAGS_UBSAN "${CMAKE_CXX_FLAGS_UBSAN} ${SAN_FLAGS} -fsanitize=undefined") -set (CMAKE_C_FLAGS_UBSAN "${CMAKE_C_FLAGS_UBSAN} ${SAN_FLAGS} -fsanitize=undefined") -set (CMAKE_EXE_LINKER_FLAGS_UBSAN "${CMAKE_EXE_LINKER_FLAGS_UBSAN} -fsanitize=undefined") -set (CMAKE_CXX_FLAGS_MSAN "${CMAKE_CXX_FLAGS_MSAN} ${SAN_FLAGS} -fsanitize=memory") -set (CMAKE_C_FLAGS_MSAN "${CMAKE_C_FLAGS_MSAN} ${SAN_FLAGS} -fsanitize=memory") -set (CMAKE_EXE_LINKER_FLAGS_MSAN "${CMAKE_EXE_LINKER_FLAGS_MSAN} -fsanitize=memory") -set (CMAKE_CXX_FLAGS_TSAN "${CMAKE_CXX_FLAGS_TSAN} ${SAN_FLAGS} -fsanitize=thread") -set (CMAKE_C_FLAGS_TSAN "${CMAKE_C_FLAGS_TSAN} ${SAN_FLAGS} -fsanitize=thread") -set (CMAKE_EXE_LINKER_FLAGS_TSAN "${CMAKE_EXE_LINKER_FLAGS_TSAN} -fsanitize=thread") - -# clang use static linking by default -if (MAKE_STATIC_LIBRARIES AND CMAKE_CXX_COMPILER_ID STREQUAL "GNU") - set (CMAKE_EXE_LINKER_FLAGS_ASAN "${CMAKE_EXE_LINKER_FLAGS_ASAN} -static-libasan") - set (CMAKE_EXE_LINKER_FLAGS_UBSAN "${CMAKE_EXE_LINKER_FLAGS_UBSAN} -static-libubsan") - set (CMAKE_EXE_LINKER_FLAGS_MSAN "${CMAKE_EXE_LINKER_FLAGS_MSAN} -static-libmsan") - set (CMAKE_EXE_LINKER_FLAGS_TSAN "${CMAKE_EXE_LINKER_FLAGS_TSAN} -static-libtsan") -endif () +if (SANITIZE) + if (SANITIZE STREQUAL "address") + set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} 
${SAN_FLAGS} -fsanitize=address") + set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SAN_FLAGS} -fsanitize=address") + set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fsanitize=address") + if (MAKE_STATIC_LIBRARIES AND CMAKE_CXX_COMPILER_ID STREQUAL "GNU") + set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -static-libasan") + endif () + elseif (SANITIZE STREQUAL "memory") + set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SAN_FLAGS} -fsanitize=memory") + set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SAN_FLAGS} -fsanitize=memory") + set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fsanitize=memory") + if (MAKE_STATIC_LIBRARIES AND CMAKE_CXX_COMPILER_ID STREQUAL "GNU") + set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -static-libmsan") + endif () + elseif (SANITIZE STREQUAL "thread") + set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SAN_FLAGS} -fsanitize=thread") + set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SAN_FLAGS} -fsanitize=thread") + set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fsanitize=thread") + if (MAKE_STATIC_LIBRARIES AND CMAKE_CXX_COMPILER_ID STREQUAL "GNU") + set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -static-libtsan") + endif () + elseif (SANITIZE STREQUAL "undefined") + set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SAN_FLAGS} -fsanitize=undefined") + set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SAN_FLAGS} -fsanitize=undefined") + set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fsanitize=undefined") + if (MAKE_STATIC_LIBRARIES AND CMAKE_CXX_COMPILER_ID STREQUAL "GNU") + set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -static-libubsan") + endif () + else () + message (FATAL_ERROR "Unknown sanitizer type: ${SANITIZE}") + endif () +endif() diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index 2f5e003fc2f..6faa507a356 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -1,5 +1,11 @@ -if (NOT MSVC) - set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-old-style-cast -Wno-unused-function 
-Wno-deprecated-declarations -Wno-non-virtual-dtor -std=c++1z") +# Third-party libraries may have substandard code. + +if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU") + set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-unused-function -Wno-unused-variable -Wno-unused-but-set-variable -Wno-unused-result -Wno-deprecated-declarations -Wno-maybe-uninitialized -Wno-format -Wno-misleading-indentation") + set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-old-style-cast -Wno-unused-function -Wno-unused-variable -Wno-unused-but-set-variable -Wno-unused-result -Wno-deprecated-declarations -Wno-non-virtual-dtor -Wno-maybe-uninitialized -Wno-format -Wno-misleading-indentation -Wno-implicit-fallthrough -std=c++1z") +elseif (CMAKE_CXX_COMPILER_ID STREQUAL "Clang") + set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-unused-function -Wno-unused-variable -Wno-unused-result -Wno-deprecated-declarations -Wno-format") + set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-old-style-cast -Wno-unused-function -Wno-unused-variable -Wno-unused-result -Wno-deprecated-declarations -Wno-non-virtual-dtor -Wno-format -std=c++1z") endif () if (USE_INTERNAL_BOOST_LIBRARY) @@ -37,6 +43,8 @@ if (USE_INTERNAL_METROHASH_LIBRARY) add_subdirectory (libmetrohash) endif () +add_subdirectory (murmurhash) + if (USE_INTERNAL_BTRIE_LIBRARY) add_subdirectory (libbtrie) endif () @@ -75,7 +83,11 @@ if (ENABLE_TCMALLOC AND USE_INTERNAL_GPERFTOOLS_LIBRARY) add_subdirectory (libtcmalloc) endif () -if (NOT ARCH_ARM) +if (ENABLE_JEMALLOC AND USE_INTERNAL_JEMALLOC_LIBRARY) + add_subdirectory (jemalloc-cmake) +endif () + +if (USE_INTERNAL_CPUID_LIBRARY) add_subdirectory (libcpuid) endif () @@ -84,32 +96,26 @@ if (USE_INTERNAL_SSL_LIBRARY) set (BUILD_SHARED 1) endif () set (USE_SHARED ${USE_STATIC_LIBRARIES}) + set (LIBRESSL_SKIP_INSTALL 1) add_subdirectory (ssl) - target_include_directories(${OPENSSL_CRYPTO_LIBRARY} PUBLIC ${OPENSSL_INCLUDE_DIR}) - target_include_directories(${OPENSSL_SSL_LIBRARY} PUBLIC ${OPENSSL_INCLUDE_DIR}) + 
target_include_directories(${OPENSSL_CRYPTO_LIBRARY} SYSTEM PUBLIC ${OPENSSL_INCLUDE_DIR}) + target_include_directories(${OPENSSL_SSL_LIBRARY} SYSTEM PUBLIC ${OPENSSL_INCLUDE_DIR}) +endif () + +if (ENABLE_MYSQL AND USE_INTERNAL_MYSQL_LIBRARY) + add_subdirectory (mariadb-connector-c-cmake) + target_include_directories(mysqlclient PRIVATE BEFORE ${ZLIB_INCLUDE_DIR}) + target_include_directories(mysqlclient PRIVATE BEFORE ${OPENSSL_INCLUDE_DIR}) endif () if (USE_INTERNAL_RDKAFKA_LIBRARY) - set (RDKAFKA_BUILD_EXAMPLES OFF CACHE INTERNAL "") - set (RDKAFKA_BUILD_TESTS OFF CACHE INTERNAL "") - set (RDKAFKA_BUILD_STATIC ${MAKE_STATIC_LIBRARIES} CACHE INTERNAL "") - mark_as_advanced (ZLIB_INCLUDE_DIR) - - if (USE_INTERNAL_SSL_LIBRARY) - if (MAKE_STATIC_LIBRARIES) - add_library(bundled-ssl ALIAS ${OPENSSL_SSL_LIBRARY}) - set (WITH_BUNDLED_SSL 1 CACHE INTERNAL "") - else () - set (WITH_SSL 0 CACHE INTERNAL "") - endif () - endif () - - add_subdirectory (librdkafka) - - if (USE_INTERNAL_SSL_LIBRARY AND MAKE_STATIC_LIBRARIES) - target_include_directories(rdkafka PRIVATE BEFORE ${OPENSSL_INCLUDE_DIR}) - endif () + add_subdirectory (librdkafka-cmake) target_include_directories(rdkafka PRIVATE BEFORE ${ZLIB_INCLUDE_DIR}) + target_include_directories(rdkafka PRIVATE BEFORE ${OPENSSL_INCLUDE_DIR}) +endif () + +if (ENABLE_ODBC AND USE_INTERNAL_ODBC_LIBRARY) + add_subdirectory (unixodbc-cmake) endif () if (USE_INTERNAL_CAPNP_LIBRARY) @@ -128,11 +134,6 @@ if (USE_INTERNAL_POCO_LIBRARY) set (_save ${ENABLE_TESTS}) set (ENABLE_TESTS 0) set (CMAKE_DISABLE_FIND_PACKAGE_ZLIB 1) - if (USE_INTERNAL_SSL_LIBRARY OR (DEFINED ENABLE_POCO_NETSSL AND NOT ENABLE_POCO_NETSSL)) - set (DISABLE_INTERNAL_OPENSSL 1 CACHE INTERNAL "") - set (ENABLE_NETSSL 0 CACHE INTERNAL "") # TODO! - set (ENABLE_CRYPTO 0 CACHE INTERNAL "") # TODO! 
- endif () if (MSVC) set (ENABLE_DATA_ODBC 0 CACHE INTERNAL "") # TODO (build fail) endif () @@ -144,10 +145,14 @@ if (USE_INTERNAL_POCO_LIBRARY) if (OPENSSL_FOUND AND TARGET Crypto AND (NOT DEFINED ENABLE_POCO_NETSSL OR ENABLE_POCO_NETSSL)) # Bug in poco https://github.com/pocoproject/poco/pull/2100 found on macos - target_include_directories(Crypto PUBLIC ${OPENSSL_INCLUDE_DIR}) + target_include_directories(Crypto SYSTEM PUBLIC ${OPENSSL_INCLUDE_DIR}) endif () endif () if (USE_INTERNAL_LLVM_LIBRARY) + # ld: unknown option: --color-diagnostics + if (APPLE AND COMPILER_GCC) + set (LINKER_SUPPORTS_COLOR_DIAGNOSTICS 0 CACHE INTERNAL "") + endif () add_subdirectory (llvm/llvm) endif () diff --git a/contrib/boost-cmake/CMakeLists.txt b/contrib/boost-cmake/CMakeLists.txt index 2a89293c902..7e2379c5738 100644 --- a/contrib/boost-cmake/CMakeLists.txt +++ b/contrib/boost-cmake/CMakeLists.txt @@ -42,9 +42,9 @@ ${LIBRARY_DIR}/libs/filesystem/src/windows_file_codecvt.cpp) add_library(boost_system_internal ${LIBRARY_DIR}/libs/system/src/error_code.cpp) -target_include_directories (boost_program_options_internal BEFORE PUBLIC ${Boost_INCLUDE_DIRS}) -target_include_directories (boost_filesystem_internal BEFORE PUBLIC ${Boost_INCLUDE_DIRS}) -target_include_directories (boost_system_internal BEFORE PUBLIC ${Boost_INCLUDE_DIRS}) +target_include_directories (boost_program_options_internal SYSTEM BEFORE PUBLIC ${Boost_INCLUDE_DIRS}) +target_include_directories (boost_filesystem_internal SYSTEM BEFORE PUBLIC ${Boost_INCLUDE_DIRS}) +target_include_directories (boost_system_internal SYSTEM BEFORE PUBLIC ${Boost_INCLUDE_DIRS}) target_compile_definitions (boost_program_options_internal PUBLIC BOOST_SYSTEM_NO_DEPRECATED) target_compile_definitions (boost_filesystem_internal PUBLIC BOOST_SYSTEM_NO_DEPRECATED) diff --git a/contrib/jemalloc b/contrib/jemalloc new file mode 160000 index 00000000000..41b7372eade --- /dev/null +++ b/contrib/jemalloc @@ -0,0 +1 @@ +Subproject commit 
41b7372eadee941b9164751b8d4963f915d3ceae diff --git a/contrib/jemalloc-cmake/CMakeLists.txt b/contrib/jemalloc-cmake/CMakeLists.txt new file mode 100644 index 00000000000..d60d34604a9 --- /dev/null +++ b/contrib/jemalloc-cmake/CMakeLists.txt @@ -0,0 +1,52 @@ +set(JEMALLOC_SOURCE_DIR ${CMAKE_SOURCE_DIR}/contrib/jemalloc) + +set(SRCS +${JEMALLOC_SOURCE_DIR}/src/arena.c +${JEMALLOC_SOURCE_DIR}/src/background_thread.c +${JEMALLOC_SOURCE_DIR}/src/base.c +${JEMALLOC_SOURCE_DIR}/src/bin.c +${JEMALLOC_SOURCE_DIR}/src/bitmap.c +${JEMALLOC_SOURCE_DIR}/src/ckh.c +${JEMALLOC_SOURCE_DIR}/src/ctl.c +${JEMALLOC_SOURCE_DIR}/src/div.c +${JEMALLOC_SOURCE_DIR}/src/extent.c +${JEMALLOC_SOURCE_DIR}/src/extent_dss.c +${JEMALLOC_SOURCE_DIR}/src/extent_mmap.c +${JEMALLOC_SOURCE_DIR}/src/hash.c +${JEMALLOC_SOURCE_DIR}/src/hook.c +${JEMALLOC_SOURCE_DIR}/src/jemalloc.c +${JEMALLOC_SOURCE_DIR}/src/jemalloc_cpp.cpp +${JEMALLOC_SOURCE_DIR}/src/large.c +${JEMALLOC_SOURCE_DIR}/src/log.c +${JEMALLOC_SOURCE_DIR}/src/malloc_io.c +${JEMALLOC_SOURCE_DIR}/src/mutex.c +${JEMALLOC_SOURCE_DIR}/src/mutex_pool.c +${JEMALLOC_SOURCE_DIR}/src/nstime.c +${JEMALLOC_SOURCE_DIR}/src/pages.c +${JEMALLOC_SOURCE_DIR}/src/prng.c +${JEMALLOC_SOURCE_DIR}/src/prof.c +${JEMALLOC_SOURCE_DIR}/src/rtree.c +${JEMALLOC_SOURCE_DIR}/src/sc.c +${JEMALLOC_SOURCE_DIR}/src/stats.c +${JEMALLOC_SOURCE_DIR}/src/sz.c +${JEMALLOC_SOURCE_DIR}/src/tcache.c +${JEMALLOC_SOURCE_DIR}/src/test_hooks.c +${JEMALLOC_SOURCE_DIR}/src/ticker.c +${JEMALLOC_SOURCE_DIR}/src/tsd.c +${JEMALLOC_SOURCE_DIR}/src/witness.c +) + +if(CMAKE_SYSTEM_NAME MATCHES "Darwin") + list(APPEND SRCS ${JEMALLOC_SOURCE_DIR}/src/zone.c) +endif() + +add_library(jemalloc STATIC ${SRCS}) + +target_include_directories(jemalloc PUBLIC + ${CMAKE_CURRENT_SOURCE_DIR}/include + ${CMAKE_CURRENT_SOURCE_DIR}/include_linux_x86_64) # jemalloc.h + +target_include_directories(jemalloc PRIVATE + ${JEMALLOC_SOURCE_DIR}/include) + +target_compile_definitions(jemalloc PRIVATE 
-DJEMALLOC_NO_PRIVATE_NAMESPACE) diff --git a/contrib/jemalloc-cmake/README b/contrib/jemalloc-cmake/README new file mode 100644 index 00000000000..0af9c4f0e45 --- /dev/null +++ b/contrib/jemalloc-cmake/README @@ -0,0 +1 @@ +It allows to integrate JEMalloc into CMake project. diff --git a/contrib/jemalloc-cmake/include/jemalloc/jemalloc.h b/contrib/jemalloc-cmake/include/jemalloc/jemalloc.h new file mode 100644 index 00000000000..d06243c5239 --- /dev/null +++ b/contrib/jemalloc-cmake/include/jemalloc/jemalloc.h @@ -0,0 +1,16 @@ +#pragma once + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include +#include +#include +#include + +#ifdef __cplusplus +} +#endif + diff --git a/contrib/jemalloc-cmake/include/jemalloc/jemalloc_rename.h b/contrib/jemalloc-cmake/include/jemalloc/jemalloc_rename.h new file mode 100644 index 00000000000..a2ea2dd3533 --- /dev/null +++ b/contrib/jemalloc-cmake/include/jemalloc/jemalloc_rename.h @@ -0,0 +1,29 @@ +/* + * Name mangling for public symbols is controlled by --with-mangling and + * --with-jemalloc-prefix. With default settings the je_ prefix is stripped by + * these macro definitions. 
+ */ +#ifndef JEMALLOC_NO_RENAME +# define je_aligned_alloc aligned_alloc +# define je_calloc calloc +# define je_dallocx dallocx +# define je_free free +# define je_mallctl mallctl +# define je_mallctlbymib mallctlbymib +# define je_mallctlnametomib mallctlnametomib +# define je_malloc malloc +# define je_malloc_conf malloc_conf +# define je_malloc_message malloc_message +# define je_malloc_stats_print malloc_stats_print +# define je_malloc_usable_size malloc_usable_size +# define je_mallocx mallocx +# define je_nallocx nallocx +# define je_posix_memalign posix_memalign +# define je_rallocx rallocx +# define je_realloc realloc +# define je_sallocx sallocx +# define je_sdallocx sdallocx +# define je_xallocx xallocx +# define je_memalign memalign +# define je_valloc valloc +#endif diff --git a/contrib/jemalloc-cmake/include_linux_x86_64/README b/contrib/jemalloc-cmake/include_linux_x86_64/README new file mode 100644 index 00000000000..bf7663bda8d --- /dev/null +++ b/contrib/jemalloc-cmake/include_linux_x86_64/README @@ -0,0 +1,7 @@ +Here are pre-generated files from jemalloc on Linux x86_64. +You can obtain these files by running ./autogen.sh inside jemalloc source directory. + +Added #define GNU_SOURCE +Added JEMALLOC_OVERRIDE___POSIX_MEMALIGN because why not. +Removed JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF because it's non standard. +Removed JEMALLOC_PURGE_MADVISE_FREE because it's available only from Linux 4.5. diff --git a/contrib/jemalloc-cmake/include_linux_x86_64/jemalloc/internal/jemalloc_internal_defs.h b/contrib/jemalloc-cmake/include_linux_x86_64/jemalloc/internal/jemalloc_internal_defs.h new file mode 100644 index 00000000000..43936e8eba0 --- /dev/null +++ b/contrib/jemalloc-cmake/include_linux_x86_64/jemalloc/internal/jemalloc_internal_defs.h @@ -0,0 +1,373 @@ +/* include/jemalloc/internal/jemalloc_internal_defs.h. Generated from jemalloc_internal_defs.h.in by configure. 
*/ +#ifndef JEMALLOC_INTERNAL_DEFS_H_ +#define JEMALLOC_INTERNAL_DEFS_H_ + +#ifndef _GNU_SOURCE + #define _GNU_SOURCE +#endif + +/* + * If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all + * public APIs to be prefixed. This makes it possible, with some care, to use + * multiple allocators simultaneously. + */ +/* #undef JEMALLOC_PREFIX */ +/* #undef JEMALLOC_CPREFIX */ + +/* + * Define overrides for non-standard allocator-related functions if they are + * present on the system. + */ +#define JEMALLOC_OVERRIDE___LIBC_CALLOC +#define JEMALLOC_OVERRIDE___LIBC_FREE +#define JEMALLOC_OVERRIDE___LIBC_MALLOC +#define JEMALLOC_OVERRIDE___LIBC_MEMALIGN +#define JEMALLOC_OVERRIDE___LIBC_REALLOC +#define JEMALLOC_OVERRIDE___LIBC_VALLOC +#define JEMALLOC_OVERRIDE___POSIX_MEMALIGN + +/* + * JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs. + * For shared libraries, symbol visibility mechanisms prevent these symbols + * from being exported, but for static libraries, naming collisions are a real + * possibility. + */ +#define JEMALLOC_PRIVATE_NAMESPACE je_ + +/* + * Hyper-threaded CPUs may need a special instruction inside spin loops in + * order to yield to another virtual CPU. + */ +#define CPU_SPINWAIT __asm__ volatile("pause") +/* 1 if CPU_SPINWAIT is defined, 0 otherwise. */ +#define HAVE_CPU_SPINWAIT 1 + +/* + * Number of significant bits in virtual addresses. This may be less than the + * total number of bits in a pointer, e.g. on x64, for which the uppermost 16 + * bits are the same as bit 47. + */ +#define LG_VADDR 48 + +/* Defined if C11 atomics are available. */ +#define JEMALLOC_C11_ATOMICS 1 + +/* Defined if GCC __atomic atomics are available. */ +#define JEMALLOC_GCC_ATOMIC_ATOMICS 1 + +/* Defined if GCC __sync atomics are available. 
*/ +#define JEMALLOC_GCC_SYNC_ATOMICS 1 + +/* + * Defined if __sync_add_and_fetch(uint32_t *, uint32_t) and + * __sync_sub_and_fetch(uint32_t *, uint32_t) are available, despite + * __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 not being defined (which means the + * functions are defined in libgcc instead of being inlines). + */ +/* #undef JE_FORCE_SYNC_COMPARE_AND_SWAP_4 */ + +/* + * Defined if __sync_add_and_fetch(uint64_t *, uint64_t) and + * __sync_sub_and_fetch(uint64_t *, uint64_t) are available, despite + * __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 not being defined (which means the + * functions are defined in libgcc instead of being inlines). + */ +/* #undef JE_FORCE_SYNC_COMPARE_AND_SWAP_8 */ + +/* + * Defined if __builtin_clz() and __builtin_clzl() are available. + */ +#define JEMALLOC_HAVE_BUILTIN_CLZ + +/* + * Defined if os_unfair_lock_*() functions are available, as provided by Darwin. + */ +/* #undef JEMALLOC_OS_UNFAIR_LOCK */ + +/* + * Defined if OSSpin*() functions are available, as provided by Darwin, and + * documented in the spinlock(3) manual page. + */ +/* #undef JEMALLOC_OSSPIN */ + +/* Defined if syscall(2) is usable. */ +#define JEMALLOC_USE_SYSCALL + +/* + * Defined if secure_getenv(3) is available. + */ +// Don't want dependency on newer GLIBC +//#define JEMALLOC_HAVE_SECURE_GETENV + +/* + * Defined if issetugid(2) is available. + */ +/* #undef JEMALLOC_HAVE_ISSETUGID */ + +/* Defined if pthread_atfork(3) is available. */ +#define JEMALLOC_HAVE_PTHREAD_ATFORK + +/* Defined if pthread_setname_np(3) is available. */ +#define JEMALLOC_HAVE_PTHREAD_SETNAME_NP + +/* + * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available. + */ +#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE 1 + +/* + * Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available. + */ +#define JEMALLOC_HAVE_CLOCK_MONOTONIC 1 + +/* + * Defined if mach_absolute_time() is available. 
+ */ +/* #undef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME */ + +/* + * Defined if _malloc_thread_cleanup() exists. At least in the case of + * FreeBSD, pthread_key_create() allocates, which if used during malloc + * bootstrapping will cause recursion into the pthreads library. Therefore, if + * _malloc_thread_cleanup() exists, use it as the basis for thread cleanup in + * malloc_tsd. + */ +/* #undef JEMALLOC_MALLOC_THREAD_CLEANUP */ + +/* + * Defined if threaded initialization is known to be safe on this platform. + * Among other things, it must be possible to initialize a mutex without + * triggering allocation in order for threaded allocation to be safe. + */ +#define JEMALLOC_THREADED_INIT + +/* + * Defined if the pthreads implementation defines + * _pthread_mutex_init_calloc_cb(), in which case the function is used in order + * to avoid recursive allocation during mutex initialization. + */ +/* #undef JEMALLOC_MUTEX_INIT_CB */ + +/* Non-empty if the tls_model attribute is supported. */ +#define JEMALLOC_TLS_MODEL __attribute__((tls_model("initial-exec"))) + +/* + * JEMALLOC_DEBUG enables assertions and other sanity checks, and disables + * inline functions. + */ +/* #undef JEMALLOC_DEBUG */ + +/* JEMALLOC_STATS enables statistics calculation. */ +#define JEMALLOC_STATS + +/* JEMALLOC_PROF enables allocation profiling. */ +/* #undef JEMALLOC_PROF */ + +/* Use libunwind for profile backtracing if defined. */ +/* #undef JEMALLOC_PROF_LIBUNWIND */ + +/* Use libgcc for profile backtracing if defined. */ +/* #undef JEMALLOC_PROF_LIBGCC */ + +/* Use gcc intrinsics for profile backtracing if defined. */ +/* #undef JEMALLOC_PROF_GCC */ + +/* + * JEMALLOC_DSS enables use of sbrk(2) to allocate extents from the data storage + * segment (DSS). + */ +#define JEMALLOC_DSS + +/* Support memory filling (junk/zero). */ +#define JEMALLOC_FILL + +/* Support utrace(2)-based tracing. */ +/* #undef JEMALLOC_UTRACE */ + +/* Support optional abort() on OOM. 
*/ +/* #undef JEMALLOC_XMALLOC */ + +/* Support lazy locking (avoid locking unless a second thread is launched). */ +/* #undef JEMALLOC_LAZY_LOCK */ + +/* + * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size + * classes). + */ +/* #undef LG_QUANTUM */ + +/* One page is 2^LG_PAGE bytes. */ +#define LG_PAGE 12 + +/* + * One huge page is 2^LG_HUGEPAGE bytes. Note that this is defined even if the + * system does not explicitly support huge pages; system calls that require + * explicit huge page support are separately configured. + */ +#define LG_HUGEPAGE 21 + +/* + * If defined, adjacent virtual memory mappings with identical attributes + * automatically coalesce, and they fragment when changes are made to subranges. + * This is the normal order of things for mmap()/munmap(), but on Windows + * VirtualAlloc()/VirtualFree() operations must be precisely matched, i.e. + * mappings do *not* coalesce/fragment. + */ +#define JEMALLOC_MAPS_COALESCE + +/* + * If defined, retain memory for later reuse by default rather than using e.g. + * munmap() to unmap freed extents. This is enabled on 64-bit Linux because + * common sequences of mmap()/munmap() calls will cause virtual memory map + * holes. + */ +#define JEMALLOC_RETAIN + +/* TLS is used to map arenas and magazine caches to threads. */ +#define JEMALLOC_TLS + +/* + * Used to mark unreachable code to quiet "end of non-void" compiler warnings. + * Don't use this directly; instead use unreachable() from util.h + */ +#define JEMALLOC_INTERNAL_UNREACHABLE __builtin_unreachable + +/* + * ffs*() functions to use for bitmapping. Don't use these directly; instead, + * use ffs_*() from util.h. + */ +#define JEMALLOC_INTERNAL_FFSLL __builtin_ffsll +#define JEMALLOC_INTERNAL_FFSL __builtin_ffsl +#define JEMALLOC_INTERNAL_FFS __builtin_ffs + +/* + * If defined, explicitly attempt to more uniformly distribute large allocation + * pointer alignments across all cache indices. 
+ */ +#define JEMALLOC_CACHE_OBLIVIOUS + +/* + * If defined, enable logging facilities. We make this a configure option to + * avoid taking extra branches everywhere. + */ +/* #undef JEMALLOC_LOG */ + +/* + * Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings. + */ +/* #undef JEMALLOC_ZONE */ + +/* + * Methods for determining whether the OS overcommits. + * JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY: Linux's + * /proc/sys/vm.overcommit_memory file. + * JEMALLOC_SYSCTL_VM_OVERCOMMIT: FreeBSD's vm.overcommit sysctl. + */ +/* #undef JEMALLOC_SYSCTL_VM_OVERCOMMIT */ +#define JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY + +/* Defined if madvise(2) is available. */ +#define JEMALLOC_HAVE_MADVISE + +/* + * Defined if transparent huge pages are supported via the MADV_[NO]HUGEPAGE + * arguments to madvise(2). + */ +#define JEMALLOC_HAVE_MADVISE_HUGE + +/* + * Methods for purging unused pages differ between operating systems. + * + * madvise(..., MADV_FREE) : This marks pages as being unused, such that they + * will be discarded rather than swapped out. + * madvise(..., MADV_DONTNEED) : If JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS is + * defined, this immediately discards pages, + * such that new pages will be demand-zeroed if + * the address region is later touched; + * otherwise this behaves similarly to + * MADV_FREE, though typically with higher + * system overhead. + */ +//#define JEMALLOC_PURGE_MADVISE_FREE +#define JEMALLOC_PURGE_MADVISE_DONTNEED +#define JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS + +/* Defined if madvise(2) is available but MADV_FREE is not (x86 Linux only). */ +/* #undef JEMALLOC_DEFINE_MADVISE_FREE */ + +/* + * Defined if MADV_DO[NT]DUMP is supported as an argument to madvise. + */ +#define JEMALLOC_MADVISE_DONTDUMP + +/* + * Defined if transparent huge pages (THPs) are supported via the + * MADV_[NO]HUGEPAGE arguments to madvise(2), and THP support is enabled. 
+ */ +/* #undef JEMALLOC_THP */ + +/* Define if operating system has alloca.h header. */ +#define JEMALLOC_HAS_ALLOCA_H 1 + +/* C99 restrict keyword supported. */ +#define JEMALLOC_HAS_RESTRICT 1 + +/* For use by hash code. */ +/* #undef JEMALLOC_BIG_ENDIAN */ + +/* sizeof(int) == 2^LG_SIZEOF_INT. */ +#define LG_SIZEOF_INT 2 + +/* sizeof(long) == 2^LG_SIZEOF_LONG. */ +#define LG_SIZEOF_LONG 3 + +/* sizeof(long long) == 2^LG_SIZEOF_LONG_LONG. */ +#define LG_SIZEOF_LONG_LONG 3 + +/* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */ +#define LG_SIZEOF_INTMAX_T 3 + +/* glibc malloc hooks (__malloc_hook, __realloc_hook, __free_hook). */ +#define JEMALLOC_GLIBC_MALLOC_HOOK + +/* glibc memalign hook. */ +#define JEMALLOC_GLIBC_MEMALIGN_HOOK + +/* pthread support */ +#define JEMALLOC_HAVE_PTHREAD + +/* dlsym() support */ +#define JEMALLOC_HAVE_DLSYM + +/* Adaptive mutex support in pthreads. */ +#define JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP + +/* GNU specific sched_getcpu support */ +#define JEMALLOC_HAVE_SCHED_GETCPU + +/* GNU specific sched_setaffinity support */ +#define JEMALLOC_HAVE_SCHED_SETAFFINITY + +/* + * If defined, all the features necessary for background threads are present. + */ +#define JEMALLOC_BACKGROUND_THREAD 1 + +/* + * If defined, jemalloc symbols are not exported (doesn't work when + * JEMALLOC_PREFIX is not defined). + */ +/* #undef JEMALLOC_EXPORT */ + +/* config.malloc_conf options string. */ +#define JEMALLOC_CONFIG_MALLOC_CONF "" + +/* If defined, jemalloc takes the malloc/free/etc. symbol names. */ +#define JEMALLOC_IS_MALLOC 1 + +/* + * Defined if strerror_r returns char * if _GNU_SOURCE is defined. 
+ */ +#define JEMALLOC_STRERROR_R_RETURNS_CHAR_WITH_GNU_SOURCE + +#endif /* JEMALLOC_INTERNAL_DEFS_H_ */ diff --git a/contrib/jemalloc-cmake/include_linux_x86_64/jemalloc/internal/jemalloc_preamble.h b/contrib/jemalloc-cmake/include_linux_x86_64/jemalloc/internal/jemalloc_preamble.h new file mode 100644 index 00000000000..c150785fb4a --- /dev/null +++ b/contrib/jemalloc-cmake/include_linux_x86_64/jemalloc/internal/jemalloc_preamble.h @@ -0,0 +1,194 @@ +#ifndef JEMALLOC_PREAMBLE_H +#define JEMALLOC_PREAMBLE_H + +#include "jemalloc_internal_defs.h" +#include "jemalloc/internal/jemalloc_internal_decls.h" + +#ifdef JEMALLOC_UTRACE +#include +#endif + +#define JEMALLOC_NO_DEMANGLE +#ifdef JEMALLOC_JET +# undef JEMALLOC_IS_MALLOC +# define JEMALLOC_N(n) jet_##n +# include "jemalloc/internal/public_namespace.h" +# define JEMALLOC_NO_RENAME +# include "jemalloc/jemalloc.h" +# undef JEMALLOC_NO_RENAME +#else +# define JEMALLOC_N(n) je_##n +# include "jemalloc/jemalloc.h" +#endif + +#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN)) +#include +#endif + +#ifdef JEMALLOC_ZONE +#include +#include +#include +#endif + +#include "jemalloc/internal/jemalloc_internal_macros.h" + +/* + * Note that the ordering matters here; the hook itself is name-mangled. We + * want the inclusion of hooks to happen early, so that we hook as much as + * possible. 
+ */ +#ifndef JEMALLOC_NO_PRIVATE_NAMESPACE +# ifndef JEMALLOC_JET +# include "jemalloc/internal/private_namespace.h" +# else +# include "jemalloc/internal/private_namespace_jet.h" +# endif +#endif +#include "jemalloc/internal/test_hooks.h" + +#ifdef JEMALLOC_DEFINE_MADVISE_FREE +# define JEMALLOC_MADV_FREE 8 +#endif + +static const bool config_debug = +#ifdef JEMALLOC_DEBUG + true +#else + false +#endif + ; +static const bool have_dss = +#ifdef JEMALLOC_DSS + true +#else + false +#endif + ; +static const bool have_madvise_huge = +#ifdef JEMALLOC_HAVE_MADVISE_HUGE + true +#else + false +#endif + ; +static const bool config_fill = +#ifdef JEMALLOC_FILL + true +#else + false +#endif + ; +static const bool config_lazy_lock = +#ifdef JEMALLOC_LAZY_LOCK + true +#else + false +#endif + ; +static const char * const config_malloc_conf = JEMALLOC_CONFIG_MALLOC_CONF; +static const bool config_prof = +#ifdef JEMALLOC_PROF + true +#else + false +#endif + ; +static const bool config_prof_libgcc = +#ifdef JEMALLOC_PROF_LIBGCC + true +#else + false +#endif + ; +static const bool config_prof_libunwind = +#ifdef JEMALLOC_PROF_LIBUNWIND + true +#else + false +#endif + ; +static const bool maps_coalesce = +#ifdef JEMALLOC_MAPS_COALESCE + true +#else + false +#endif + ; +static const bool config_stats = +#ifdef JEMALLOC_STATS + true +#else + false +#endif + ; +static const bool config_tls = +#ifdef JEMALLOC_TLS + true +#else + false +#endif + ; +static const bool config_utrace = +#ifdef JEMALLOC_UTRACE + true +#else + false +#endif + ; +static const bool config_xmalloc = +#ifdef JEMALLOC_XMALLOC + true +#else + false +#endif + ; +static const bool config_cache_oblivious = +#ifdef JEMALLOC_CACHE_OBLIVIOUS + true +#else + false +#endif + ; +/* + * Undocumented, for jemalloc development use only at the moment. See the note + * in jemalloc/internal/log.h. 
+ */ +static const bool config_log = +#ifdef JEMALLOC_LOG + true +#else + false +#endif + ; +#ifdef JEMALLOC_HAVE_SCHED_GETCPU +/* Currently percpu_arena depends on sched_getcpu. */ +#define JEMALLOC_PERCPU_ARENA +#endif +static const bool have_percpu_arena = +#ifdef JEMALLOC_PERCPU_ARENA + true +#else + false +#endif + ; +/* + * Undocumented, and not recommended; the application should take full + * responsibility for tracking provenance. + */ +static const bool force_ivsalloc = +#ifdef JEMALLOC_FORCE_IVSALLOC + true +#else + false +#endif + ; +static const bool have_background_thread = +#ifdef JEMALLOC_BACKGROUND_THREAD + true +#else + false +#endif + ; + +#endif /* JEMALLOC_PREAMBLE_H */ diff --git a/contrib/jemalloc-cmake/include_linux_x86_64/jemalloc/jemalloc_defs.h b/contrib/jemalloc-cmake/include_linux_x86_64/jemalloc/jemalloc_defs.h new file mode 100644 index 00000000000..d1389237a77 --- /dev/null +++ b/contrib/jemalloc-cmake/include_linux_x86_64/jemalloc/jemalloc_defs.h @@ -0,0 +1,43 @@ +/* include/jemalloc/jemalloc_defs.h. Generated from jemalloc_defs.h.in by configure. */ +/* Defined if __attribute__((...)) syntax is supported. */ +#define JEMALLOC_HAVE_ATTR + +/* Defined if alloc_size attribute is supported. */ +#define JEMALLOC_HAVE_ATTR_ALLOC_SIZE + +/* Defined if format(printf, ...) attribute is supported. */ +#define JEMALLOC_HAVE_ATTR_FORMAT_PRINTF + +/* + * Define overrides for non-standard allocator-related functions if they are + * present on the system. + */ +#define JEMALLOC_OVERRIDE_MEMALIGN +#define JEMALLOC_OVERRIDE_VALLOC + +/* + * At least Linux omits the "const" in: + * + * size_t malloc_usable_size(const void *ptr); + * + * Match the operating system's prototype. + */ +#define JEMALLOC_USABLE_SIZE_CONST + +/* + * If defined, specify throw() for the public function prototypes when compiling + * with C++. The only justification for this is to match the prototypes that + * glibc defines. 
+ */ +#define JEMALLOC_USE_CXX_THROW + +#ifdef _MSC_VER +# ifdef _WIN64 +# define LG_SIZEOF_PTR_WIN 3 +# else +# define LG_SIZEOF_PTR_WIN 2 +# endif +#endif + +/* sizeof(void *) == 2^LG_SIZEOF_PTR. */ +#define LG_SIZEOF_PTR 3 diff --git a/contrib/jemalloc-cmake/include_linux_x86_64/jemalloc/jemalloc_macros.h b/contrib/jemalloc-cmake/include_linux_x86_64/jemalloc/jemalloc_macros.h new file mode 100644 index 00000000000..7432f1cda53 --- /dev/null +++ b/contrib/jemalloc-cmake/include_linux_x86_64/jemalloc/jemalloc_macros.h @@ -0,0 +1,122 @@ +#include +#include +#include +#include +#include + +#define JEMALLOC_VERSION "5.1.0-56-g41b7372eadee941b9164751b8d4963f915d3ceae" +#define JEMALLOC_VERSION_MAJOR 5 +#define JEMALLOC_VERSION_MINOR 1 +#define JEMALLOC_VERSION_BUGFIX 0 +#define JEMALLOC_VERSION_NREV 56 +#define JEMALLOC_VERSION_GID "41b7372eadee941b9164751b8d4963f915d3ceae" + +#define MALLOCX_LG_ALIGN(la) ((int)(la)) +#if LG_SIZEOF_PTR == 2 +# define MALLOCX_ALIGN(a) ((int)(ffs((int)(a))-1)) +#else +# define MALLOCX_ALIGN(a) \ + ((int)(((size_t)(a) < (size_t)INT_MAX) ? ffs((int)(a))-1 : \ + ffs((int)(((size_t)(a))>>32))+31)) +#endif +#define MALLOCX_ZERO ((int)0x40) +/* + * Bias tcache index bits so that 0 encodes "automatic tcache management", and 1 + * encodes MALLOCX_TCACHE_NONE. + */ +#define MALLOCX_TCACHE(tc) ((int)(((tc)+2) << 8)) +#define MALLOCX_TCACHE_NONE MALLOCX_TCACHE(-1) +/* + * Bias arena index bits so that 0 encodes "use an automatically chosen arena". + */ +#define MALLOCX_ARENA(a) ((((int)(a))+1) << 20) + +/* + * Use as arena index in "arena..{purge,decay,dss}" and + * "stats.arenas..*" mallctl interfaces to select all arenas. This + * definition is intentionally specified in raw decimal format to support + * cpp-based string concatenation, e.g. + * + * #define STRINGIFY_HELPER(x) #x + * #define STRINGIFY(x) STRINGIFY_HELPER(x) + * + * mallctl("arena." 
STRINGIFY(MALLCTL_ARENAS_ALL) ".purge", NULL, NULL, NULL, + * 0); + */ +#define MALLCTL_ARENAS_ALL 4096 +/* + * Use as arena index in "stats.arenas..*" mallctl interfaces to select + * destroyed arenas. + */ +#define MALLCTL_ARENAS_DESTROYED 4097 + +#if defined(__cplusplus) && defined(JEMALLOC_USE_CXX_THROW) +# define JEMALLOC_CXX_THROW throw() +#else +# define JEMALLOC_CXX_THROW +#endif + +#if defined(_MSC_VER) +# define JEMALLOC_ATTR(s) +# define JEMALLOC_ALIGNED(s) __declspec(align(s)) +# define JEMALLOC_ALLOC_SIZE(s) +# define JEMALLOC_ALLOC_SIZE2(s1, s2) +# ifndef JEMALLOC_EXPORT +# ifdef DLLEXPORT +# define JEMALLOC_EXPORT __declspec(dllexport) +# else +# define JEMALLOC_EXPORT __declspec(dllimport) +# endif +# endif +# define JEMALLOC_FORMAT_PRINTF(s, i) +# define JEMALLOC_NOINLINE __declspec(noinline) +# ifdef __cplusplus +# define JEMALLOC_NOTHROW __declspec(nothrow) +# else +# define JEMALLOC_NOTHROW +# endif +# define JEMALLOC_SECTION(s) __declspec(allocate(s)) +# define JEMALLOC_RESTRICT_RETURN __declspec(restrict) +# if _MSC_VER >= 1900 && !defined(__EDG__) +# define JEMALLOC_ALLOCATOR __declspec(allocator) +# else +# define JEMALLOC_ALLOCATOR +# endif +#elif defined(JEMALLOC_HAVE_ATTR) +# define JEMALLOC_ATTR(s) __attribute__((s)) +# define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s)) +# ifdef JEMALLOC_HAVE_ATTR_ALLOC_SIZE +# define JEMALLOC_ALLOC_SIZE(s) JEMALLOC_ATTR(alloc_size(s)) +# define JEMALLOC_ALLOC_SIZE2(s1, s2) JEMALLOC_ATTR(alloc_size(s1, s2)) +# else +# define JEMALLOC_ALLOC_SIZE(s) +# define JEMALLOC_ALLOC_SIZE2(s1, s2) +# endif +# ifndef JEMALLOC_EXPORT +# define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default")) +# endif +# ifdef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF +# define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(gnu_printf, s, i)) +# elif defined(JEMALLOC_HAVE_ATTR_FORMAT_PRINTF) +# define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(printf, s, i)) +# else +# define JEMALLOC_FORMAT_PRINTF(s, i) +# endif +# define 
JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline) +# define JEMALLOC_NOTHROW JEMALLOC_ATTR(nothrow) +# define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s)) +# define JEMALLOC_RESTRICT_RETURN +# define JEMALLOC_ALLOCATOR +#else +# define JEMALLOC_ATTR(s) +# define JEMALLOC_ALIGNED(s) +# define JEMALLOC_ALLOC_SIZE(s) +# define JEMALLOC_ALLOC_SIZE2(s1, s2) +# define JEMALLOC_EXPORT +# define JEMALLOC_FORMAT_PRINTF(s, i) +# define JEMALLOC_NOINLINE +# define JEMALLOC_NOTHROW +# define JEMALLOC_SECTION(s) +# define JEMALLOC_RESTRICT_RETURN +# define JEMALLOC_ALLOCATOR +#endif diff --git a/contrib/jemalloc-cmake/include_linux_x86_64/jemalloc/jemalloc_protos.h b/contrib/jemalloc-cmake/include_linux_x86_64/jemalloc/jemalloc_protos.h new file mode 100644 index 00000000000..ff025e30fa7 --- /dev/null +++ b/contrib/jemalloc-cmake/include_linux_x86_64/jemalloc/jemalloc_protos.h @@ -0,0 +1,66 @@ +/* + * The je_ prefix on the following public symbol declarations is an artifact + * of namespace management, and should be omitted in application code unless + * JEMALLOC_NO_DEMANGLE is defined (see jemalloc_mangle.h). 
+ */ +extern JEMALLOC_EXPORT const char *je_malloc_conf; +extern JEMALLOC_EXPORT void (*je_malloc_message)(void *cbopaque, + const char *s); + +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN + void JEMALLOC_NOTHROW *je_malloc(size_t size) + JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1); +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN + void JEMALLOC_NOTHROW *je_calloc(size_t num, size_t size) + JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2); +JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_posix_memalign(void **memptr, + size_t alignment, size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(nonnull(1)); +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN + void JEMALLOC_NOTHROW *je_aligned_alloc(size_t alignment, + size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) + JEMALLOC_ALLOC_SIZE(2); +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN + void JEMALLOC_NOTHROW *je_realloc(void *ptr, size_t size) + JEMALLOC_CXX_THROW JEMALLOC_ALLOC_SIZE(2); +JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_free(void *ptr) + JEMALLOC_CXX_THROW; + +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN + void JEMALLOC_NOTHROW *je_mallocx(size_t size, int flags) + JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1); +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN + void JEMALLOC_NOTHROW *je_rallocx(void *ptr, size_t size, + int flags) JEMALLOC_ALLOC_SIZE(2); +JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_xallocx(void *ptr, size_t size, + size_t extra, int flags); +JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_sallocx(const void *ptr, + int flags) JEMALLOC_ATTR(pure); +JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_dallocx(void *ptr, int flags); +JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_sdallocx(void *ptr, size_t size, + int flags); +JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_nallocx(size_t size, int flags) + JEMALLOC_ATTR(pure); + +JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctl(const char *name, + void *oldp, size_t 
*oldlenp, void *newp, size_t newlen); +JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctlnametomib(const char *name, + size_t *mibp, size_t *miblenp); +JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctlbymib(const size_t *mib, + size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen); +JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_malloc_stats_print( + void (*write_cb)(void *, const char *), void *je_cbopaque, + const char *opts); +JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_malloc_usable_size( + JEMALLOC_USABLE_SIZE_CONST void *ptr) JEMALLOC_CXX_THROW; + +#ifdef JEMALLOC_OVERRIDE_MEMALIGN +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN + void JEMALLOC_NOTHROW *je_memalign(size_t alignment, size_t size) + JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc); +#endif + +#ifdef JEMALLOC_OVERRIDE_VALLOC +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN + void JEMALLOC_NOTHROW *je_valloc(size_t size) JEMALLOC_CXX_THROW + JEMALLOC_ATTR(malloc); +#endif diff --git a/contrib/jemalloc-cmake/include_linux_x86_64/jemalloc/jemalloc_typedefs.h b/contrib/jemalloc-cmake/include_linux_x86_64/jemalloc/jemalloc_typedefs.h new file mode 100644 index 00000000000..1a58874306e --- /dev/null +++ b/contrib/jemalloc-cmake/include_linux_x86_64/jemalloc/jemalloc_typedefs.h @@ -0,0 +1,77 @@ +typedef struct extent_hooks_s extent_hooks_t; + +/* + * void * + * extent_alloc(extent_hooks_t *extent_hooks, void *new_addr, size_t size, + * size_t alignment, bool *zero, bool *commit, unsigned arena_ind); + */ +typedef void *(extent_alloc_t)(extent_hooks_t *, void *, size_t, size_t, bool *, + bool *, unsigned); + +/* + * bool + * extent_dalloc(extent_hooks_t *extent_hooks, void *addr, size_t size, + * bool committed, unsigned arena_ind); + */ +typedef bool (extent_dalloc_t)(extent_hooks_t *, void *, size_t, bool, + unsigned); + +/* + * void + * extent_destroy(extent_hooks_t *extent_hooks, void *addr, size_t size, + * bool committed, unsigned arena_ind); + */ +typedef void 
(extent_destroy_t)(extent_hooks_t *, void *, size_t, bool, + unsigned); + +/* + * bool + * extent_commit(extent_hooks_t *extent_hooks, void *addr, size_t size, + * size_t offset, size_t length, unsigned arena_ind); + */ +typedef bool (extent_commit_t)(extent_hooks_t *, void *, size_t, size_t, size_t, + unsigned); + +/* + * bool + * extent_decommit(extent_hooks_t *extent_hooks, void *addr, size_t size, + * size_t offset, size_t length, unsigned arena_ind); + */ +typedef bool (extent_decommit_t)(extent_hooks_t *, void *, size_t, size_t, + size_t, unsigned); + +/* + * bool + * extent_purge(extent_hooks_t *extent_hooks, void *addr, size_t size, + * size_t offset, size_t length, unsigned arena_ind); + */ +typedef bool (extent_purge_t)(extent_hooks_t *, void *, size_t, size_t, size_t, + unsigned); + +/* + * bool + * extent_split(extent_hooks_t *extent_hooks, void *addr, size_t size, + * size_t size_a, size_t size_b, bool committed, unsigned arena_ind); + */ +typedef bool (extent_split_t)(extent_hooks_t *, void *, size_t, size_t, size_t, + bool, unsigned); + +/* + * bool + * extent_merge(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a, + * void *addr_b, size_t size_b, bool committed, unsigned arena_ind); + */ +typedef bool (extent_merge_t)(extent_hooks_t *, void *, size_t, void *, size_t, + bool, unsigned); + +struct extent_hooks_s { + extent_alloc_t *alloc; + extent_dalloc_t *dalloc; + extent_destroy_t *destroy; + extent_commit_t *commit; + extent_decommit_t *decommit; + extent_purge_t *purge_lazy; + extent_purge_t *purge_forced; + extent_split_t *split; + extent_merge_t *merge; +}; diff --git a/contrib/libcpuid/CMakeLists.txt b/contrib/libcpuid/CMakeLists.txt index c04acf99f36..cd3e7fa06fe 100644 --- a/contrib/libcpuid/CMakeLists.txt +++ b/contrib/libcpuid/CMakeLists.txt @@ -17,4 +17,4 @@ include/libcpuid/recog_amd.h include/libcpuid/recog_intel.h ) -target_include_directories (cpuid PUBLIC include) +target_include_directories (cpuid SYSTEM PUBLIC include) 
diff --git a/contrib/librdkafka-cmake/CMakeLists.txt b/contrib/librdkafka-cmake/CMakeLists.txt new file mode 100644 index 00000000000..7211c791b2f --- /dev/null +++ b/contrib/librdkafka-cmake/CMakeLists.txt @@ -0,0 +1,60 @@ +set(RDKAFKA_SOURCE_DIR ${CMAKE_SOURCE_DIR}/contrib/librdkafka/src) + +set(SRCS +${RDKAFKA_SOURCE_DIR}/crc32c.c +${RDKAFKA_SOURCE_DIR}/rdaddr.c +${RDKAFKA_SOURCE_DIR}/rdavl.c +${RDKAFKA_SOURCE_DIR}/rdbuf.c +${RDKAFKA_SOURCE_DIR}/rdcrc32.c +${RDKAFKA_SOURCE_DIR}/rdkafka.c +${RDKAFKA_SOURCE_DIR}/rdkafka_assignor.c +${RDKAFKA_SOURCE_DIR}/rdkafka_broker.c +${RDKAFKA_SOURCE_DIR}/rdkafka_buf.c +${RDKAFKA_SOURCE_DIR}/rdkafka_cgrp.c +${RDKAFKA_SOURCE_DIR}/rdkafka_conf.c +${RDKAFKA_SOURCE_DIR}/rdkafka_event.c +${RDKAFKA_SOURCE_DIR}/rdkafka_feature.c +${RDKAFKA_SOURCE_DIR}/rdkafka_lz4.c +${RDKAFKA_SOURCE_DIR}/rdkafka_metadata.c +${RDKAFKA_SOURCE_DIR}/rdkafka_metadata_cache.c +${RDKAFKA_SOURCE_DIR}/rdkafka_msg.c +${RDKAFKA_SOURCE_DIR}/rdkafka_msgset_reader.c +${RDKAFKA_SOURCE_DIR}/rdkafka_msgset_writer.c +${RDKAFKA_SOURCE_DIR}/rdkafka_offset.c +${RDKAFKA_SOURCE_DIR}/rdkafka_op.c +${RDKAFKA_SOURCE_DIR}/rdkafka_partition.c +${RDKAFKA_SOURCE_DIR}/rdkafka_pattern.c +${RDKAFKA_SOURCE_DIR}/rdkafka_queue.c +${RDKAFKA_SOURCE_DIR}/rdkafka_range_assignor.c +${RDKAFKA_SOURCE_DIR}/rdkafka_request.c +${RDKAFKA_SOURCE_DIR}/rdkafka_roundrobin_assignor.c +${RDKAFKA_SOURCE_DIR}/rdkafka_sasl.c +${RDKAFKA_SOURCE_DIR}/rdkafka_sasl_plain.c +${RDKAFKA_SOURCE_DIR}/rdkafka_subscription.c +${RDKAFKA_SOURCE_DIR}/rdkafka_timer.c +${RDKAFKA_SOURCE_DIR}/rdkafka_topic.c +${RDKAFKA_SOURCE_DIR}/rdkafka_transport.c +${RDKAFKA_SOURCE_DIR}/rdkafka_interceptor.c +${RDKAFKA_SOURCE_DIR}/rdkafka_header.c +${RDKAFKA_SOURCE_DIR}/rdlist.c +${RDKAFKA_SOURCE_DIR}/rdlog.c +${RDKAFKA_SOURCE_DIR}/rdmurmur2.c +${RDKAFKA_SOURCE_DIR}/rdports.c +${RDKAFKA_SOURCE_DIR}/rdrand.c +${RDKAFKA_SOURCE_DIR}/rdregex.c +${RDKAFKA_SOURCE_DIR}/rdstring.c +${RDKAFKA_SOURCE_DIR}/rdunittest.c 
+${RDKAFKA_SOURCE_DIR}/rdvarint.c +${RDKAFKA_SOURCE_DIR}/snappy.c +${RDKAFKA_SOURCE_DIR}/tinycthread.c +${RDKAFKA_SOURCE_DIR}/xxhash.c +${RDKAFKA_SOURCE_DIR}/lz4.c +${RDKAFKA_SOURCE_DIR}/lz4frame.c +${RDKAFKA_SOURCE_DIR}/lz4hc.c +${RDKAFKA_SOURCE_DIR}/rdgz.c +) + +add_library(rdkafka STATIC ${SRCS}) +target_include_directories(rdkafka PRIVATE include) +target_include_directories(rdkafka SYSTEM PUBLIC ${RDKAFKA_SOURCE_DIR}) +target_link_libraries(rdkafka PUBLIC ${ZLIB_LIBRARIES} ${OPENSSL_SSL_LIBRARY} ${OPENSSL_CRYPTO_LIBRARY}) diff --git a/contrib/librdkafka-cmake/config.h b/contrib/librdkafka-cmake/config.h new file mode 100644 index 00000000000..68e93a10ff1 --- /dev/null +++ b/contrib/librdkafka-cmake/config.h @@ -0,0 +1,74 @@ +// Automatically generated by ./configure +#ifndef _CONFIG_H_ +#define _CONFIG_H_ +#define ARCH "x86_64" +#define CPU "generic" +#define WITHOUT_OPTIMIZATION 0 +#define ENABLE_DEVEL 0 +#define ENABLE_VALGRIND 0 +#define ENABLE_REFCNT_DEBUG 0 +#define ENABLE_SHAREDPTR_DEBUG 0 +#define ENABLE_LZ4_EXT 1 +#define ENABLE_SSL 1 +//#define ENABLE_SASL 1 +#define MKL_APP_NAME "librdkafka" +#define MKL_APP_DESC_ONELINE "The Apache Kafka C/C++ library" +// distro +//#define SOLIB_EXT ".so" +// gcc +//#define WITH_GCC 1 +// gxx +//#define WITH_GXX 1 +// pkgconfig +//#define WITH_PKGCONFIG 1 +// install +//#define WITH_INSTALL 1 +// PIC +//#define HAVE_PIC 1 +// gnulib +//#define WITH_GNULD 1 +// __atomic_32 +#define HAVE_ATOMICS_32 1 +// __atomic_32 +#define HAVE_ATOMICS_32_ATOMIC 1 +// atomic_32 +#define ATOMIC_OP32(OP1,OP2,PTR,VAL) __atomic_ ## OP1 ## _ ## OP2(PTR, VAL, __ATOMIC_SEQ_CST) +// __atomic_64 +#define HAVE_ATOMICS_64 1 +// __atomic_64 +#define HAVE_ATOMICS_64_ATOMIC 1 +// atomic_64 +#define ATOMIC_OP64(OP1,OP2,PTR,VAL) __atomic_ ## OP1 ## _ ## OP2(PTR, VAL, __ATOMIC_SEQ_CST) +// atomic_64 +#define ATOMIC_OP(OP1,OP2,PTR,VAL) __atomic_ ## OP1 ## _ ## OP2(PTR, VAL, __ATOMIC_SEQ_CST) +// parseversion +#define RDKAFKA_VERSION_STR "0.11.4" +// 
parseversion +#define MKL_APP_VERSION "0.11.4" +// libdl +//#define WITH_LIBDL 1 +// WITH_PLUGINS +//#define WITH_PLUGINS 1 +// zlib +#define WITH_ZLIB 1 +// WITH_SNAPPY +#define WITH_SNAPPY 1 +// WITH_SOCKEM +#define WITH_SOCKEM 1 +// libssl +#define WITH_SSL 1 +// WITH_SASL_SCRAM +//#define WITH_SASL_SCRAM 1 +// crc32chw +#define WITH_CRC32C_HW 1 +// regex +#define HAVE_REGEX 1 +// strndup +#define HAVE_STRNDUP 1 +// strerror_r +#define HAVE_STRERROR_R 1 +// pthread_setname_gnu +#define HAVE_PTHREAD_SETNAME_GNU 1 +// python +//#define HAVE_PYTHON 1 +#endif /* _CONFIG_H_ */ diff --git a/contrib/librdkafka-cmake/include/README b/contrib/librdkafka-cmake/include/README new file mode 100644 index 00000000000..58fa024e68a --- /dev/null +++ b/contrib/librdkafka-cmake/include/README @@ -0,0 +1 @@ +This directory is needed because rdkafka files have #include "../config.h" diff --git a/contrib/mariadb-connector-c b/contrib/mariadb-connector-c new file mode 160000 index 00000000000..a0fd36cc5a5 --- /dev/null +++ b/contrib/mariadb-connector-c @@ -0,0 +1 @@ +Subproject commit a0fd36cc5a5313414a5a2ebe9322577a29b4782a diff --git a/contrib/mariadb-connector-c-cmake/CMakeLists.txt b/contrib/mariadb-connector-c-cmake/CMakeLists.txt new file mode 100644 index 00000000000..4c1184b3edb --- /dev/null +++ b/contrib/mariadb-connector-c-cmake/CMakeLists.txt @@ -0,0 +1,66 @@ +set(MARIADB_CLIENT_SOURCE_DIR ${CMAKE_SOURCE_DIR}/contrib/mariadb-connector-c) +set(MARIADB_CLIENT_BINARY_DIR ${CMAKE_BINARY_DIR}/contrib/mariadb-connector-c) + +set(SRCS +${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/bmove_upp.c +${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/get_password.c +${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/ma_alloc.c +${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/ma_array.c +${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/ma_charset.c +${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/ma_compress.c +${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/ma_context.c +${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/ma_default.c 
+${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/ma_dtoa.c +${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/ma_errmsg.c +${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/ma_hash.c +${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/ma_init.c +${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/ma_io.c +${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/ma_list.c +${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/ma_ll2str.c +${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/ma_loaddata.c +${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/ma_net.c +${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/ma_password.c +${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/ma_pvio.c +${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/mariadb_async.c +${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/mariadb_charset.c +#${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/mariadb_dyncol.c +${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/mariadb_lib.c +${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/mariadb_stmt.c +${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/ma_sha1.c +${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/ma_stmt_codec.c +${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/ma_string.c +${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/ma_time.c +${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/ma_tls.c +#${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/secure/gnutls.c +#${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/secure/ma_schannel.c +${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/secure/openssl.c +#${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/secure/schannel.c +#${MARIADB_CLIENT_SOURCE_DIR}/plugins/auth/auth_gssapi_client.c +#${MARIADB_CLIENT_SOURCE_DIR}/plugins/auth/dialog.c +#${MARIADB_CLIENT_SOURCE_DIR}/plugins/auth/gssapi_client.c +#${MARIADB_CLIENT_SOURCE_DIR}/plugins/auth/gssapi_errmsg.c +${MARIADB_CLIENT_SOURCE_DIR}/plugins/auth/mariadb_cleartext.c +${MARIADB_CLIENT_SOURCE_DIR}/plugins/auth/my_auth.c +${MARIADB_CLIENT_SOURCE_DIR}/plugins/auth/old_password.c +${MARIADB_CLIENT_SOURCE_DIR}/plugins/auth/sha256_pw.c +#${MARIADB_CLIENT_SOURCE_DIR}/plugins/auth/sspi_client.c +#${MARIADB_CLIENT_SOURCE_DIR}/plugins/auth/sspi_errmsg.c +${MARIADB_CLIENT_SOURCE_DIR}/plugins/connection/aurora.c 
+${MARIADB_CLIENT_SOURCE_DIR}/plugins/connection/replication.c +#${MARIADB_CLIENT_SOURCE_DIR}/plugins/io/remote_io.c +#${MARIADB_CLIENT_SOURCE_DIR}/plugins/pvio/pvio_npipe.c +#${MARIADB_CLIENT_SOURCE_DIR}/plugins/pvio/pvio_shmem.c +${MARIADB_CLIENT_SOURCE_DIR}/plugins/pvio/pvio_socket.c +#${MARIADB_CLIENT_SOURCE_DIR}/plugins/trace/trace_example.c +${CMAKE_CURRENT_SOURCE_DIR}/linux_x86_64/libmariadb/ma_client_plugin.c +) + +add_library(mysqlclient STATIC ${SRCS}) + +target_link_libraries(mysqlclient ${OPENSSL_LIBRARIES}) + +target_include_directories(mysqlclient PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/linux_x86_64/include) +target_include_directories(mysqlclient PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/common/include) +target_include_directories(mysqlclient PUBLIC ${MARIADB_CLIENT_SOURCE_DIR}/include) + +target_compile_definitions(mysqlclient PRIVATE -D THREAD -D HAVE_OPENSSL -D HAVE_TLS) diff --git a/contrib/mariadb-connector-c-cmake/common/include/mysql/mysql.h b/contrib/mariadb-connector-c-cmake/common/include/mysql/mysql.h new file mode 100644 index 00000000000..741c7ba03c9 --- /dev/null +++ b/contrib/mariadb-connector-c-cmake/common/include/mysql/mysql.h @@ -0,0 +1 @@ +#include diff --git a/contrib/mariadb-connector-c-cmake/common/include/mysql/mysqld_error.h b/contrib/mariadb-connector-c-cmake/common/include/mysql/mysqld_error.h new file mode 100644 index 00000000000..95d26eef163 --- /dev/null +++ b/contrib/mariadb-connector-c-cmake/common/include/mysql/mysqld_error.h @@ -0,0 +1 @@ +#include diff --git a/contrib/mariadb-connector-c-cmake/linux_x86_64/include/config.h b/contrib/mariadb-connector-c-cmake/linux_x86_64/include/config.h new file mode 100644 index 00000000000..90c42c97df6 --- /dev/null +++ b/contrib/mariadb-connector-c-cmake/linux_x86_64/include/config.h @@ -0,0 +1,269 @@ + +/* + * Include file constants (processed in LibmysqlIncludeFiles.txt 1 + */ +#define HAVE_ALLOCA_H 1 +/* #undef HAVE_BIGENDIAN */ +#define HAVE_SETLOCALE 1 +#define HAVE_NL_LANGINFO 1 
+#define HAVE_ARPA_INET_H 1 +#define HAVE_CRYPT_H 1 +#define HAVE_DIRENT_H 1 +#define HAVE_DLFCN_H 1 +#define HAVE_EXECINFO_H 1 +#define HAVE_FCNTL_H 1 +#define HAVE_FENV_H 1 +#define HAVE_FLOAT_H 1 +/* #undef HAVE_FPU_CONTROL_H */ +#define HAVE_GRP_H 1 +/* #undef HAVE_IEEEFP_H */ +#define HAVE_LIMITS_H 1 +#define HAVE_MALLOC_H 1 +#define HAVE_MEMORY_H 1 +#define HAVE_NETINET_IN_H 1 +#define HAVE_PATHS_H 1 +#define HAVE_PWD_H 1 +#define HAVE_SCHED_H 1 +/* #undef HAVE_SELECT_H */ +#define HAVE_STDDEF_H 1 +#define HAVE_STDINT_H 1 +#define HAVE_STDLIB_H 1 +#define HAVE_STRING_H 1 +#define HAVE_STRINGS_H 1 +/* #undef HAVE_SYNCH_H */ +/* #undef HAVE_SYS_FPU_H */ +#define HAVE_SYS_IOCTL_H 1 +#define HAVE_SYS_IPC_H 1 +#define HAVE_SYS_MMAN_H 1 +#define HAVE_SYS_PRCTL_H 1 +#define HAVE_SYS_SELECT_H 1 +#define HAVE_SYS_SHM_H 1 +#define HAVE_SYS_SOCKET_H 1 +#define HAVE_SYS_STAT_H 1 +/* #undef HAVE_SYS_STREAM_H */ +#define HAVE_SYS_TIMEB_H 1 +#define HAVE_SYS_TYPES_H 1 +#define HAVE_SYS_UN_H 1 +/* #undef HAVE_SYSENT_H */ +#define HAVE_TERMIO_H 1 +#define HAVE_TERMIOS_H 1 +#define HAVE_UNISTD_H 1 +#define HAVE_UTIME_H 1 +#define HAVE_UCONTEXT_H 1 + +/* + * function definitions - processed in LibmysqlFunctions.txt + */ +#define HAVE_ACCESS 1 +/* #undef HAVE_AIOWAIT */ +#define HAVE_ALARM 1 +/* #undef HAVE_ALLOCA */ +#define HAVE_BCMP 1 +/* #undef HAVE_BFILL */ +/* #undef HAVE_BMOVE */ +#define HAVE_BZERO 1 +#define HAVE_CLOCK_GETTIME 1 +/* #undef HAVE_COMPRESS */ +/* #undef HAVE_CRYPT */ +#define HAVE_DLERROR 1 +#define HAVE_DLOPEN 1 +#define HAVE_FCHMOD 1 +#define HAVE_FCNTL 1 +/* #undef HAVE_FCONVERT */ +#define HAVE_FDATASYNC 1 +#define HAVE_FESETROUND 1 +#define HAVE_FINITE 1 +#define HAVE_FSEEKO 1 +#define HAVE_FSYNC 1 +#define HAVE_GETADDRINFO 1 +#define HAVE_GETCWD 1 +#define HAVE_GETHOSTBYADDR_R 1 +#define HAVE_GETHOSTBYNAME_R 1 +/* #undef HAVE_GETHRTIME */ +#define HAVE_GETNAMEINFO 1 +#define HAVE_GETPAGESIZE 1 +#define HAVE_GETPASS 1 +/* #undef HAVE_GETPASSPHRASE */ 
+#define HAVE_GETPWNAM 1 +#define HAVE_GETPWUID 1 +#define HAVE_GETRLIMIT 1 +#define HAVE_GETRUSAGE 1 +#define HAVE_GETWD 1 +#define HAVE_GMTIME_R 1 +#define HAVE_INITGROUPS 1 +#define HAVE_LDIV 1 +#define HAVE_LOCALTIME_R 1 +#define HAVE_LOG2 1 +#define HAVE_LONGJMP 1 +#define HAVE_LSTAT 1 +#define HAVE_MADVISE 1 +#define HAVE_MALLINFO 1 +#define HAVE_MEMALIGN 1 +#define HAVE_MEMCPY 1 +#define HAVE_MEMMOVE 1 +#define HAVE_MKSTEMP 1 +#define HAVE_MLOCK 1 +#define HAVE_MLOCKALL 1 +#define HAVE_MMAP 1 +#define HAVE_MMAP64 1 +#define HAVE_PERROR 1 +#define HAVE_POLL 1 +#define HAVE_PREAD 1 +/* #undef HAVE_PTHREAD_ATTR_CREATE */ +#define HAVE_PTHREAD_ATTR_GETSTACKSIZE 1 +/* #undef HAVE_PTHREAD_ATTR_SETPRIO */ +#define HAVE_PTHREAD_ATTR_SETSCHEDPARAM 1 +#define HAVE_PTHREAD_ATTR_SETSCOPE 1 +#define HAVE_PTHREAD_ATTR_SETSTACKSIZE 1 +/* #undef HAVE_PTHREAD_CONDATTR_CREATE */ +/* #undef HAVE_PTHREAD_INIT */ +#define HAVE_PTHREAD_KEY_DELETE 1 +#define HAVE_PTHREAD_KILL 1 +#define HAVE_PTHREAD_RWLOCK_RDLOCK 1 +/* #undef HAVE_PTHREAD_SETPRIO_NP */ +#define HAVE_PTHREAD_SETSCHEDPARAM 1 +#define HAVE_PTHREAD_SIGMASK 1 +/* #undef HAVE_PTHREAD_THREADMASK */ +/* #undef HAVE_PTHREAD_YIELD_NP */ +#define HAVE_READDIR_R 1 +#define HAVE_READLINK 1 +#define HAVE_REALPATH 1 +#define HAVE_RENAME 1 +#define HAVE_SCHED_YIELD 1 +#define HAVE_SELECT 1 +/* #undef HAVE_SETFD */ +/* #undef HAVE_SETFILEPOINTER */ +#define HAVE_SIGNAL 1 +#define HAVE_SIGACTION 1 +/* #undef HAVE_SIGTHREADMASK */ +#define HAVE_SIGWAIT 1 +#define HAVE_SLEEP 1 +#define HAVE_SNPRINTF 1 +/* #undef HAVE_SQLITE */ +#define HAVE_STPCPY 1 +#define HAVE_STRERROR 1 +/* #undef HAVE_STRLCPY */ +#define HAVE_STRNLEN 1 +#define HAVE_STRPBRK 1 +#define HAVE_STRSEP 1 +#define HAVE_STRSTR 1 +#define HAVE_STRTOK_R 1 +#define HAVE_STRTOL 1 +#define HAVE_STRTOLL 1 +#define HAVE_STRTOUL 1 +#define HAVE_STRTOULL 1 +/* #undef HAVE_TELL */ +/* #undef HAVE_THR_SETCONCURRENCY */ +/* #undef HAVE_THR_YIELD */ +#define HAVE_VASPRINTF 1 
+#define HAVE_VSNPRINTF 1 + +/* + * types and sizes + */ +/* Types we may use */ +#define SIZEOF_CHAR 1 +#if defined(SIZEOF_CHAR) +# define HAVE_CHAR 1 +#endif + +#define SIZEOF_CHARP 8 +#if defined(SIZEOF_CHARP) +# define HAVE_CHARP 1 +#endif + +#define SIZEOF_SHORT 2 +#if defined(SIZEOF_SHORT) +# define HAVE_SHORT 1 +#endif + +#define SIZEOF_INT 4 +#if defined(SIZEOF_INT) +# define HAVE_INT 1 +#endif + +#define SIZEOF_LONG 8 +#if defined(SIZEOF_LONG) +# define HAVE_LONG 1 +#endif + +#define SIZEOF_LONG_LONG 8 +#if defined(SIZEOF_LONG_LONG) +# define HAVE_LONG_LONG 1 +#endif + + +#define SIZEOF_SIGSET_T 128 +#if defined(SIZEOF_SIGSET_T) +# define HAVE_SIGSET_T 1 +#endif + +#define SIZEOF_SIZE_T 8 +#if defined(SIZEOF_SIZE_T) +# define HAVE_SIZE_T 1 +#endif + +/* #undef SIZEOF_UCHAR */ +#if defined(SIZEOF_UCHAR) +# define HAVE_UCHAR 1 +#endif + +#define SIZEOF_UINT 4 +#if defined(SIZEOF_UINT) +# define HAVE_UINT 1 +#endif + +#define SIZEOF_ULONG 8 +#if defined(SIZEOF_ULONG) +# define HAVE_ULONG 1 +#endif + +/* #undef SIZEOF_INT8 */ +#if defined(SIZEOF_INT8) +# define HAVE_INT8 1 +#endif +/* #undef SIZEOF_UINT8 */ +#if defined(SIZEOF_UINT8) +# define HAVE_UINT8 1 +#endif + +/* #undef SIZEOF_INT16 */ +#if defined(SIZEOF_INT16) +# define HAVE_INT16 1 +#endif +/* #undef SIZEOF_UINT16 */ +#if defined(SIZEOF_UINT16) +# define HAVE_UINT16 1 +#endif + +/* #undef SIZEOF_INT32 */ +#if defined(SIZEOF_INT32) +# define HAVE_INT32 1 +#endif +/* #undef SIZEOF_UINT32 */ +#if defined(SIZEOF_UINT32) +# define HAVE_UINT32 1 +#endif +/* #undef SIZEOF_U_INT32_T */ +#if defined(SIZEOF_U_INT32_T) +# define HAVE_U_INT32_T 1 +#endif + +/* #undef SIZEOF_INT64 */ +#if defined(SIZEOF_INT64) +# define HAVE_INT64 1 +#endif +/* #undef SIZEOF_UINT64 */ +#if defined(SIZEOF_UINT64) +# define HAVE_UINT64 1 +#endif + +/* #undef SIZEOF_SOCKLEN_T */ +#if defined(SIZEOF_SOCKLEN_T) +# define HAVE_SOCKLEN_T 1 +#endif + +#define SOCKET_SIZE_TYPE socklen_t + +#define MARIADB_DEFAULT_CHARSET "latin1" + diff 
--git a/contrib/mariadb-connector-c-cmake/linux_x86_64/include/ma_config.h b/contrib/mariadb-connector-c-cmake/linux_x86_64/include/ma_config.h new file mode 100644 index 00000000000..90c42c97df6 --- /dev/null +++ b/contrib/mariadb-connector-c-cmake/linux_x86_64/include/ma_config.h @@ -0,0 +1,269 @@ + +/* + * Include file constants (processed in LibmysqlIncludeFiles.txt 1 + */ +#define HAVE_ALLOCA_H 1 +/* #undef HAVE_BIGENDIAN */ +#define HAVE_SETLOCALE 1 +#define HAVE_NL_LANGINFO 1 +#define HAVE_ARPA_INET_H 1 +#define HAVE_CRYPT_H 1 +#define HAVE_DIRENT_H 1 +#define HAVE_DLFCN_H 1 +#define HAVE_EXECINFO_H 1 +#define HAVE_FCNTL_H 1 +#define HAVE_FENV_H 1 +#define HAVE_FLOAT_H 1 +/* #undef HAVE_FPU_CONTROL_H */ +#define HAVE_GRP_H 1 +/* #undef HAVE_IEEEFP_H */ +#define HAVE_LIMITS_H 1 +#define HAVE_MALLOC_H 1 +#define HAVE_MEMORY_H 1 +#define HAVE_NETINET_IN_H 1 +#define HAVE_PATHS_H 1 +#define HAVE_PWD_H 1 +#define HAVE_SCHED_H 1 +/* #undef HAVE_SELECT_H */ +#define HAVE_STDDEF_H 1 +#define HAVE_STDINT_H 1 +#define HAVE_STDLIB_H 1 +#define HAVE_STRING_H 1 +#define HAVE_STRINGS_H 1 +/* #undef HAVE_SYNCH_H */ +/* #undef HAVE_SYS_FPU_H */ +#define HAVE_SYS_IOCTL_H 1 +#define HAVE_SYS_IPC_H 1 +#define HAVE_SYS_MMAN_H 1 +#define HAVE_SYS_PRCTL_H 1 +#define HAVE_SYS_SELECT_H 1 +#define HAVE_SYS_SHM_H 1 +#define HAVE_SYS_SOCKET_H 1 +#define HAVE_SYS_STAT_H 1 +/* #undef HAVE_SYS_STREAM_H */ +#define HAVE_SYS_TIMEB_H 1 +#define HAVE_SYS_TYPES_H 1 +#define HAVE_SYS_UN_H 1 +/* #undef HAVE_SYSENT_H */ +#define HAVE_TERMIO_H 1 +#define HAVE_TERMIOS_H 1 +#define HAVE_UNISTD_H 1 +#define HAVE_UTIME_H 1 +#define HAVE_UCONTEXT_H 1 + +/* + * function definitions - processed in LibmysqlFunctions.txt + */ +#define HAVE_ACCESS 1 +/* #undef HAVE_AIOWAIT */ +#define HAVE_ALARM 1 +/* #undef HAVE_ALLOCA */ +#define HAVE_BCMP 1 +/* #undef HAVE_BFILL */ +/* #undef HAVE_BMOVE */ +#define HAVE_BZERO 1 +#define HAVE_CLOCK_GETTIME 1 +/* #undef HAVE_COMPRESS */ +/* #undef HAVE_CRYPT */ +#define 
HAVE_DLERROR 1 +#define HAVE_DLOPEN 1 +#define HAVE_FCHMOD 1 +#define HAVE_FCNTL 1 +/* #undef HAVE_FCONVERT */ +#define HAVE_FDATASYNC 1 +#define HAVE_FESETROUND 1 +#define HAVE_FINITE 1 +#define HAVE_FSEEKO 1 +#define HAVE_FSYNC 1 +#define HAVE_GETADDRINFO 1 +#define HAVE_GETCWD 1 +#define HAVE_GETHOSTBYADDR_R 1 +#define HAVE_GETHOSTBYNAME_R 1 +/* #undef HAVE_GETHRTIME */ +#define HAVE_GETNAMEINFO 1 +#define HAVE_GETPAGESIZE 1 +#define HAVE_GETPASS 1 +/* #undef HAVE_GETPASSPHRASE */ +#define HAVE_GETPWNAM 1 +#define HAVE_GETPWUID 1 +#define HAVE_GETRLIMIT 1 +#define HAVE_GETRUSAGE 1 +#define HAVE_GETWD 1 +#define HAVE_GMTIME_R 1 +#define HAVE_INITGROUPS 1 +#define HAVE_LDIV 1 +#define HAVE_LOCALTIME_R 1 +#define HAVE_LOG2 1 +#define HAVE_LONGJMP 1 +#define HAVE_LSTAT 1 +#define HAVE_MADVISE 1 +#define HAVE_MALLINFO 1 +#define HAVE_MEMALIGN 1 +#define HAVE_MEMCPY 1 +#define HAVE_MEMMOVE 1 +#define HAVE_MKSTEMP 1 +#define HAVE_MLOCK 1 +#define HAVE_MLOCKALL 1 +#define HAVE_MMAP 1 +#define HAVE_MMAP64 1 +#define HAVE_PERROR 1 +#define HAVE_POLL 1 +#define HAVE_PREAD 1 +/* #undef HAVE_PTHREAD_ATTR_CREATE */ +#define HAVE_PTHREAD_ATTR_GETSTACKSIZE 1 +/* #undef HAVE_PTHREAD_ATTR_SETPRIO */ +#define HAVE_PTHREAD_ATTR_SETSCHEDPARAM 1 +#define HAVE_PTHREAD_ATTR_SETSCOPE 1 +#define HAVE_PTHREAD_ATTR_SETSTACKSIZE 1 +/* #undef HAVE_PTHREAD_CONDATTR_CREATE */ +/* #undef HAVE_PTHREAD_INIT */ +#define HAVE_PTHREAD_KEY_DELETE 1 +#define HAVE_PTHREAD_KILL 1 +#define HAVE_PTHREAD_RWLOCK_RDLOCK 1 +/* #undef HAVE_PTHREAD_SETPRIO_NP */ +#define HAVE_PTHREAD_SETSCHEDPARAM 1 +#define HAVE_PTHREAD_SIGMASK 1 +/* #undef HAVE_PTHREAD_THREADMASK */ +/* #undef HAVE_PTHREAD_YIELD_NP */ +#define HAVE_READDIR_R 1 +#define HAVE_READLINK 1 +#define HAVE_REALPATH 1 +#define HAVE_RENAME 1 +#define HAVE_SCHED_YIELD 1 +#define HAVE_SELECT 1 +/* #undef HAVE_SETFD */ +/* #undef HAVE_SETFILEPOINTER */ +#define HAVE_SIGNAL 1 +#define HAVE_SIGACTION 1 +/* #undef HAVE_SIGTHREADMASK */ +#define HAVE_SIGWAIT 
1 +#define HAVE_SLEEP 1 +#define HAVE_SNPRINTF 1 +/* #undef HAVE_SQLITE */ +#define HAVE_STPCPY 1 +#define HAVE_STRERROR 1 +/* #undef HAVE_STRLCPY */ +#define HAVE_STRNLEN 1 +#define HAVE_STRPBRK 1 +#define HAVE_STRSEP 1 +#define HAVE_STRSTR 1 +#define HAVE_STRTOK_R 1 +#define HAVE_STRTOL 1 +#define HAVE_STRTOLL 1 +#define HAVE_STRTOUL 1 +#define HAVE_STRTOULL 1 +/* #undef HAVE_TELL */ +/* #undef HAVE_THR_SETCONCURRENCY */ +/* #undef HAVE_THR_YIELD */ +#define HAVE_VASPRINTF 1 +#define HAVE_VSNPRINTF 1 + +/* + * types and sizes + */ +/* Types we may use */ +#define SIZEOF_CHAR 1 +#if defined(SIZEOF_CHAR) +# define HAVE_CHAR 1 +#endif + +#define SIZEOF_CHARP 8 +#if defined(SIZEOF_CHARP) +# define HAVE_CHARP 1 +#endif + +#define SIZEOF_SHORT 2 +#if defined(SIZEOF_SHORT) +# define HAVE_SHORT 1 +#endif + +#define SIZEOF_INT 4 +#if defined(SIZEOF_INT) +# define HAVE_INT 1 +#endif + +#define SIZEOF_LONG 8 +#if defined(SIZEOF_LONG) +# define HAVE_LONG 1 +#endif + +#define SIZEOF_LONG_LONG 8 +#if defined(SIZEOF_LONG_LONG) +# define HAVE_LONG_LONG 1 +#endif + + +#define SIZEOF_SIGSET_T 128 +#if defined(SIZEOF_SIGSET_T) +# define HAVE_SIGSET_T 1 +#endif + +#define SIZEOF_SIZE_T 8 +#if defined(SIZEOF_SIZE_T) +# define HAVE_SIZE_T 1 +#endif + +/* #undef SIZEOF_UCHAR */ +#if defined(SIZEOF_UCHAR) +# define HAVE_UCHAR 1 +#endif + +#define SIZEOF_UINT 4 +#if defined(SIZEOF_UINT) +# define HAVE_UINT 1 +#endif + +#define SIZEOF_ULONG 8 +#if defined(SIZEOF_ULONG) +# define HAVE_ULONG 1 +#endif + +/* #undef SIZEOF_INT8 */ +#if defined(SIZEOF_INT8) +# define HAVE_INT8 1 +#endif +/* #undef SIZEOF_UINT8 */ +#if defined(SIZEOF_UINT8) +# define HAVE_UINT8 1 +#endif + +/* #undef SIZEOF_INT16 */ +#if defined(SIZEOF_INT16) +# define HAVE_INT16 1 +#endif +/* #undef SIZEOF_UINT16 */ +#if defined(SIZEOF_UINT16) +# define HAVE_UINT16 1 +#endif + +/* #undef SIZEOF_INT32 */ +#if defined(SIZEOF_INT32) +# define HAVE_INT32 1 +#endif +/* #undef SIZEOF_UINT32 */ +#if defined(SIZEOF_UINT32) +# define 
HAVE_UINT32 1 +#endif +/* #undef SIZEOF_U_INT32_T */ +#if defined(SIZEOF_U_INT32_T) +# define HAVE_U_INT32_T 1 +#endif + +/* #undef SIZEOF_INT64 */ +#if defined(SIZEOF_INT64) +# define HAVE_INT64 1 +#endif +/* #undef SIZEOF_UINT64 */ +#if defined(SIZEOF_UINT64) +# define HAVE_UINT64 1 +#endif + +/* #undef SIZEOF_SOCKLEN_T */ +#if defined(SIZEOF_SOCKLEN_T) +# define HAVE_SOCKLEN_T 1 +#endif + +#define SOCKET_SIZE_TYPE socklen_t + +#define MARIADB_DEFAULT_CHARSET "latin1" + diff --git a/contrib/mariadb-connector-c-cmake/linux_x86_64/include/mariadb_version.h b/contrib/mariadb-connector-c-cmake/linux_x86_64/include/mariadb_version.h new file mode 100644 index 00000000000..821a7f8add2 --- /dev/null +++ b/contrib/mariadb-connector-c-cmake/linux_x86_64/include/mariadb_version.h @@ -0,0 +1,36 @@ +/* Copyright Abandoned 1996, 1999, 2001 MySQL AB + This file is public domain and comes with NO WARRANTY of any kind */ + +/* Version numbers for protocol & mysqld */ + +#ifndef _mariadb_version_h_ +#define _mariadb_version_h_ + +#ifdef _CUSTOMCONFIG_ +#include +#else +#define PROTOCOL_VERSION 10 +#define MARIADB_CLIENT_VERSION_STR "10.3.6" +#define MARIADB_BASE_VERSION "mariadb-10.3" +#define MARIADB_VERSION_ID 100306 +#define MYSQL_VERSION_ID 100306 +#define MARIADB_PORT 3306 +#define MARIADB_UNIX_ADDR "/var/run/mysqld/mysqld.sock" +#define MYSQL_CONFIG_NAME "my" + +#define MARIADB_PACKAGE_VERSION "3.0.6" +#define MARIADB_PACKAGE_VERSION_ID 30006 +#define MARIADB_SYSTEM_TYPE "Linux" +#define MARIADB_MACHINE_TYPE "x86_64" +#define MARIADB_PLUGINDIR "lib/mariadb/plugin" + +/* mysqld compile time options */ +#ifndef MYSQL_CHARSET +#define MYSQL_CHARSET "" +#endif +#endif + +/* Source information */ +#define CC_SOURCE_REVISION "a0fd36cc5a5313414a5a2ebe9322577a29b4782a" + +#endif /* _mariadb_version_h_ */ diff --git a/contrib/mariadb-connector-c-cmake/linux_x86_64/libmariadb/ma_client_plugin.c b/contrib/mariadb-connector-c-cmake/linux_x86_64/libmariadb/ma_client_plugin.c new file 
mode 100644 index 00000000000..b7fdcdbcb85 --- /dev/null +++ b/contrib/mariadb-connector-c-cmake/linux_x86_64/libmariadb/ma_client_plugin.c @@ -0,0 +1,499 @@ +/* Copyright (C) 2010 - 2012 Sergei Golubchik and Monty Program Ab + 2015-2016 MariaDB Corporation AB + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Library General Public + License as published by the Free Software Foundation; either + version 2 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Library General Public License for more details. + + You should have received a copy of the GNU Library General Public + License along with this library; if not see + or write to the Free Software Foundation, Inc., + 51 Franklin St., Fifth Floor, Boston, MA 02110, USA */ + +/** + @file + + Support code for the client side (libmariadb) plugins + + Client plugins are somewhat different from server plugins, they are simpler. + + They do not need to be installed or in any way explicitly loaded on the + client, they are loaded automatically on demand. + One client plugin per shared object, soname *must* match the plugin name. + + There is no reference counting and no unloading either. +*/ + +#if _MSC_VER +/* Silence warnings about variable 'unused' being used. 
*/ +#define FORCE_INIT_OF_VARS 1 +#endif + +#include +#include +#include +#include +#include + +#include "errmsg.h" +#include + +struct st_client_plugin_int { + struct st_client_plugin_int *next; + void *dlhandle; + struct st_mysql_client_plugin *plugin; +}; + +static my_bool initialized= 0; +static MA_MEM_ROOT mem_root; + +static uint valid_plugins[][2]= { + {MYSQL_CLIENT_AUTHENTICATION_PLUGIN, MYSQL_CLIENT_AUTHENTICATION_PLUGIN_INTERFACE_VERSION}, + {MARIADB_CLIENT_PVIO_PLUGIN, MARIADB_CLIENT_PVIO_PLUGIN_INTERFACE_VERSION}, + {MARIADB_CLIENT_TRACE_PLUGIN, MARIADB_CLIENT_TRACE_PLUGIN_INTERFACE_VERSION}, + {MARIADB_CLIENT_CONNECTION_PLUGIN, MARIADB_CLIENT_CONNECTION_PLUGIN_INTERFACE_VERSION}, + {0, 0} +}; + +/* + Loaded plugins are stored in a linked list. + The list is append-only, the elements are added to the head (like in a stack). + The elements are added under a mutex, but the list can be read and traversed + without any mutex because once an element is added to the list, it stays + there. The main purpose of a mutex is to prevent two threads from + loading the same plugin twice in parallel. 
+*/ + + +struct st_client_plugin_int *plugin_list[MYSQL_CLIENT_MAX_PLUGINS + MARIADB_CLIENT_MAX_PLUGINS]; +#ifdef THREAD +static pthread_mutex_t LOCK_load_client_plugin; +#endif + + extern struct st_mysql_client_plugin mysql_native_password_client_plugin; + extern struct st_mysql_client_plugin mysql_old_password_client_plugin; + extern struct st_mysql_client_plugin pvio_socket_client_plugin; + + +struct st_mysql_client_plugin *mysql_client_builtins[]= +{ + (struct st_mysql_client_plugin *)&mysql_native_password_client_plugin, + (struct st_mysql_client_plugin *)&mysql_old_password_client_plugin, + (struct st_mysql_client_plugin *)&pvio_socket_client_plugin, + + 0 +}; + + +static int is_not_initialized(MYSQL *mysql, const char *name) +{ + if (initialized) + return 0; + + my_set_error(mysql, CR_AUTH_PLUGIN_CANNOT_LOAD, + SQLSTATE_UNKNOWN, ER(CR_AUTH_PLUGIN_CANNOT_LOAD), + name, "not initialized"); + return 1; +} + +static int get_plugin_nr(uint type) +{ + uint i= 0; + for(; valid_plugins[i][1]; i++) + if (valid_plugins[i][0] == type) + return i; + return -1; +} + +static const char *check_plugin_version(struct st_mysql_client_plugin *plugin, unsigned int version) +{ + if (plugin->interface_version < version || + (plugin->interface_version >> 8) > (version >> 8)) + return "Incompatible client plugin interface"; + return 0; +} + +/** + finds a plugin in the list + + @param name plugin name to search for + @param type plugin type + + @note this does NOT necessarily need a mutex, take care! 
+ + @retval a pointer to a found plugin or 0 +*/ +static struct st_mysql_client_plugin *find_plugin(const char *name, int type) +{ + struct st_client_plugin_int *p; + int plugin_nr= get_plugin_nr(type); + + DBUG_ASSERT(initialized); + if (plugin_nr == -1) + return 0; + + if (!name) + return plugin_list[plugin_nr]->plugin; + + for (p= plugin_list[plugin_nr]; p; p= p->next) + { + if (strcmp(p->plugin->name, name) == 0) + return p->plugin; + } + return NULL; +} + + +/** + verifies the plugin and adds it to the list + + @param mysql MYSQL structure (for error reporting) + @param plugin plugin to install + @param dlhandle a handle to the shared object (returned by dlopen) + or 0 if the plugin was not dynamically loaded + @param argc number of arguments in the 'va_list args' + @param args arguments passed to the plugin initialization function + + @retval a pointer to an installed plugin or 0 +*/ + +static struct st_mysql_client_plugin * +add_plugin(MYSQL *mysql, struct st_mysql_client_plugin *plugin, void *dlhandle, + int argc, va_list args) +{ + const char *errmsg; + struct st_client_plugin_int plugin_int, *p; + char errbuf[1024]; + int plugin_nr; + + DBUG_ASSERT(initialized); + + plugin_int.plugin= plugin; + plugin_int.dlhandle= dlhandle; + + if ((plugin_nr= get_plugin_nr(plugin->type)) == -1) + { + errmsg= "Unknown client plugin type"; + goto err1; + } + if ((errmsg= check_plugin_version(plugin, valid_plugins[plugin_nr][1]))) + goto err1; + + /* Call the plugin initialization function, if any */ + if (plugin->init && plugin->init(errbuf, sizeof(errbuf), argc, args)) + { + errmsg= errbuf; + goto err1; + } + + p= (struct st_client_plugin_int *) + ma_memdup_root(&mem_root, (char *)&plugin_int, sizeof(plugin_int)); + + if (!p) + { + errmsg= "Out of memory"; + goto err2; + } + +#ifdef THREAD + safe_mutex_assert_owner(&LOCK_load_client_plugin); +#endif + + p->next= plugin_list[plugin_nr]; + plugin_list[plugin_nr]= p; + + return plugin; + +err2: + if (plugin->deinit) + 
plugin->deinit(); +err1: + my_set_error(mysql, CR_AUTH_PLUGIN_CANNOT_LOAD, SQLSTATE_UNKNOWN, + ER(CR_AUTH_PLUGIN_CANNOT_LOAD), plugin->name, errmsg); + if (dlhandle) + (void)dlclose(dlhandle); + return NULL; +} + + +/** + Loads plugins which are specified in the environment variable + LIBMYSQL_PLUGINS. + + Multiple plugins must be separated by semicolon. This function doesn't + return or log an error. + + The function is be called by mysql_client_plugin_init + + @todo + Support extended syntax, passing parameters to plugins, for example + LIBMYSQL_PLUGINS="plugin1(param1,param2);plugin2;..." + or + LIBMYSQL_PLUGINS="plugin1=int:param1,str:param2;plugin2;..." +*/ + +static void load_env_plugins(MYSQL *mysql) +{ + char *plugs, *free_env, *s= getenv("LIBMYSQL_PLUGINS"); + + if (ma_check_env_str(s)) + return; + + free_env= strdup(s); + plugs= s= free_env; + + do { + if ((s= strchr(plugs, ';'))) + *s= '\0'; + mysql_load_plugin(mysql, plugs, -1, 0); + plugs= s + 1; + } while (s); + + free(free_env); +} + +/********** extern functions to be used by libmariadb *********************/ + +/** + Initializes the client plugin layer. + + This function must be called before any other client plugin function. 
+ + @retval 0 successful + @retval != 0 error occurred +*/ + +int mysql_client_plugin_init() +{ + MYSQL mysql; + struct st_mysql_client_plugin **builtin; + va_list unused; + LINT_INIT_STRUCT(unused); + + if (initialized) + return 0; + + memset(&mysql, 0, sizeof(mysql)); /* dummy mysql for set_mysql_extended_error */ + + pthread_mutex_init(&LOCK_load_client_plugin, MY_MUTEX_INIT_SLOW); + ma_init_alloc_root(&mem_root, 128, 128); + + memset(&plugin_list, 0, sizeof(plugin_list)); + + initialized= 1; + + pthread_mutex_lock(&LOCK_load_client_plugin); + for (builtin= mysql_client_builtins; *builtin; builtin++) + add_plugin(&mysql, *builtin, 0, 0, unused); + + pthread_mutex_unlock(&LOCK_load_client_plugin); + + load_env_plugins(&mysql); + + return 0; +} + + +/** + Deinitializes the client plugin layer. + + Unloades all client plugins and frees any associated resources. +*/ + +void mysql_client_plugin_deinit() +{ + int i; + struct st_client_plugin_int *p; + + if (!initialized) + return; + + for (i=0; i < MYSQL_CLIENT_MAX_PLUGINS; i++) + for (p= plugin_list[i]; p; p= p->next) + { + if (p->plugin->deinit) + p->plugin->deinit(); + if (p->dlhandle) + (void)dlclose(p->dlhandle); + } + + memset(&plugin_list, 0, sizeof(plugin_list)); + initialized= 0; + ma_free_root(&mem_root, MYF(0)); + pthread_mutex_destroy(&LOCK_load_client_plugin); +} + +/************* public facing functions, for client consumption *********/ + +/* see for a full description */ +struct st_mysql_client_plugin * STDCALL +mysql_client_register_plugin(MYSQL *mysql, + struct st_mysql_client_plugin *plugin) +{ + va_list unused; + LINT_INIT_STRUCT(unused); + + if (is_not_initialized(mysql, plugin->name)) + return NULL; + + pthread_mutex_lock(&LOCK_load_client_plugin); + + /* make sure the plugin wasn't loaded meanwhile */ + if (find_plugin(plugin->name, plugin->type)) + { + my_set_error(mysql, CR_AUTH_PLUGIN_CANNOT_LOAD, + SQLSTATE_UNKNOWN, ER(CR_AUTH_PLUGIN_CANNOT_LOAD), + plugin->name, "it is already loaded"); + 
plugin= NULL; + } + else + plugin= add_plugin(mysql, plugin, 0, 0, unused); + + pthread_mutex_unlock(&LOCK_load_client_plugin); + return plugin; +} + + +/* see for a full description */ +struct st_mysql_client_plugin * STDCALL +mysql_load_plugin_v(MYSQL *mysql, const char *name, int type, + int argc, va_list args) +{ + const char *errmsg; +#ifdef _WIN32 + char errbuf[1024]; +#endif + char dlpath[FN_REFLEN+1]; + void *sym, *dlhandle = NULL; + struct st_mysql_client_plugin *plugin; + char *env_plugin_dir= getenv("MARIADB_PLUGIN_DIR"); + + CLEAR_CLIENT_ERROR(mysql); + if (is_not_initialized(mysql, name)) + return NULL; + + pthread_mutex_lock(&LOCK_load_client_plugin); + + /* make sure the plugin wasn't loaded meanwhile */ + if (type >= 0 && find_plugin(name, type)) + { + errmsg= "it is already loaded"; + goto err; + } + + /* Compile dll path */ + snprintf(dlpath, sizeof(dlpath) - 1, "%s/%s%s", + mysql->options.extension && mysql->options.extension->plugin_dir ? + mysql->options.extension->plugin_dir : (env_plugin_dir) ? 
env_plugin_dir : + MARIADB_PLUGINDIR, name, SO_EXT); + + /* Open new dll handle */ + if (!(dlhandle= dlopen((const char *)dlpath, RTLD_NOW))) + { +#ifdef _WIN32 + char winmsg[255]; + size_t len; + winmsg[0] = 0; + FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM, + NULL, + GetLastError(), + MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), + winmsg, 255, NULL); + len= strlen(winmsg); + while (len > 0 && (winmsg[len - 1] == '\n' || winmsg[len - 1] == '\r')) + len--; + if (len) + winmsg[len] = 0; + snprintf(errbuf, sizeof(errbuf), "%s Library path is '%s'", winmsg, dlpath); + errmsg= errbuf; +#else + errmsg= dlerror(); +#endif + goto err; + } + + + if (!(sym= dlsym(dlhandle, plugin_declarations_sym))) + { + errmsg= "not a plugin"; + (void)dlclose(dlhandle); + goto err; + } + + plugin= (struct st_mysql_client_plugin*)sym; + + if (type >=0 && type != plugin->type) + { + errmsg= "type mismatch"; + goto err; + } + + if (strcmp(name, plugin->name)) + { + errmsg= "name mismatch"; + goto err; + } + + if (type < 0 && find_plugin(name, plugin->type)) + { + errmsg= "it is already loaded"; + goto err; + } + + plugin= add_plugin(mysql, plugin, dlhandle, argc, args); + + pthread_mutex_unlock(&LOCK_load_client_plugin); + + return plugin; + +err: + if (dlhandle) + dlclose(dlhandle); + pthread_mutex_unlock(&LOCK_load_client_plugin); + my_set_error(mysql, CR_AUTH_PLUGIN_CANNOT_LOAD, SQLSTATE_UNKNOWN, + ER(CR_AUTH_PLUGIN_CANNOT_LOAD), name, errmsg); + return NULL; +} + + +/* see for a full description */ +struct st_mysql_client_plugin * STDCALL +mysql_load_plugin(MYSQL *mysql, const char *name, int type, int argc, ...) 
+{ + struct st_mysql_client_plugin *p; + va_list args; + va_start(args, argc); + p= mysql_load_plugin_v(mysql, name, type, argc, args); + va_end(args); + return p; +} + +/* see for a full description */ +struct st_mysql_client_plugin * STDCALL +mysql_client_find_plugin(MYSQL *mysql, const char *name, int type) +{ + struct st_mysql_client_plugin *p; + int plugin_nr= get_plugin_nr(type); + + if (is_not_initialized(mysql, name)) + return NULL; + + if (plugin_nr == -1) + { + my_set_error(mysql, CR_AUTH_PLUGIN_CANNOT_LOAD, SQLSTATE_UNKNOWN, + ER(CR_AUTH_PLUGIN_CANNOT_LOAD), name, "invalid type"); + } + + if ((p= find_plugin(name, type))) + return p; + + /* not found, load it */ + return mysql_load_plugin(mysql, name, type, 0); +} + diff --git a/contrib/murmurhash/CMakeLists.txt b/contrib/murmurhash/CMakeLists.txt new file mode 100644 index 00000000000..c5e467a2d6d --- /dev/null +++ b/contrib/murmurhash/CMakeLists.txt @@ -0,0 +1,7 @@ +add_library(murmurhash + src/murmurhash2.cpp + src/murmurhash3.cpp + include/murmurhash2.h + include/murmurhash3.h) + +target_include_directories (murmurhash PUBLIC include) diff --git a/contrib/murmurhash/LICENSE b/contrib/murmurhash/LICENSE new file mode 100644 index 00000000000..f6cdede60b8 --- /dev/null +++ b/contrib/murmurhash/LICENSE @@ -0,0 +1 @@ +MurmurHash was written by Austin Appleby, and is placed in the publicdomain. The author hereby disclaims copyright to this source code. 
diff --git a/contrib/murmurhash/README b/contrib/murmurhash/README new file mode 100644 index 00000000000..5428d30b26d --- /dev/null +++ b/contrib/murmurhash/README @@ -0,0 +1,6 @@ +Original URL: https://github.com/aappleby/smhasher + +version: +commit 61a0530f28277f2e850bfc39600ce61d02b518de +author aappleby@gmail.com +date 2016-01-09T06:07:17Z diff --git a/contrib/murmurhash/include/murmurhash2.h b/contrib/murmurhash/include/murmurhash2.h new file mode 100644 index 00000000000..e95cf2a4d85 --- /dev/null +++ b/contrib/murmurhash/include/murmurhash2.h @@ -0,0 +1,35 @@ +//----------------------------------------------------------------------------- +// MurmurHash2 was written by Austin Appleby, and is placed in the public +// domain. The author hereby disclaims copyright to this source code. + +#ifndef _MURMURHASH2_H_ +#define _MURMURHASH2_H_ + +//----------------------------------------------------------------------------- +// Platform-specific functions and macros + +// Microsoft Visual Studio + +#if defined(_MSC_VER) && (_MSC_VER < 1600) + +typedef unsigned char uint8_t; +typedef unsigned int uint32_t; +typedef unsigned __int64 uint64_t; + +// Other compilers + +#else // defined(_MSC_VER) + +#include + +#endif // !defined(_MSC_VER) + +uint32_t MurmurHash2 (const void * key, int len, uint32_t seed); +uint64_t MurmurHash64A (const void * key, int len, uint64_t seed); +uint64_t MurmurHash64B (const void * key, int len, uint64_t seed); +uint32_t MurmurHash2A (const void * key, int len, uint32_t seed); +uint32_t MurmurHashNeutral2 (const void * key, int len, uint32_t seed); +uint32_t MurmurHashAligned2 (const void * key, int len, uint32_t seed); + +#endif // _MURMURHASH2_H_ + diff --git a/contrib/murmurhash/include/murmurhash3.h b/contrib/murmurhash/include/murmurhash3.h new file mode 100644 index 00000000000..e1c6d34976c --- /dev/null +++ b/contrib/murmurhash/include/murmurhash3.h @@ -0,0 +1,37 @@ 
+//----------------------------------------------------------------------------- +// MurmurHash3 was written by Austin Appleby, and is placed in the public +// domain. The author hereby disclaims copyright to this source code. + +#ifndef _MURMURHASH3_H_ +#define _MURMURHASH3_H_ + +//----------------------------------------------------------------------------- +// Platform-specific functions and macros + +// Microsoft Visual Studio + +#if defined(_MSC_VER) && (_MSC_VER < 1600) + +typedef unsigned char uint8_t; +typedef unsigned int uint32_t; +typedef unsigned __int64 uint64_t; + +// Other compilers + +#else // defined(_MSC_VER) + +#include + +#endif // !defined(_MSC_VER) + +//----------------------------------------------------------------------------- + +void MurmurHash3_x86_32 ( const void * key, int len, uint32_t seed, void * out ); + +void MurmurHash3_x86_128 ( const void * key, int len, uint32_t seed, void * out ); + +void MurmurHash3_x64_128 ( const void * key, int len, uint32_t seed, void * out ); + +//----------------------------------------------------------------------------- + +#endif // _MURMURHASH3_H_ diff --git a/contrib/murmurhash/src/murmurhash2.cpp b/contrib/murmurhash/src/murmurhash2.cpp new file mode 100644 index 00000000000..8a41ba02d98 --- /dev/null +++ b/contrib/murmurhash/src/murmurhash2.cpp @@ -0,0 +1,421 @@ +// MurmurHash2 was written by Austin Appleby, and is placed in the public +// domain. The author hereby disclaims copyright to this source code. + +// Note - This code makes a few assumptions about how your machine behaves - + +// 1. We can read a 4-byte value from any address without crashing +// 2. sizeof(int) == 4 + +// And it has a few limitations - + +// 1. It will not work incrementally. +// 2. It will not produce the same results on little-endian and big-endian +// machines. 
+ +#include "murmurhash2.h" + +// Platform-specific functions and macros +// Microsoft Visual Studio + +#if defined(_MSC_VER) + +#define BIG_CONSTANT(x) (x) + +// Other compilers + +#else // defined(_MSC_VER) + +#define BIG_CONSTANT(x) (x##LLU) + +#endif // !defined(_MSC_VER) + + +uint32_t MurmurHash2(const void * key, int len, uint32_t seed) +{ + // 'm' and 'r' are mixing constants generated offline. + // They're not really 'magic', they just happen to work well. + + const uint32_t m = 0x5bd1e995; + const int r = 24; + + // Initialize the hash to a 'random' value + + uint32_t h = seed ^ len; + + // Mix 4 bytes at a time into the hash + + const unsigned char * data = reinterpret_cast(key); + + while (len >= 4) + { + uint32_t k = *reinterpret_cast(data); + k *= m; + k ^= k >> r; + k *= m; + + h *= m; + h ^= k; + + data += 4; + len -= 4; + } + + // Handle the last few bytes of the input array + + switch (len) + { + case 3: h ^= data[2] << 16; + case 2: h ^= data[1] << 8; + case 1: h ^= data[0]; + h *= m; + }; + + // Do a few final mixes of the hash to ensure the last few + // bytes are well-incorporated. + + h ^= h >> 13; + h *= m; + h ^= h >> 15; + + return h; +} + +// MurmurHash2, 64-bit versions, by Austin Appleby + +// The same caveats as 32-bit MurmurHash2 apply here - beware of alignment +// and endian-ness issues if used across multiple platforms. 
+ +// 64-bit hash for 64-bit platforms + +uint64_t MurmurHash64A(const void * key, int len, uint64_t seed) +{ + const uint64_t m = BIG_CONSTANT(0xc6a4a7935bd1e995); + const int r = 47; + + uint64_t h = seed ^ (len * m); + + const uint64_t * data = reinterpret_cast(key); + const uint64_t * end = data + (len/8); + + while (data != end) + { + uint64_t k = *data++; + + k *= m; + k ^= k >> r; + k *= m; + + h ^= k; + h *= m; + } + + const unsigned char * data2 = reinterpret_cast(data); + + switch (len & 7) + { + case 7: h ^= static_cast(data2[6]) << 48; + case 6: h ^= static_cast(data2[5]) << 40; + case 5: h ^= static_cast(data2[4]) << 32; + case 4: h ^= static_cast(data2[3]) << 24; + case 3: h ^= static_cast(data2[2]) << 16; + case 2: h ^= static_cast(data2[1]) << 8; + case 1: h ^= static_cast(data2[0]); + h *= m; + }; + + h ^= h >> r; + h *= m; + h ^= h >> r; + + return h; +} + + +// 64-bit hash for 32-bit platforms + +uint64_t MurmurHash64B(const void * key, int len, uint64_t seed) +{ + const uint32_t m = 0x5bd1e995; + const int r = 24; + + uint32_t h1 = static_cast(seed) ^ len; + uint32_t h2 = static_cast(seed >> 32); + + const uint32_t * data = reinterpret_cast(key); + + while (len >= 8) + { + uint32_t k1 = *data++; + k1 *= m; k1 ^= k1 >> r; k1 *= m; + h1 *= m; h1 ^= k1; + len -= 4; + + uint32_t k2 = *data++; + k2 *= m; k2 ^= k2 >> r; k2 *= m; + h2 *= m; h2 ^= k2; + len -= 4; + } + + if (len >= 4) + { + uint32_t k1 = *data++; + k1 *= m; k1 ^= k1 >> r; k1 *= m; + h1 *= m; h1 ^= k1; + len -= 4; + } + + switch (len) + { + case 3: h2 ^= reinterpret_cast(data)[2] << 16; + case 2: h2 ^= reinterpret_cast(data)[1] << 8; + case 1: h2 ^= reinterpret_cast(data)[0]; + h2 *= m; + }; + + h1 ^= h2 >> 18; h1 *= m; + h2 ^= h1 >> 22; h2 *= m; + h1 ^= h2 >> 17; h1 *= m; + h2 ^= h1 >> 19; h2 *= m; + + uint64_t h = h1; + + h = (h << 32) | h2; + + return h; +} + +// MurmurHash2A, by Austin Appleby + +// This is a variant of MurmurHash2 modified to use the Merkle-Damgard +// construction. 
Bulk speed should be identical to Murmur2, small-key speed +// will be 10%-20% slower due to the added overhead at the end of the hash. + +// This variant fixes a minor issue where null keys were more likely to +// collide with each other than expected, and also makes the function +// more amenable to incremental implementations. + +#define mmix(h,k) { k *= m; k ^= k >> r; k *= m; h *= m; h ^= k; } + +uint32_t MurmurHash2A(const void * key, int len, uint32_t seed) +{ + const uint32_t m = 0x5bd1e995; + const int r = 24; + uint32_t l = len; + + const unsigned char * data = reinterpret_cast(key); + + uint32_t h = seed; + + while (len >= 4) + { + uint32_t k = *reinterpret_cast(data); + mmix(h,k); + data += 4; + len -= 4; + } + + uint32_t t = 0; + + switch (len) + { + case 3: t ^= data[2] << 16; + case 2: t ^= data[1] << 8; + case 1: t ^= data[0]; + }; + + mmix(h,t); + mmix(h,l); + + h ^= h >> 13; + h *= m; + h ^= h >> 15; + + return h; +} + +// MurmurHashNeutral2, by Austin Appleby + +// Same as MurmurHash2, but endian- and alignment-neutral. +// Half the speed though, alas. + +uint32_t MurmurHashNeutral2(const void * key, int len, uint32_t seed) +{ + const uint32_t m = 0x5bd1e995; + const int r = 24; + + uint32_t h = seed ^ len; + + const unsigned char * data = reinterpret_cast(key); + + while (len >= 4) + { + uint32_t k; + + k = data[0]; + k |= data[1] << 8; + k |= data[2] << 16; + k |= data[3] << 24; + + k *= m; + k ^= k >> r; + k *= m; + + h *= m; + h ^= k; + + data += 4; + len -= 4; + } + + switch (len) + { + case 3: h ^= data[2] << 16; + case 2: h ^= data[1] << 8; + case 1: h ^= data[0]; + h *= m; + }; + + h ^= h >> 13; + h *= m; + h ^= h >> 15; + + return h; +} + +//----------------------------------------------------------------------------- +// MurmurHashAligned2, by Austin Appleby + +// Same algorithm as MurmurHash2, but only does aligned reads - should be safer +// on certain platforms. 
+ +// Performance will be lower than MurmurHash2 + +#define MIX(h,k,m) { k *= m; k ^= k >> r; k *= m; h *= m; h ^= k; } + + +uint32_t MurmurHashAligned2(const void * key, int len, uint32_t seed) +{ + const uint32_t m = 0x5bd1e995; + const int r = 24; + + const unsigned char * data = reinterpret_cast(key); + + uint32_t h = seed ^ len; + + int align = reinterpret_cast(data) & 3; + + if (align && (len >= 4)) + { + // Pre-load the temp registers + + uint32_t t = 0, d = 0; + + switch (align) + { + case 1: t |= data[2] << 16; + case 2: t |= data[1] << 8; + case 3: t |= data[0]; + } + + t <<= (8 * align); + + data += 4-align; + len -= 4-align; + + int sl = 8 * (4-align); + int sr = 8 * align; + + // Mix + + while (len >= 4) + { + d = *(reinterpret_cast(data)); + t = (t >> sr) | (d << sl); + + uint32_t k = t; + + MIX(h,k,m); + + t = d; + + data += 4; + len -= 4; + } + + // Handle leftover data in temp registers + + d = 0; + + if (len >= align) + { + switch (align) + { + case 3: d |= data[2] << 16; + case 2: d |= data[1] << 8; + case 1: d |= data[0]; + } + + uint32_t k = (t >> sr) | (d << sl); + MIX(h,k,m); + + data += align; + len -= align; + + //---------- + // Handle tail bytes + + switch (len) + { + case 3: h ^= data[2] << 16; + case 2: h ^= data[1] << 8; + case 1: h ^= data[0]; + h *= m; + }; + } + else + { + switch (len) + { + case 3: d |= data[2] << 16; + case 2: d |= data[1] << 8; + case 1: d |= data[0]; + case 0: h ^= (t >> sr) | (d << sl); + h *= m; + } + } + + h ^= h >> 13; + h *= m; + h ^= h >> 15; + + return h; + } + else + { + while (len >= 4) + { + uint32_t k = *reinterpret_cast(data); + + MIX(h,k,m); + + data += 4; + len -= 4; + } + + // Handle tail bytes + + switch (len) + { + case 3: h ^= data[2] << 16; + case 2: h ^= data[1] << 8; + case 1: h ^= data[0]; + h *= m; + }; + + h ^= h >> 13; + h *= m; + h ^= h >> 15; + + return h; + } +} \ No newline at end of file diff --git a/contrib/murmurhash/src/murmurhash3.cpp b/contrib/murmurhash/src/murmurhash3.cpp new 
file mode 100644 index 00000000000..2831bf5c73b --- /dev/null +++ b/contrib/murmurhash/src/murmurhash3.cpp @@ -0,0 +1,331 @@ +// MurmurHash3 was written by Austin Appleby, and is placed in the public +// domain. The author hereby disclaims copyright to this source code. + +// Note - The x86 and x64 versions do _not_ produce the same results, as the +// algorithms are optimized for their respective platforms. You can still +// compile and run any of them on any platform, but your performance with the +// non-native version will be less than optimal. + +#include "murmurhash3.h" + +//----------------------------------------------------------------------------- +// Platform-specific functions and macros + +// Microsoft Visual Studio + +#if defined(_MSC_VER) + +#define FORCE_INLINE __forceinline + +#include + +#define ROTL32(x,y) _rotl(x,y) +#define ROTL64(x,y) _rotl64(x,y) + +#define BIG_CONSTANT(x) (x) + +// Other compilers + +#else // defined(_MSC_VER) + +#define FORCE_INLINE inline __attribute__((always_inline)) + +inline uint32_t rotl32 ( uint32_t x, int8_t r ) +{ + return (x << r) | (x >> (32 - r)); +} + +inline uint64_t rotl64 ( uint64_t x, int8_t r ) +{ + return (x << r) | (x >> (64 - r)); +} + +#define ROTL32(x,y) rotl32(x,y) +#define ROTL64(x,y) rotl64(x,y) + +#define BIG_CONSTANT(x) (x##LLU) + +#endif // !defined(_MSC_VER) + +//----------------------------------------------------------------------------- +// Block read - if your platform needs to do endian-swapping or can only +// handle aligned reads, do the conversion here + +FORCE_INLINE uint32_t getblock32 ( const uint32_t * p, int i ) +{ + return p[i]; +} + +FORCE_INLINE uint64_t getblock64 ( const uint64_t * p, int i ) +{ + return p[i]; +} + +//----------------------------------------------------------------------------- +// Finalization mix - force all bits of a hash block to avalanche + +FORCE_INLINE uint32_t fmix32 ( uint32_t h ) +{ + h ^= h >> 16; + h *= 0x85ebca6b; + h ^= h >> 13; + h *= 
0xc2b2ae35; + h ^= h >> 16; + + return h; +} + +//---------- + +FORCE_INLINE uint64_t fmix64 ( uint64_t k ) +{ + k ^= k >> 33; + k *= BIG_CONSTANT(0xff51afd7ed558ccd); + k ^= k >> 33; + k *= BIG_CONSTANT(0xc4ceb9fe1a85ec53); + k ^= k >> 33; + + return k; +} + +//----------------------------------------------------------------------------- + +void MurmurHash3_x86_32 ( const void * key, int len, + uint32_t seed, void * out ) +{ + const uint8_t * data = (const uint8_t*)key; + const int nblocks = len / 4; + + uint32_t h1 = seed; + + const uint32_t c1 = 0xcc9e2d51; + const uint32_t c2 = 0x1b873593; + + //---------- + // body + + const uint32_t * blocks = (const uint32_t *)(data + nblocks*4); + + for(int i = -nblocks; i; i++) + { + uint32_t k1 = getblock32(blocks,i); + + k1 *= c1; + k1 = ROTL32(k1,15); + k1 *= c2; + + h1 ^= k1; + h1 = ROTL32(h1,13); + h1 = h1*5+0xe6546b64; + } + + //---------- + // tail + + const uint8_t * tail = (const uint8_t*)(data + nblocks*4); + + uint32_t k1 = 0; + + switch(len & 3) + { + case 3: k1 ^= tail[2] << 16; + case 2: k1 ^= tail[1] << 8; + case 1: k1 ^= tail[0]; + k1 *= c1; k1 = ROTL32(k1,15); k1 *= c2; h1 ^= k1; + }; + + //---------- + // finalization + + h1 ^= len; + + h1 = fmix32(h1); + + *(uint32_t*)out = h1; +} + +//----------------------------------------------------------------------------- + +void MurmurHash3_x86_128 ( const void * key, const int len, + uint32_t seed, void * out ) +{ + const uint8_t * data = (const uint8_t*)key; + const int nblocks = len / 16; + + uint32_t h1 = seed; + uint32_t h2 = seed; + uint32_t h3 = seed; + uint32_t h4 = seed; + + const uint32_t c1 = 0x239b961b; + const uint32_t c2 = 0xab0e9789; + const uint32_t c3 = 0x38b34ae5; + const uint32_t c4 = 0xa1e38b93; + + //---------- + // body + + const uint32_t * blocks = (const uint32_t *)(data + nblocks*16); + + for(int i = -nblocks; i; i++) + { + uint32_t k1 = getblock32(blocks,i*4+0); + uint32_t k2 = getblock32(blocks,i*4+1); + uint32_t k3 = 
getblock32(blocks,i*4+2); + uint32_t k4 = getblock32(blocks,i*4+3); + + k1 *= c1; k1 = ROTL32(k1,15); k1 *= c2; h1 ^= k1; + + h1 = ROTL32(h1,19); h1 += h2; h1 = h1*5+0x561ccd1b; + + k2 *= c2; k2 = ROTL32(k2,16); k2 *= c3; h2 ^= k2; + + h2 = ROTL32(h2,17); h2 += h3; h2 = h2*5+0x0bcaa747; + + k3 *= c3; k3 = ROTL32(k3,17); k3 *= c4; h3 ^= k3; + + h3 = ROTL32(h3,15); h3 += h4; h3 = h3*5+0x96cd1c35; + + k4 *= c4; k4 = ROTL32(k4,18); k4 *= c1; h4 ^= k4; + + h4 = ROTL32(h4,13); h4 += h1; h4 = h4*5+0x32ac3b17; + } + + //---------- + // tail + + const uint8_t * tail = (const uint8_t*)(data + nblocks*16); + + uint32_t k1 = 0; + uint32_t k2 = 0; + uint32_t k3 = 0; + uint32_t k4 = 0; + + switch(len & 15) + { + case 15: k4 ^= tail[14] << 16; + case 14: k4 ^= tail[13] << 8; + case 13: k4 ^= tail[12] << 0; + k4 *= c4; k4 = ROTL32(k4,18); k4 *= c1; h4 ^= k4; + + case 12: k3 ^= tail[11] << 24; + case 11: k3 ^= tail[10] << 16; + case 10: k3 ^= tail[ 9] << 8; + case 9: k3 ^= tail[ 8] << 0; + k3 *= c3; k3 = ROTL32(k3,17); k3 *= c4; h3 ^= k3; + + case 8: k2 ^= tail[ 7] << 24; + case 7: k2 ^= tail[ 6] << 16; + case 6: k2 ^= tail[ 5] << 8; + case 5: k2 ^= tail[ 4] << 0; + k2 *= c2; k2 = ROTL32(k2,16); k2 *= c3; h2 ^= k2; + + case 4: k1 ^= tail[ 3] << 24; + case 3: k1 ^= tail[ 2] << 16; + case 2: k1 ^= tail[ 1] << 8; + case 1: k1 ^= tail[ 0] << 0; + k1 *= c1; k1 = ROTL32(k1,15); k1 *= c2; h1 ^= k1; + }; + + //---------- + // finalization + + h1 ^= len; h2 ^= len; h3 ^= len; h4 ^= len; + + h1 += h2; h1 += h3; h1 += h4; + h2 += h1; h3 += h1; h4 += h1; + + h1 = fmix32(h1); + h2 = fmix32(h2); + h3 = fmix32(h3); + h4 = fmix32(h4); + + h1 += h2; h1 += h3; h1 += h4; + h2 += h1; h3 += h1; h4 += h1; + + ((uint32_t*)out)[0] = h1; + ((uint32_t*)out)[1] = h2; + ((uint32_t*)out)[2] = h3; + ((uint32_t*)out)[3] = h4; +} + +//----------------------------------------------------------------------------- + +void MurmurHash3_x64_128 ( const void * key, const int len, + const uint32_t seed, void * out ) +{ + 
const uint8_t * data = (const uint8_t*)key; + const int nblocks = len / 16; + + uint64_t h1 = seed; + uint64_t h2 = seed; + + const uint64_t c1 = BIG_CONSTANT(0x87c37b91114253d5); + const uint64_t c2 = BIG_CONSTANT(0x4cf5ad432745937f); + + //---------- + // body + + const uint64_t * blocks = (const uint64_t *)(data); + + for(int i = 0; i < nblocks; i++) + { + uint64_t k1 = getblock64(blocks,i*2+0); + uint64_t k2 = getblock64(blocks,i*2+1); + + k1 *= c1; k1 = ROTL64(k1,31); k1 *= c2; h1 ^= k1; + + h1 = ROTL64(h1,27); h1 += h2; h1 = h1*5+0x52dce729; + + k2 *= c2; k2 = ROTL64(k2,33); k2 *= c1; h2 ^= k2; + + h2 = ROTL64(h2,31); h2 += h1; h2 = h2*5+0x38495ab5; + } + + //---------- + // tail + + const uint8_t * tail = (const uint8_t*)(data + nblocks*16); + + uint64_t k1 = 0; + uint64_t k2 = 0; + + switch(len & 15) + { + case 15: k2 ^= ((uint64_t)tail[14]) << 48; + case 14: k2 ^= ((uint64_t)tail[13]) << 40; + case 13: k2 ^= ((uint64_t)tail[12]) << 32; + case 12: k2 ^= ((uint64_t)tail[11]) << 24; + case 11: k2 ^= ((uint64_t)tail[10]) << 16; + case 10: k2 ^= ((uint64_t)tail[ 9]) << 8; + case 9: k2 ^= ((uint64_t)tail[ 8]) << 0; + k2 *= c2; k2 = ROTL64(k2,33); k2 *= c1; h2 ^= k2; + + case 8: k1 ^= ((uint64_t)tail[ 7]) << 56; + case 7: k1 ^= ((uint64_t)tail[ 6]) << 48; + case 6: k1 ^= ((uint64_t)tail[ 5]) << 40; + case 5: k1 ^= ((uint64_t)tail[ 4]) << 32; + case 4: k1 ^= ((uint64_t)tail[ 3]) << 24; + case 3: k1 ^= ((uint64_t)tail[ 2]) << 16; + case 2: k1 ^= ((uint64_t)tail[ 1]) << 8; + case 1: k1 ^= ((uint64_t)tail[ 0]) << 0; + k1 *= c1; k1 = ROTL64(k1,31); k1 *= c2; h1 ^= k1; + }; + + //---------- + // finalization + + h1 ^= len; h2 ^= len; + + h1 += h2; + h2 += h1; + + h1 = fmix64(h1); + h2 = fmix64(h2); + + h1 += h2; + h2 += h1; + + ((uint64_t*)out)[0] = h1; + ((uint64_t*)out)[1] = h2; +} diff --git a/contrib/poco b/contrib/poco index 3a2d0a833a2..3df947389e6 160000 --- a/contrib/poco +++ b/contrib/poco @@ -1 +1 @@ -Subproject commit 3a2d0a833a22ef5e1164a9ada54e3253cb038904 
+Subproject commit 3df947389e6d9654919002797bdd86ed190b3963 diff --git a/contrib/re2_st/CMakeLists.txt b/contrib/re2_st/CMakeLists.txt index cd0f97e08f3..6bc7fd8f343 100644 --- a/contrib/re2_st/CMakeLists.txt +++ b/contrib/re2_st/CMakeLists.txt @@ -12,26 +12,27 @@ endforeach () add_library (re2_st ${RE2_ST_SOURCES}) target_compile_definitions (re2_st PRIVATE NDEBUG NO_THREADS re2=re2_st) -target_include_directories (re2_st PRIVATE . PUBLIC ${CMAKE_CURRENT_BINARY_DIR} ${RE2_SOURCE_DIR}) +target_include_directories (re2_st PRIVATE .) +target_include_directories (re2_st SYSTEM PUBLIC ${CMAKE_CURRENT_BINARY_DIR} ${RE2_SOURCE_DIR}) file (MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/re2_st) foreach (FILENAME filtered_re2.h re2.h set.h stringpiece.h) - add_custom_command (OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/re2_st/${FILENAME}" - COMMAND ${CMAKE_COMMAND} -DSOURCE_FILENAME="${RE2_SOURCE_DIR}/re2/${FILENAME}" - -DTARGET_FILENAME="${CMAKE_CURRENT_BINARY_DIR}/re2_st/${FILENAME}" - -P "${CMAKE_CURRENT_SOURCE_DIR}/re2_transform.cmake" - COMMENT "Creating ${FILENAME} for re2_st library.") - add_custom_target (transform_${FILENAME} DEPENDS "${CMAKE_CURRENT_BINARY_DIR}/re2_st/${FILENAME}") - add_dependencies (re2_st transform_${FILENAME}) + add_custom_command (OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/re2_st/${FILENAME}" + COMMAND ${CMAKE_COMMAND} -DSOURCE_FILENAME="${RE2_SOURCE_DIR}/re2/${FILENAME}" + -DTARGET_FILENAME="${CMAKE_CURRENT_BINARY_DIR}/re2_st/${FILENAME}" + -P "${CMAKE_CURRENT_SOURCE_DIR}/re2_transform.cmake" + COMMENT "Creating ${FILENAME} for re2_st library.") + add_custom_target (transform_${FILENAME} DEPENDS "${CMAKE_CURRENT_BINARY_DIR}/re2_st/${FILENAME}") + add_dependencies (re2_st transform_${FILENAME}) endforeach () file (MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/util) foreach (FILENAME mutex.h) - add_custom_command (OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/util/${FILENAME}" - COMMAND ${CMAKE_COMMAND} -DSOURCE_FILENAME="${RE2_SOURCE_DIR}/util/${FILENAME}" - 
-DTARGET_FILENAME="${CMAKE_CURRENT_BINARY_DIR}/util/${FILENAME}" - -P "${CMAKE_CURRENT_SOURCE_DIR}/re2_transform.cmake" - COMMENT "Creating ${FILENAME} for re2_st library.") - add_custom_target (transform_${FILENAME} DEPENDS "${CMAKE_CURRENT_BINARY_DIR}/util/${FILENAME}") - add_dependencies (re2_st transform_${FILENAME}) + add_custom_command (OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/util/${FILENAME}" + COMMAND ${CMAKE_COMMAND} -DSOURCE_FILENAME="${RE2_SOURCE_DIR}/util/${FILENAME}" + -DTARGET_FILENAME="${CMAKE_CURRENT_BINARY_DIR}/util/${FILENAME}" + -P "${CMAKE_CURRENT_SOURCE_DIR}/re2_transform.cmake" + COMMENT "Creating ${FILENAME} for re2_st library.") + add_custom_target (transform_${FILENAME} DEPENDS "${CMAKE_CURRENT_BINARY_DIR}/util/${FILENAME}") + add_dependencies (re2_st transform_${FILENAME}) endforeach () diff --git a/contrib/ssl b/contrib/ssl index 6fbe1c6f404..de02224a42c 160000 --- a/contrib/ssl +++ b/contrib/ssl @@ -1 +1 @@ -Subproject commit 6fbe1c6f404193989c5f6a63115d80fbe34ce2a3 +Subproject commit de02224a42c69e3d8c9112c82018816f821878d0 diff --git a/contrib/unixodbc b/contrib/unixodbc new file mode 160000 index 00000000000..b0ad30f7f62 --- /dev/null +++ b/contrib/unixodbc @@ -0,0 +1 @@ +Subproject commit b0ad30f7f6289c12b76f04bfb9d466374bb32168 diff --git a/contrib/unixodbc-cmake/CMakeLists.txt b/contrib/unixodbc-cmake/CMakeLists.txt new file mode 100644 index 00000000000..4f9f6b41538 --- /dev/null +++ b/contrib/unixodbc-cmake/CMakeLists.txt @@ -0,0 +1,288 @@ +set(ODBC_SOURCE_DIR ${CMAKE_SOURCE_DIR}/contrib/unixodbc) +set(ODBC_BINARY_DIR ${CMAKE_BINARY_DIR}/contrib/unixodbc) + + +set(SRCS +${ODBC_SOURCE_DIR}/libltdl/lt__alloc.c +${ODBC_SOURCE_DIR}/libltdl/lt__strl.c +${ODBC_SOURCE_DIR}/libltdl/ltdl.c +${ODBC_SOURCE_DIR}/libltdl/lt_dlloader.c +${ODBC_SOURCE_DIR}/libltdl/slist.c +${ODBC_SOURCE_DIR}/libltdl/lt_error.c +${ODBC_SOURCE_DIR}/libltdl/loaders/dlopen.c +${ODBC_SOURCE_DIR}/libltdl/loaders/preopen.c +#${ODBC_SOURCE_DIR}/libltdl/lt__dirent.c 
+#${ODBC_SOURCE_DIR}/libltdl/lt__argz.c +#${ODBC_SOURCE_DIR}/libltdl/loaders/dld_link.c +#${ODBC_SOURCE_DIR}/libltdl/loaders/load_add_on.c +#${ODBC_SOURCE_DIR}/libltdl/loaders/shl_load.c +#${ODBC_SOURCE_DIR}/libltdl/loaders/loadlibrary.c +#${ODBC_SOURCE_DIR}/libltdl/loaders/dyld.c + +# This file is generated by 'libtool' inside libltdl directory and then removed. +${CMAKE_CURRENT_SOURCE_DIR}/linux_x86_64/libltdl/libltdlcS.c +) + +add_library(ltdl STATIC ${SRCS}) + +target_include_directories(ltdl PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/linux_x86_64/libltdl) +target_include_directories(ltdl PUBLIC ${ODBC_SOURCE_DIR}/libltdl) +target_include_directories(ltdl PUBLIC ${ODBC_SOURCE_DIR}/libltdl/libltdl) + +target_compile_definitions(ltdl PRIVATE -DHAVE_CONFIG_H -DLTDL -DLTDLOPEN=libltdlc) + +target_compile_options(ltdl PRIVATE -Wno-constant-logical-operand -Wno-unknown-warning-option -O2) + + +set(SRCS +${ODBC_SOURCE_DIR}/DriverManager/__attribute.c +${ODBC_SOURCE_DIR}/DriverManager/__connection.c +${ODBC_SOURCE_DIR}/DriverManager/__handles.c +${ODBC_SOURCE_DIR}/DriverManager/__info.c +${ODBC_SOURCE_DIR}/DriverManager/__stats.c +${ODBC_SOURCE_DIR}/DriverManager/SQLAllocConnect.c +${ODBC_SOURCE_DIR}/DriverManager/SQLAllocEnv.c +${ODBC_SOURCE_DIR}/DriverManager/SQLAllocHandle.c +${ODBC_SOURCE_DIR}/DriverManager/SQLAllocHandleStd.c +${ODBC_SOURCE_DIR}/DriverManager/SQLAllocStmt.c +${ODBC_SOURCE_DIR}/DriverManager/SQLBindCol.c +${ODBC_SOURCE_DIR}/DriverManager/SQLBindParam.c +${ODBC_SOURCE_DIR}/DriverManager/SQLBindParameter.c +${ODBC_SOURCE_DIR}/DriverManager/SQLBrowseConnect.c +${ODBC_SOURCE_DIR}/DriverManager/SQLBrowseConnectW.c +${ODBC_SOURCE_DIR}/DriverManager/SQLBulkOperations.c +${ODBC_SOURCE_DIR}/DriverManager/SQLCancel.c +${ODBC_SOURCE_DIR}/DriverManager/SQLCancelHandle.c +${ODBC_SOURCE_DIR}/DriverManager/SQLCloseCursor.c +${ODBC_SOURCE_DIR}/DriverManager/SQLColAttribute.c +${ODBC_SOURCE_DIR}/DriverManager/SQLColAttributes.c 
+${ODBC_SOURCE_DIR}/DriverManager/SQLColAttributesW.c +${ODBC_SOURCE_DIR}/DriverManager/SQLColAttributeW.c +${ODBC_SOURCE_DIR}/DriverManager/SQLColumnPrivileges.c +${ODBC_SOURCE_DIR}/DriverManager/SQLColumnPrivilegesW.c +${ODBC_SOURCE_DIR}/DriverManager/SQLColumns.c +${ODBC_SOURCE_DIR}/DriverManager/SQLColumnsW.c +${ODBC_SOURCE_DIR}/DriverManager/SQLConnect.c +${ODBC_SOURCE_DIR}/DriverManager/SQLConnectW.c +${ODBC_SOURCE_DIR}/DriverManager/SQLCopyDesc.c +${ODBC_SOURCE_DIR}/DriverManager/SQLDataSources.c +${ODBC_SOURCE_DIR}/DriverManager/SQLDataSourcesW.c +${ODBC_SOURCE_DIR}/DriverManager/SQLDescribeCol.c +${ODBC_SOURCE_DIR}/DriverManager/SQLDescribeColW.c +${ODBC_SOURCE_DIR}/DriverManager/SQLDescribeParam.c +${ODBC_SOURCE_DIR}/DriverManager/SQLDisconnect.c +${ODBC_SOURCE_DIR}/DriverManager/SQLDriverConnect.c +${ODBC_SOURCE_DIR}/DriverManager/SQLDriverConnectW.c +${ODBC_SOURCE_DIR}/DriverManager/SQLDrivers.c +${ODBC_SOURCE_DIR}/DriverManager/SQLDriversW.c +${ODBC_SOURCE_DIR}/DriverManager/SQLEndTran.c +${ODBC_SOURCE_DIR}/DriverManager/SQLError.c +${ODBC_SOURCE_DIR}/DriverManager/SQLErrorW.c +${ODBC_SOURCE_DIR}/DriverManager/SQLExecDirect.c +${ODBC_SOURCE_DIR}/DriverManager/SQLExecDirectW.c +${ODBC_SOURCE_DIR}/DriverManager/SQLExecute.c +${ODBC_SOURCE_DIR}/DriverManager/SQLExtendedFetch.c +${ODBC_SOURCE_DIR}/DriverManager/SQLFetch.c +${ODBC_SOURCE_DIR}/DriverManager/SQLFetchScroll.c +${ODBC_SOURCE_DIR}/DriverManager/SQLForeignKeys.c +${ODBC_SOURCE_DIR}/DriverManager/SQLForeignKeysW.c +${ODBC_SOURCE_DIR}/DriverManager/SQLFreeConnect.c +${ODBC_SOURCE_DIR}/DriverManager/SQLFreeEnv.c +${ODBC_SOURCE_DIR}/DriverManager/SQLFreeHandle.c +${ODBC_SOURCE_DIR}/DriverManager/SQLFreeStmt.c +${ODBC_SOURCE_DIR}/DriverManager/SQLGetConnectAttr.c +${ODBC_SOURCE_DIR}/DriverManager/SQLGetConnectAttrW.c +${ODBC_SOURCE_DIR}/DriverManager/SQLGetConnectOption.c +${ODBC_SOURCE_DIR}/DriverManager/SQLGetConnectOptionW.c +${ODBC_SOURCE_DIR}/DriverManager/SQLGetCursorName.c 
+${ODBC_SOURCE_DIR}/DriverManager/SQLGetCursorNameW.c +${ODBC_SOURCE_DIR}/DriverManager/SQLGetData.c +${ODBC_SOURCE_DIR}/DriverManager/SQLGetDescField.c +${ODBC_SOURCE_DIR}/DriverManager/SQLGetDescFieldW.c +${ODBC_SOURCE_DIR}/DriverManager/SQLGetDescRec.c +${ODBC_SOURCE_DIR}/DriverManager/SQLGetDescRecW.c +${ODBC_SOURCE_DIR}/DriverManager/SQLGetDiagField.c +${ODBC_SOURCE_DIR}/DriverManager/SQLGetDiagFieldW.c +${ODBC_SOURCE_DIR}/DriverManager/SQLGetDiagRec.c +${ODBC_SOURCE_DIR}/DriverManager/SQLGetDiagRecW.c +${ODBC_SOURCE_DIR}/DriverManager/SQLGetEnvAttr.c +${ODBC_SOURCE_DIR}/DriverManager/SQLGetFunctions.c +${ODBC_SOURCE_DIR}/DriverManager/SQLGetInfo.c +${ODBC_SOURCE_DIR}/DriverManager/SQLGetInfoW.c +${ODBC_SOURCE_DIR}/DriverManager/SQLGetStmtAttr.c +${ODBC_SOURCE_DIR}/DriverManager/SQLGetStmtAttrW.c +${ODBC_SOURCE_DIR}/DriverManager/SQLGetStmtOption.c +${ODBC_SOURCE_DIR}/DriverManager/SQLGetTypeInfo.c +${ODBC_SOURCE_DIR}/DriverManager/SQLGetTypeInfoW.c +${ODBC_SOURCE_DIR}/DriverManager/SQLMoreResults.c +${ODBC_SOURCE_DIR}/DriverManager/SQLNativeSql.c +${ODBC_SOURCE_DIR}/DriverManager/SQLNativeSqlW.c +${ODBC_SOURCE_DIR}/DriverManager/SQLNumParams.c +${ODBC_SOURCE_DIR}/DriverManager/SQLNumResultCols.c +${ODBC_SOURCE_DIR}/DriverManager/SQLParamData.c +${ODBC_SOURCE_DIR}/DriverManager/SQLParamOptions.c +${ODBC_SOURCE_DIR}/DriverManager/SQLPrepare.c +${ODBC_SOURCE_DIR}/DriverManager/SQLPrepareW.c +${ODBC_SOURCE_DIR}/DriverManager/SQLPrimaryKeys.c +${ODBC_SOURCE_DIR}/DriverManager/SQLPrimaryKeysW.c +${ODBC_SOURCE_DIR}/DriverManager/SQLProcedureColumns.c +${ODBC_SOURCE_DIR}/DriverManager/SQLProcedureColumnsW.c +${ODBC_SOURCE_DIR}/DriverManager/SQLProcedures.c +${ODBC_SOURCE_DIR}/DriverManager/SQLProceduresW.c +${ODBC_SOURCE_DIR}/DriverManager/SQLPutData.c +${ODBC_SOURCE_DIR}/DriverManager/SQLRowCount.c +${ODBC_SOURCE_DIR}/DriverManager/SQLSetConnectAttr.c +${ODBC_SOURCE_DIR}/DriverManager/SQLSetConnectAttrW.c +${ODBC_SOURCE_DIR}/DriverManager/SQLSetConnectOption.c 
+${ODBC_SOURCE_DIR}/DriverManager/SQLSetConnectOptionW.c +${ODBC_SOURCE_DIR}/DriverManager/SQLSetCursorName.c +${ODBC_SOURCE_DIR}/DriverManager/SQLSetCursorNameW.c +${ODBC_SOURCE_DIR}/DriverManager/SQLSetDescField.c +${ODBC_SOURCE_DIR}/DriverManager/SQLSetDescFieldW.c +${ODBC_SOURCE_DIR}/DriverManager/SQLSetDescRec.c +${ODBC_SOURCE_DIR}/DriverManager/SQLSetEnvAttr.c +${ODBC_SOURCE_DIR}/DriverManager/SQLSetParam.c +${ODBC_SOURCE_DIR}/DriverManager/SQLSetPos.c +${ODBC_SOURCE_DIR}/DriverManager/SQLSetScrollOptions.c +${ODBC_SOURCE_DIR}/DriverManager/SQLSetStmtAttr.c +${ODBC_SOURCE_DIR}/DriverManager/SQLSetStmtAttrW.c +${ODBC_SOURCE_DIR}/DriverManager/SQLSetStmtOption.c +${ODBC_SOURCE_DIR}/DriverManager/SQLSetStmtOptionW.c +${ODBC_SOURCE_DIR}/DriverManager/SQLSpecialColumns.c +${ODBC_SOURCE_DIR}/DriverManager/SQLSpecialColumnsW.c +${ODBC_SOURCE_DIR}/DriverManager/SQLStatistics.c +${ODBC_SOURCE_DIR}/DriverManager/SQLStatisticsW.c +${ODBC_SOURCE_DIR}/DriverManager/SQLTablePrivileges.c +${ODBC_SOURCE_DIR}/DriverManager/SQLTablePrivilegesW.c +${ODBC_SOURCE_DIR}/DriverManager/SQLTables.c +${ODBC_SOURCE_DIR}/DriverManager/SQLTablesW.c +${ODBC_SOURCE_DIR}/DriverManager/SQLTransact.c + +${ODBC_SOURCE_DIR}/odbcinst/_logging.c +${ODBC_SOURCE_DIR}/odbcinst/_odbcinst_ConfigModeINI.c +${ODBC_SOURCE_DIR}/odbcinst/ODBCINSTConstructProperties.c +${ODBC_SOURCE_DIR}/odbcinst/ODBCINSTDestructProperties.c +${ODBC_SOURCE_DIR}/odbcinst/_odbcinst_GetEntries.c +${ODBC_SOURCE_DIR}/odbcinst/_odbcinst_GetSections.c +${ODBC_SOURCE_DIR}/odbcinst/ODBCINSTSetProperty.c +${ODBC_SOURCE_DIR}/odbcinst/_odbcinst_SystemINI.c +${ODBC_SOURCE_DIR}/odbcinst/_odbcinst_UserINI.c +${ODBC_SOURCE_DIR}/odbcinst/ODBCINSTValidateProperties.c +${ODBC_SOURCE_DIR}/odbcinst/ODBCINSTValidateProperty.c +${ODBC_SOURCE_DIR}/odbcinst/SQLConfigDataSource.c +${ODBC_SOURCE_DIR}/odbcinst/SQLConfigDriver.c +${ODBC_SOURCE_DIR}/odbcinst/SQLCreateDataSource.c +${ODBC_SOURCE_DIR}/odbcinst/_SQLDriverConnectPrompt.c 
+${ODBC_SOURCE_DIR}/odbcinst/SQLGetAvailableDrivers.c +${ODBC_SOURCE_DIR}/odbcinst/SQLGetConfigMode.c +${ODBC_SOURCE_DIR}/odbcinst/_SQLGetInstalledDrivers.c +${ODBC_SOURCE_DIR}/odbcinst/SQLGetInstalledDrivers.c +${ODBC_SOURCE_DIR}/odbcinst/SQLGetPrivateProfileString.c +${ODBC_SOURCE_DIR}/odbcinst/SQLGetTranslator.c +${ODBC_SOURCE_DIR}/odbcinst/SQLInstallDriverEx.c +${ODBC_SOURCE_DIR}/odbcinst/SQLInstallDriverManager.c +${ODBC_SOURCE_DIR}/odbcinst/SQLInstallerError.c +${ODBC_SOURCE_DIR}/odbcinst/SQLInstallODBC.c +${ODBC_SOURCE_DIR}/odbcinst/SQLInstallTranslatorEx.c +${ODBC_SOURCE_DIR}/odbcinst/SQLManageDataSources.c +${ODBC_SOURCE_DIR}/odbcinst/SQLPostInstallerError.c +${ODBC_SOURCE_DIR}/odbcinst/SQLReadFileDSN.c +${ODBC_SOURCE_DIR}/odbcinst/SQLRemoveDriver.c +${ODBC_SOURCE_DIR}/odbcinst/SQLRemoveDriverManager.c +${ODBC_SOURCE_DIR}/odbcinst/SQLRemoveDSNFromIni.c +${ODBC_SOURCE_DIR}/odbcinst/SQLRemoveTranslator.c +${ODBC_SOURCE_DIR}/odbcinst/SQLSetConfigMode.c +${ODBC_SOURCE_DIR}/odbcinst/SQLValidDSN.c +${ODBC_SOURCE_DIR}/odbcinst/SQLWriteDSNToIni.c +${ODBC_SOURCE_DIR}/odbcinst/SQLWriteFileDSN.c +${ODBC_SOURCE_DIR}/odbcinst/_SQLWriteInstalledDrivers.c +${ODBC_SOURCE_DIR}/odbcinst/SQLWritePrivateProfileString.c + +${ODBC_SOURCE_DIR}/ini/iniAllTrim.c +${ODBC_SOURCE_DIR}/ini/iniAppend.c +${ODBC_SOURCE_DIR}/ini/iniClose.c +${ODBC_SOURCE_DIR}/ini/iniCommit.c +${ODBC_SOURCE_DIR}/ini/iniCursor.c +${ODBC_SOURCE_DIR}/ini/iniDelete.c +${ODBC_SOURCE_DIR}/ini/_iniDump.c +${ODBC_SOURCE_DIR}/ini/iniElement.c +${ODBC_SOURCE_DIR}/ini/iniElementCount.c +${ODBC_SOURCE_DIR}/ini/iniGetBookmark.c +${ODBC_SOURCE_DIR}/ini/iniGotoBookmark.c +${ODBC_SOURCE_DIR}/ini/iniObject.c +${ODBC_SOURCE_DIR}/ini/iniObjectDelete.c +${ODBC_SOURCE_DIR}/ini/iniObjectEOL.c +${ODBC_SOURCE_DIR}/ini/iniObjectFirst.c +${ODBC_SOURCE_DIR}/ini/iniObjectInsert.c +${ODBC_SOURCE_DIR}/ini/iniObjectLast.c +${ODBC_SOURCE_DIR}/ini/iniObjectNext.c +${ODBC_SOURCE_DIR}/ini/_iniObjectRead.c 
+${ODBC_SOURCE_DIR}/ini/iniObjectSeek.c +${ODBC_SOURCE_DIR}/ini/iniObjectSeekSure.c +${ODBC_SOURCE_DIR}/ini/iniObjectUpdate.c +${ODBC_SOURCE_DIR}/ini/iniOpen.c +${ODBC_SOURCE_DIR}/ini/iniProperty.c +${ODBC_SOURCE_DIR}/ini/iniPropertyDelete.c +${ODBC_SOURCE_DIR}/ini/iniPropertyEOL.c +${ODBC_SOURCE_DIR}/ini/iniPropertyFirst.c +${ODBC_SOURCE_DIR}/ini/iniPropertyInsert.c +${ODBC_SOURCE_DIR}/ini/iniPropertyLast.c +${ODBC_SOURCE_DIR}/ini/iniPropertyNext.c +${ODBC_SOURCE_DIR}/ini/_iniPropertyRead.c +${ODBC_SOURCE_DIR}/ini/iniPropertySeek.c +${ODBC_SOURCE_DIR}/ini/iniPropertySeekSure.c +${ODBC_SOURCE_DIR}/ini/iniPropertyUpdate.c +${ODBC_SOURCE_DIR}/ini/iniPropertyValue.c +${ODBC_SOURCE_DIR}/ini/_iniScanUntilObject.c +${ODBC_SOURCE_DIR}/ini/iniToUpper.c +${ODBC_SOURCE_DIR}/ini/iniValue.c + +${ODBC_SOURCE_DIR}/log/logClear.c +${ODBC_SOURCE_DIR}/log/logClose.c +${ODBC_SOURCE_DIR}/log/_logFreeMsg.c +${ODBC_SOURCE_DIR}/log/logOn.c +${ODBC_SOURCE_DIR}/log/logOpen.c +${ODBC_SOURCE_DIR}/log/logPeekMsg.c +${ODBC_SOURCE_DIR}/log/logPopMsg.c +${ODBC_SOURCE_DIR}/log/logPushMsg.c + +${ODBC_SOURCE_DIR}/lst/_lstAdjustCurrent.c +${ODBC_SOURCE_DIR}/lst/lstAppend.c +${ODBC_SOURCE_DIR}/lst/lstClose.c +${ODBC_SOURCE_DIR}/lst/lstDelete.c +${ODBC_SOURCE_DIR}/lst/_lstDump.c +${ODBC_SOURCE_DIR}/lst/lstEOL.c +${ODBC_SOURCE_DIR}/lst/lstFirst.c +${ODBC_SOURCE_DIR}/lst/_lstFreeItem.c +${ODBC_SOURCE_DIR}/lst/lstGetBookMark.c +${ODBC_SOURCE_DIR}/lst/lstGet.c +${ODBC_SOURCE_DIR}/lst/lstGotoBookMark.c +${ODBC_SOURCE_DIR}/lst/lstGoto.c +${ODBC_SOURCE_DIR}/lst/lstInsert.c +${ODBC_SOURCE_DIR}/lst/lstLast.c +${ODBC_SOURCE_DIR}/lst/lstNext.c +${ODBC_SOURCE_DIR}/lst/_lstNextValidItem.c +${ODBC_SOURCE_DIR}/lst/lstOpen.c +${ODBC_SOURCE_DIR}/lst/lstOpenCursor.c +${ODBC_SOURCE_DIR}/lst/lstPrev.c +${ODBC_SOURCE_DIR}/lst/_lstPrevValidItem.c +${ODBC_SOURCE_DIR}/lst/lstSeek.c +${ODBC_SOURCE_DIR}/lst/lstSeekItem.c +${ODBC_SOURCE_DIR}/lst/lstSet.c +${ODBC_SOURCE_DIR}/lst/lstSetFreeFunc.c 
+${ODBC_SOURCE_DIR}/lst/_lstVisible.c +) + +add_library(unixodbc STATIC ${SRCS}) + +target_link_libraries(unixodbc ltdl) + +# SYSTEM_FILE_PATH was changed to /etc + +target_include_directories(unixodbc PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/linux_x86_64/private) +target_include_directories(unixodbc PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/linux_x86_64) +target_include_directories(unixodbc PUBLIC ${ODBC_SOURCE_DIR}/include) + +target_compile_definitions(unixodbc PRIVATE -DHAVE_CONFIG_H) + +target_compile_options(unixodbc PRIVATE -Wno-dangling-else -Wno-parentheses -Wno-misleading-indentation -Wno-unknown-warning-option -O2) diff --git a/contrib/unixodbc-cmake/linux_x86_64/libltdl/config.h b/contrib/unixodbc-cmake/linux_x86_64/libltdl/config.h new file mode 100644 index 00000000000..194779b2b98 --- /dev/null +++ b/contrib/unixodbc-cmake/linux_x86_64/libltdl/config.h @@ -0,0 +1,181 @@ +/* config.h. Generated from config-h.in by configure. */ +/* config-h.in. Generated from configure.ac by autoheader. */ + +/* Define to 1 if you have the `argz_add' function. */ +#define HAVE_ARGZ_ADD 1 + +/* Define to 1 if you have the `argz_append' function. */ +#define HAVE_ARGZ_APPEND 1 + +/* Define to 1 if you have the `argz_count' function. */ +#define HAVE_ARGZ_COUNT 1 + +/* Define to 1 if you have the `argz_create_sep' function. */ +#define HAVE_ARGZ_CREATE_SEP 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_ARGZ_H 1 + +/* Define to 1 if you have the `argz_insert' function. */ +#define HAVE_ARGZ_INSERT 1 + +/* Define to 1 if you have the `argz_next' function. */ +#define HAVE_ARGZ_NEXT 1 + +/* Define to 1 if you have the `argz_stringify' function. */ +#define HAVE_ARGZ_STRINGIFY 1 + +/* Define to 1 if you have the `closedir' function. */ +#define HAVE_CLOSEDIR 1 + +/* Define to 1 if you have the declaration of `cygwin_conv_path', and to 0 if + you don't. */ +/* #undef HAVE_DECL_CYGWIN_CONV_PATH */ + +/* Define to 1 if you have the header file. 
*/ +#define HAVE_DIRENT_H 1 + +/* Define if you have the GNU dld library. */ +/* #undef HAVE_DLD */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_DLD_H */ + +/* Define to 1 if you have the `dlerror' function. */ +#define HAVE_DLERROR 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_DLFCN_H 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_DL_H */ + +/* Define if you have the _dyld_func_lookup function. */ +/* #undef HAVE_DYLD */ + +/* Define to 1 if the system has the type `error_t'. */ +#define HAVE_ERROR_T 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_INTTYPES_H 1 + +/* Define if you have the libdl library or equivalent. */ +#define HAVE_LIBDL 1 + +/* Define if libdlloader will be built on this platform */ +#define HAVE_LIBDLLOADER 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_MACH_O_DYLD_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_MEMORY_H 1 + +/* Define to 1 if you have the `opendir' function. */ +#define HAVE_OPENDIR 1 + +/* Define if libtool can extract symbol lists from object files. */ +#define HAVE_PRELOADED_SYMBOLS 1 + +/* Define to 1 if you have the `readdir' function. */ +#define HAVE_READDIR 1 + +/* Define if you have the shl_load function. */ +/* #undef HAVE_SHL_LOAD */ + +/* Define to 1 if you have the header file. */ +#define HAVE_STDINT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STDLIB_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STRINGS_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STRING_H 1 + +/* Define to 1 if you have the `strlcat' function. */ +/* #undef HAVE_STRLCAT */ + +/* Define to 1 if you have the `strlcpy' function. */ +/* #undef HAVE_STRLCPY */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYS_DL_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_STAT_H 1 + +/* Define to 1 if you have the header file. 
*/ +#define HAVE_SYS_TYPES_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_UNISTD_H 1 + +/* This value is set to 1 to indicate that the system argz facility works */ +#define HAVE_WORKING_ARGZ 1 + +/* Define if the OS needs help to load dependent libraries for dlopen(). */ +/* #undef LTDL_DLOPEN_DEPLIBS */ + +/* Define to the system default library search path. */ +#define LT_DLSEARCH_PATH "/lib:/usr/lib:/usr/lib/x86_64-linux-gnu/libfakeroot:/usr/local/lib:/usr/local/lib/x86_64-linux-gnu:/lib/x86_64-linux-gnu:/usr/lib/x86_64-linux-gnu:/lib32:/usr/lib32" + +/* The archive extension */ +#define LT_LIBEXT "a" + +/* The archive prefix */ +#define LT_LIBPREFIX "lib" + +/* Define to the extension used for runtime loadable modules, say, ".so". */ +#define LT_MODULE_EXT ".so" + +/* Define to the name of the environment variable that determines the run-time + module search path. */ +#define LT_MODULE_PATH_VAR "LD_LIBRARY_PATH" + +/* Define to the sub-directory where libtool stores uninstalled libraries. */ +#define LT_OBJDIR ".libs/" + +/* Define to the shared library suffix, say, ".dylib". */ +/* #undef LT_SHARED_EXT */ + +/* Define to the shared archive member specification, say "(shr.o)". */ +/* #undef LT_SHARED_LIB_MEMBER */ + +/* Define if dlsym() requires a leading underscore in symbol names. */ +/* #undef NEED_USCORE */ + +/* Name of package */ +#define PACKAGE "libltdl" + +/* Define to the address where bug reports for this package should be sent. */ +#define PACKAGE_BUGREPORT "bug-libtool@gnu.org" + +/* Define to the full name of this package. */ +#define PACKAGE_NAME "libltdl" + +/* Define to the full name and version of this package. */ +#define PACKAGE_STRING "libltdl 2.4.3a" + +/* Define to the one symbol short name of this package. */ +#define PACKAGE_TARNAME "libltdl" + +/* Define to the home page for this package. */ +#define PACKAGE_URL "" + +/* Define to the version of this package. 
*/ +#define PACKAGE_VERSION "2.4.3a" + +/* Define to 1 if you have the ANSI C header files. */ +#define STDC_HEADERS 1 + +/* Version number of package */ +#define VERSION "2.4.3a" + +/* Define so that glibc/gnulib argp.h does not typedef error_t. */ +/* #undef __error_t_defined */ + +/* Define to a type to use for 'error_t' if it is not otherwise available. */ +/* #undef error_t */ diff --git a/contrib/unixodbc-cmake/linux_x86_64/libltdl/libltdlcS.c b/contrib/unixodbc-cmake/linux_x86_64/libltdl/libltdlcS.c new file mode 100644 index 00000000000..ca866eb5986 --- /dev/null +++ b/contrib/unixodbc-cmake/linux_x86_64/libltdl/libltdlcS.c @@ -0,0 +1,53 @@ +/* libltdlcS.c - symbol resolution table for 'libltdlc' dlsym emulation. */ +/* Generated by libtool (GNU libtool) 2.4.6 */ + +#ifdef __cplusplus +extern "C" { +#endif + +#if defined __GNUC__ && (((__GNUC__ == 4) && (__GNUC_MINOR__ >= 4)) || (__GNUC__ > 4)) +#pragma GCC diagnostic ignored "-Wstrict-prototypes" +#endif + +/* Keep this code in sync between libtool.m4, ltmain, lt_system.h, and tests. */ +#if defined _WIN32 || defined __CYGWIN__ || defined _WIN32_WCE +/* DATA imports from DLLs on WIN32 can't be const, because runtime + relocations are performed -- see ld's documentation on pseudo-relocs. */ +# define LT_DLSYM_CONST +#elif defined __osf__ +/* This system does not cope well with relocations in const data. */ +# define LT_DLSYM_CONST +#else +# define LT_DLSYM_CONST const +#endif + +#define STREQ(s1, s2) (strcmp ((s1), (s2)) == 0) + +/* External symbol declarations for the compiler. */ +extern int dlopen_LTX_get_vtable(); + +/* The mapping between symbol names and symbols. 
*/ +typedef struct { + const char *name; + void *address; +} lt_dlsymlist; +extern LT_DLSYM_CONST lt_dlsymlist +lt_libltdlc_LTX_preloaded_symbols[]; +LT_DLSYM_CONST lt_dlsymlist +lt_libltdlc_LTX_preloaded_symbols[] = +{ {"libltdlc", (void *) 0}, + {"dlopen.a", (void *) 0}, + {"dlopen_LTX_get_vtable", (void *) &dlopen_LTX_get_vtable}, + {0, (void *) 0} +}; + +/* This works around a problem in FreeBSD linker */ +#ifdef FREEBSD_WORKAROUND +static const void *lt_preloaded_setup() { + return lt_libltdlc_LTX_preloaded_symbols; +} +#endif + +#ifdef __cplusplus +} +#endif diff --git a/contrib/unixodbc-cmake/linux_x86_64/private/config.h b/contrib/unixodbc-cmake/linux_x86_64/private/config.h new file mode 100644 index 00000000000..d80a4da4665 --- /dev/null +++ b/contrib/unixodbc-cmake/linux_x86_64/private/config.h @@ -0,0 +1,496 @@ +/* config.h. Generated from config.h.in by configure. */ +/* config.h.in. Generated from configure.ac by autoheader. */ + +/* Encoding to use for CHAR */ +#define ASCII_ENCODING "auto-search" + +/* Install bindir */ +#define BIN_PREFIX "/usr/local/bin" + +/* Use a semaphore to allow ODBCConfig to display running counts */ +/* #undef COLLECT_STATS */ + +/* Define to one of `_getb67', `GETB67', `getb67' for Cray-2 and Cray-YMP + systems. This function is required for `alloca.c' support on those systems. + */ +/* #undef CRAY_STACKSEG_END */ + +/* Define to 1 if using `alloca.c'. */ +/* #undef C_ALLOCA */ + +/* Lib directory */ +#define DEFLIB_PATH "/usr/local/lib" + +/* Using ini cacheing */ +#define ENABLE_INI_CACHING /**/ + +/* Install exec_prefix */ +#define EXEC_PREFIX "/usr/local" + +/* Disable the precise but slow checking of the validity of handles */ +/* #undef FAST_HANDLE_VALIDATE */ + +/* Define to 1 if you have `alloca', as a function or macro. */ +#define HAVE_ALLOCA 1 + +/* Define to 1 if you have and it should be used (not on Ultrix). + */ +#define HAVE_ALLOCA_H 1 + +/* Define to 1 if you have the `argz_add' function. 
*/ +#define HAVE_ARGZ_ADD 1 + +/* Define to 1 if you have the `argz_append' function. */ +#define HAVE_ARGZ_APPEND 1 + +/* Define to 1 if you have the `argz_count' function. */ +#define HAVE_ARGZ_COUNT 1 + +/* Define to 1 if you have the `argz_create_sep' function. */ +#define HAVE_ARGZ_CREATE_SEP 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_ARGZ_H 1 + +/* Define to 1 if you have the `argz_insert' function. */ +#define HAVE_ARGZ_INSERT 1 + +/* Define to 1 if you have the `argz_next' function. */ +#define HAVE_ARGZ_NEXT 1 + +/* Define to 1 if you have the `argz_stringify' function. */ +#define HAVE_ARGZ_STRINGIFY 1 + +/* Define to 1 if you have the `atoll' function. */ +#define HAVE_ATOLL 1 + +/* Define to 1 if you have the `closedir' function. */ +#define HAVE_CLOSEDIR 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_CRYPT_H 1 + +/* Define to 1 if you have the declaration of `cygwin_conv_path', and to 0 if + you don't. */ +/* #undef HAVE_DECL_CYGWIN_CONV_PATH */ + +/* Define to 1 if you have the header file, and it defines `DIR'. + */ +#define HAVE_DIRENT_H 1 + +/* Define if you have the GNU dld library. */ +/* #undef HAVE_DLD */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_DLD_H */ + +/* Define to 1 if you have the `dlerror' function. */ +#define HAVE_DLERROR 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_DLFCN_H 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_DL_H */ + +/* Define to 1 if you don't have `vprintf' but do have `_doprnt.' */ +/* #undef HAVE_DOPRNT */ + +/* Define if you have the _dyld_func_lookup function. */ +/* #undef HAVE_DYLD */ + +/* Define to 1 if you have the `endpwent' function. */ +#define HAVE_ENDPWENT 1 + +/* Define to 1 if the system has the type `error_t'. */ +#define HAVE_ERROR_T 1 + +/* Define to 1 if you have the `ftime' function. */ +#define HAVE_FTIME 1 + +/* Define to 1 if you have the `ftok' function. 
*/ +/* #undef HAVE_FTOK */ + +/* Define to 1 if you have the `getpwuid' function. */ +#define HAVE_GETPWUID 1 + +/* Define to 1 if you have the `gettimeofday' function. */ +#define HAVE_GETTIMEOFDAY 1 + +/* Define to 1 if you have the `getuid' function. */ +#define HAVE_GETUID 1 + +/* Define if you have the iconv() function. */ +#define HAVE_ICONV 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_INTTYPES_H 1 + +/* Define if you have and nl_langinfo(CODESET). */ +#define HAVE_LANGINFO_CODESET 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_LANGINFO_H 1 + +/* Add -lcrypt to lib list */ +#define HAVE_LIBCRYPT /**/ + +/* Define if you have the libdl library or equivalent. */ +#define HAVE_LIBDL 1 + +/* Define if libdlloader will be built on this platform */ +#define HAVE_LIBDLLOADER 1 + +/* Use the -lpth thread library */ +/* #undef HAVE_LIBPTH */ + +/* Use -lpthread threading lib */ +#define HAVE_LIBPTHREAD 1 + +/* Use the -lthread threading lib */ +/* #undef HAVE_LIBTHREAD */ + +/* Define to 1 if you have the header file. */ +#define HAVE_LIMITS_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_LOCALE_H 1 + +/* Use rentrant version of localtime */ +#define HAVE_LOCALTIME_R 1 + +/* Define if you have long long */ +#define HAVE_LONG_LONG 1 + +/* Define this if a modern libltdl is already installed */ +#define HAVE_LTDL 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_MACH_O_DYLD_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_MALLOC_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_MEMORY_H 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_MSQL_H */ + +/* Define to 1 if you have the header file, and it defines `DIR'. */ +/* #undef HAVE_NDIR_H */ + +/* Define to 1 if you have the `nl_langinfo' function. */ +#define HAVE_NL_LANGINFO 1 + +/* Define to 1 if you have the `opendir' function. 
*/ +#define HAVE_OPENDIR 1 + +/* Define if libtool can extract symbol lists from object files. */ +#define HAVE_PRELOADED_SYMBOLS 1 + +/* Define to 1 if the system has the type `ptrdiff_t'. */ +#define HAVE_PTRDIFF_T 1 + +/* Define to 1 if you have the `putenv' function. */ +#define HAVE_PUTENV 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_PWD_H 1 + +/* Define to 1 if you have the `readdir' function. */ +#define HAVE_READDIR 1 + +/* Add readline support */ +#define HAVE_READLINE 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_READLINE_HISTORY_H 1 + +/* Use the scandir lib */ +/* #undef HAVE_SCANDIR */ + +/* Define to 1 if you have the `semget' function. */ +/* #undef HAVE_SEMGET */ + +/* Define to 1 if you have the `semop' function. */ +/* #undef HAVE_SEMOP */ + +/* Define to 1 if you have the `setenv' function. */ +#define HAVE_SETENV 1 + +/* Define to 1 if you have the `setlocale' function. */ +#define HAVE_SETLOCALE 1 + +/* Define if you have the shl_load function. */ +/* #undef HAVE_SHL_LOAD */ + +/* Define to 1 if you have the `shmget' function. */ +/* #undef HAVE_SHMGET */ + +/* Define to 1 if you have the `snprintf' function. */ +/* #undef HAVE_SNPRINTF */ + +/* Define to 1 if you have the `socket' function. */ +#define HAVE_SOCKET 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STDARG_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STDDEF_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STDINT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STDLIB_H 1 + +/* Define to 1 if you have the `strcasecmp' function. */ +#define HAVE_STRCASECMP 1 + +/* Define to 1 if you have the `strchr' function. */ +#define HAVE_STRCHR 1 + +/* Define to 1 if you have the `strdup' function. */ +#define HAVE_STRDUP 1 + +/* Define to 1 if you have the `stricmp' function. */ +/* #undef HAVE_STRICMP */ + +/* Define to 1 if you have the header file. 
*/ +#define HAVE_STRINGS_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STRING_H 1 + +/* Define to 1 if you have the `strlcat' function. */ +/* #undef HAVE_STRLCAT */ + +/* Define to 1 if you have the `strlcpy' function. */ +/* #undef HAVE_STRLCPY */ + +/* Define to 1 if you have the `strncasecmp' function. */ +#define HAVE_STRNCASECMP 1 + +/* Define to 1 if you have the `strnicmp' function. */ +/* #undef HAVE_STRNICMP */ + +/* Define to 1 if you have the `strstr' function. */ +#define HAVE_STRSTR 1 + +/* Define to 1 if you have the `strtol' function. */ +#define HAVE_STRTOL 1 + +/* Define to 1 if you have the `strtoll' function. */ +#define HAVE_STRTOLL 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYNCH_H */ + +/* Define to 1 if you have the header file, and it defines `DIR'. + */ +/* #undef HAVE_SYS_DIR_H */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYS_DL_H */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYS_MALLOC_H */ + +/* Define to 1 if you have the header file, and it defines `DIR'. + */ +/* #undef HAVE_SYS_NDIR_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_SEM_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_STAT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_TIMEB_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_TIME_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_TYPES_H 1 + +/* Define to 1 if you have the `time' function. */ +#define HAVE_TIME 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_TIME_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_UNISTD_H 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_VARARGS_H */ + +/* Define to 1 if you have the `vprintf' function. */ +#define HAVE_VPRINTF 1 + +/* Define to 1 if you have the `vsnprintf' function. 
*/ +#define HAVE_VSNPRINTF 1 + +/* This value is set to 1 to indicate that the system argz facility works */ +#define HAVE_WORKING_ARGZ 1 + +/* Define as const if the declaration of iconv() needs const. */ +#define ICONV_CONST + +/* Install includedir */ +#define INCLUDE_PREFIX "/usr/local/include" + +/* Lib directory */ +#define LIB_PREFIX "/usr/local/lib" + +/* Define if the OS needs help to load dependent libraries for dlopen(). */ +/* #undef LTDL_DLOPEN_DEPLIBS */ + +/* Define to the system default library search path. */ +#define LT_DLSEARCH_PATH "/lib:/usr/lib:/usr/lib/x86_64-linux-gnu/libfakeroot:/usr/local/lib:/usr/local/lib/x86_64-linux-gnu:/lib/x86_64-linux-gnu:/usr/lib/x86_64-linux-gnu:/lib32:/usr/lib32" + +/* The archive extension */ +#define LT_LIBEXT "a" + +/* The archive prefix */ +#define LT_LIBPREFIX "lib" + +/* Define to the extension used for runtime loadable modules, say, ".so". */ +#define LT_MODULE_EXT ".so" + +/* Define to the name of the environment variable that determines the run-time + module search path. */ +#define LT_MODULE_PATH_VAR "LD_LIBRARY_PATH" + +/* Define to the sub-directory where libtool stores uninstalled libraries. */ +#define LT_OBJDIR ".libs/" + +/* Define to the shared library suffix, say, ".dylib". */ +/* #undef LT_SHARED_EXT */ + +/* Define to the shared archive member specification, say "(shr.o)". */ +/* #undef LT_SHARED_LIB_MEMBER */ + +/* Define if you need semundo union */ +/* #undef NEED_SEMUNDO_UNION */ + +/* Define if dlsym() requires a leading underscore in symbol names. */ +/* #undef NEED_USCORE */ + +/* Using OSX */ +/* #undef OSXHEADER */ + +/* Name of package */ +#define PACKAGE "unixODBC" + +/* Define to the address where bug reports for this package should be sent. */ +#define PACKAGE_BUGREPORT "nick@unixodbc.org" + +/* Define to the full name of this package. */ +#define PACKAGE_NAME "unixODBC" + +/* Define to the full name and version of this package. 
*/ +#define PACKAGE_STRING "unixODBC 2.3.6" + +/* Define to the one symbol short name of this package. */ +#define PACKAGE_TARNAME "unixODBC" + +/* Define to the home page for this package. */ +#define PACKAGE_URL "" + +/* Define to the version of this package. */ +#define PACKAGE_VERSION "2.3.6" + +/* Platform is 64 bit */ +#define PLATFORM64 /**/ + +/* Install prefix */ +#define PREFIX "/usr/local" + +/* Using QNX */ +/* #undef QNX_LIBLTDL */ + +/* Shared lib extension */ +#define SHLIBEXT ".so" + +/* The size of `long', as computed by sizeof. */ +#define SIZEOF_LONG 8 + +/* The size of `long int', as computed by sizeof. */ +#define SIZEOF_LONG_INT 8 + +/* If using the C implementation of alloca, define if you know the + direction of stack growth for your system; otherwise it will be + automatically deduced at runtime. + STACK_DIRECTION > 0 => grows toward higher addresses + STACK_DIRECTION < 0 => grows toward lower addresses + STACK_DIRECTION = 0 => direction of growth unknown */ +/* #undef STACK_DIRECTION */ + +/* Define to 1 if you have the ANSI C header files. */ +#define STDC_HEADERS 1 + +/* don't include unixODBC prefix in driver error messages */ +#define STRICT_ODBC_ERROR /**/ + +/* System file path */ +#define SYSTEM_FILE_PATH "/etc" + +/* Lib path */ +#define SYSTEM_LIB_PATH "/usr/local/lib" + +/* Define to 1 if you can safely include both and . */ +#define TIME_WITH_SYS_TIME 1 + +/* Define to 1 if your declares `struct tm'. */ +/* #undef TM_IN_SYS_TIME */ + +/* Encoding to use for UNICODE */ +#define UNICODE_ENCODING "auto-search" + +/* Flag that we are not using another DM */ +#define UNIXODBC /**/ + +/* We are building inside the unixODBC source tree */ +#define UNIXODBC_SOURCE /**/ + +/* Version number of package */ +#define VERSION "2.3.6" + +/* Work with IBM drivers that use 32 bit handles on 64 bit platforms */ +/* #undef WITH_HANDLE_REDIRECT */ + +/* Define to 1 if `lex' declares `yytext' as a `char *' by default, not a + `char[]'. 
*/ +/* #undef YYTEXT_POINTER */ + +/* Build flag for AIX */ +/* #undef _ALL_SOURCE */ + +/* Build flag for AIX */ +/* #undef _LONG_LONG */ + +/* Build flag for AIX */ +/* #undef _THREAD_SAFE */ + +/* Define so that glibc/gnulib argp.h does not typedef error_t. */ +/* #undef __error_t_defined */ + +/* Define to empty if `const' does not conform to ANSI C. */ +/* #undef const */ + +/* Define to a type to use for 'error_t' if it is not otherwise available. */ +/* #undef error_t */ + +/* Define to `int' if doesn't define. */ +/* #undef gid_t */ + +/* Define to `unsigned int' if does not define. */ +/* #undef size_t */ + +/* Define to `int' if doesn't define. */ +/* #undef uid_t */ diff --git a/contrib/unixodbc-cmake/linux_x86_64/unixodbc_conf.h b/contrib/unixodbc-cmake/linux_x86_64/unixodbc_conf.h new file mode 100644 index 00000000000..6597c85cea6 --- /dev/null +++ b/contrib/unixodbc-cmake/linux_x86_64/unixodbc_conf.h @@ -0,0 +1,60 @@ +/* unixodbc_conf.h. Generated from unixodbc_conf.h.in by configure. 
*/ +#ifndef HAVE_UNISTD_H +#define HAVE_UNISTD_H 1 +#endif + +#ifndef HAVE_PWD_H +#define HAVE_PWD_H 1 +#endif + +#ifndef HAVE_SYS_TIME_H +#define HAVE_SYS_TIME_H 1 +#endif + +#ifndef ODBC_STD +/* #undef ODBC_STD */ +#endif + +#ifndef UNICODE +/* #undef UNICODE */ +#endif + +#ifndef GUID_DEFINED +/* #undef GUID_DEFINED */ +#endif + +#ifndef SQL_WCHART_CONVERT +/* #undef SQL_WCHART_CONVERT */ +#endif + +#ifndef HAVE_LONG_LONG +#define HAVE_LONG_LONG 1 +#endif + +#ifndef ODBCINT64_TYPEA +/* #undef ODBCINT64_TYPEA */ +#endif + +#ifndef UODBCINT64_TYPE +/* #undef UODBCINT64_TYPE */ +#endif + +#ifndef DISABLE_INI_CACHING +/* #undef DISABLE_INI_CACHING */ +#endif + +#ifndef SIZEOF_LONG_INT +#define SIZEOF_LONG_INT 8 +#endif + +#ifndef ALLREADY_HAVE_WINDOWS_TYPE +/* #undef ALLREADY_HAVE_WINDOWS_TYPE */ +#endif + +#ifndef DONT_TD_VOID +/* #undef DONT_TD_VOID */ +#endif + +#ifndef DO_YOU_KNOW_WHAT_YOUR_ARE_DOING +/* #undef DO_YOU_KNOW_WHAT_YOUR_ARE_DOING */ +#endif diff --git a/contrib/zlib-ng b/contrib/zlib-ng index e07a52dbaa3..9173b89d467 160000 --- a/contrib/zlib-ng +++ b/contrib/zlib-ng @@ -1 +1 @@ -Subproject commit e07a52dbaa35d003f5659b221b29d220c091667b +Subproject commit 9173b89d46799582d20a30578e0aa9788bc7d6e1 diff --git a/dbms/CMakeLists.txt b/dbms/CMakeLists.txt index eaf21b0b6ac..8dba0991944 100644 --- a/dbms/CMakeLists.txt +++ b/dbms/CMakeLists.txt @@ -53,6 +53,7 @@ add_headers_and_sources(dbms src/Interpreters/ClusterProxy) add_headers_and_sources(dbms src/Columns) add_headers_and_sources(dbms src/Storages) add_headers_and_sources(dbms src/Storages/Distributed) +add_headers_and_sources(dbms src/Storages/Kafka) add_headers_and_sources(dbms src/Storages/MergeTree) add_headers_and_sources(dbms src/Client) add_headers_and_sources(dbms src/Formats) @@ -84,7 +85,7 @@ list (APPEND dbms_headers src/TableFunctions/ITableFunction.h src/TableFunctions add_library(clickhouse_common_io ${SPLIT_SHARED} ${clickhouse_common_io_headers} ${clickhouse_common_io_sources}) -if 
(ARCH_FREEBSD) +if (OS_FREEBSD) target_compile_definitions (clickhouse_common_io PUBLIC CLOCK_MONOTONIC_COARSE=CLOCK_MONOTONIC_FAST) endif () @@ -144,6 +145,7 @@ target_link_libraries (clickhouse_common_io ${EXECINFO_LIBRARY} ${ELF_LIBRARY} ${Boost_SYSTEM_LIBRARY} + apple_rt ${CMAKE_DL_LIBS} ) @@ -154,7 +156,6 @@ target_link_libraries (dbms ${MYSQLXX_LIBRARY} ${RE2_LIBRARY} ${RE2_ST_LIBRARY} - ${OPENSSL_CRYPTO_LIBRARY} ${BTRIE_LIBRARIES} ) @@ -217,6 +218,8 @@ if (USE_RDKAFKA) endif () endif () +target_link_libraries(dbms ${OPENSSL_CRYPTO_LIBRARY}) + target_link_libraries (dbms Threads::Threads ) @@ -244,8 +247,6 @@ add_subdirectory (programs) add_subdirectory (tests) if (ENABLE_TESTS) - include (${ClickHouse_SOURCE_DIR}/cmake/find_gtest.cmake) - if (USE_INTERNAL_GTEST_LIBRARY) # Google Test from sources add_subdirectory(${ClickHouse_SOURCE_DIR}/contrib/googletest/googletest ${CMAKE_CURRENT_BINARY_DIR}/googletest) diff --git a/dbms/cmake/version.cmake b/dbms/cmake/version.cmake index adc80346d8a..788ce74618a 100644 --- a/dbms/cmake/version.cmake +++ b/dbms/cmake/version.cmake @@ -1,30 +1,24 @@ # This strings autochanged from release_lib.sh: -set(VERSION_DESCRIBE v1.1.54389-testing) -set(VERSION_REVISION 54389) -set(VERSION_GITHASH 3894f094ea53a9411e30ed62ebcd1f7dec244086) +set(VERSION_REVISION 54405 CACHE STRING "") +set(VERSION_MAJOR 18 CACHE STRING "") +set(VERSION_MINOR 10 CACHE STRING "") +set(VERSION_PATCH 3 CACHE STRING "") +set(VERSION_GITHASH 1fa1b34f1ab01ea2e1a833eebd36a4806e529f52 CACHE STRING "") +set(VERSION_DESCRIBE v18.10.3-testing CACHE STRING "") +set(VERSION_STRING 18.10.3 CACHE STRING "") # end of autochange -set (VERSION_MAJOR 1) -set (VERSION_MINOR 1) -set (VERSION_PATCH ${VERSION_REVISION}) -set (VERSION_EXTRA "") -set (VERSION_TWEAK "") +set(VERSION_EXTRA "" CACHE STRING "") +set(VERSION_TWEAK "" CACHE STRING "") -set (VERSION_STRING "${VERSION_MAJOR}.${VERSION_MINOR}.${VERSION_PATCH}") if (VERSION_TWEAK) - set(VERSION_STRING 
"${VERSION_STRING}.${VERSION_TWEAK}") + string(CONCAT VERSION_STRING ${VERSION_STRING} "." ${VERSION_TWEAK}) endif () + if (VERSION_EXTRA) - set(VERSION_STRING "${VERSION_STRING}${VERSION_EXTRA}") + string(CONCAT VERSION_STRING ${VERSION_STRING} "." ${VERSION_EXTRA}) endif () -set (VERSION_FULL "${PROJECT_NAME} ${VERSION_STRING}") - -if (APPLE) - # dirty hack: ld: malformed 64-bit a.b.c.d.e version number: 1.1.54160 - math (EXPR VERSION_SO1 "${VERSION_REVISION}/255") - math (EXPR VERSION_SO2 "${VERSION_REVISION}%255") - set (VERSION_SO "${VERSION_MAJOR}.${VERSION_MINOR}.${VERSION_SO1}.${VERSION_SO2}") -else () - set (VERSION_SO "${VERSION_STRING}") -endif () +set (VERSION_NAME "${PROJECT_NAME}" CACHE STRING "") +set (VERSION_FULL "${VERSION_NAME} ${VERSION_STRING}" CACHE STRING "") +set (VERSION_SO "${VERSION_STRING}" CACHE STRING "") diff --git a/dbms/programs/CMakeLists.txt b/dbms/programs/CMakeLists.txt index 20baa6b039c..136616ca44b 100644 --- a/dbms/programs/CMakeLists.txt +++ b/dbms/programs/CMakeLists.txt @@ -13,6 +13,7 @@ option (ENABLE_CLICKHOUSE_COMPRESSOR "Enable clickhouse-compressor" ${ENABLE_CLI option (ENABLE_CLICKHOUSE_COPIER "Enable clickhouse-copier" ${ENABLE_CLICKHOUSE_ALL}) option (ENABLE_CLICKHOUSE_FORMAT "Enable clickhouse-format" ${ENABLE_CLICKHOUSE_ALL}) option (ENABLE_CLICKHOUSE_OBFUSCATOR "Enable clickhouse-obfuscator" ${ENABLE_CLICKHOUSE_ALL}) +option (ENABLE_CLICKHOUSE_ODBC_BRIDGE "Enable clickhouse-odbc-bridge" ${ENABLE_CLICKHOUSE_ALL}) configure_file (config_tools.h.in ${CMAKE_CURRENT_BINARY_DIR}/config_tools.h) @@ -27,10 +28,11 @@ add_subdirectory (copier) add_subdirectory (format) add_subdirectory (clang) add_subdirectory (obfuscator) +add_subdirectory (odbc-bridge) if (CLICKHOUSE_SPLIT_BINARY) set (CLICKHOUSE_ALL_TARGETS clickhouse-server clickhouse-client clickhouse-local clickhouse-benchmark clickhouse-performance-test - clickhouse-extract-from-config clickhouse-format clickhouse-copier) + clickhouse-extract-from-config 
clickhouse-compressor clickhouse-format clickhouse-copier clickhouse-odbc-bridge) if (USE_EMBEDDED_COMPILER) list (APPEND CLICKHOUSE_ALL_TARGETS clickhouse-clang clickhouse-lld) @@ -83,6 +85,9 @@ else () if (USE_EMBEDDED_COMPILER) target_link_libraries (clickhouse clickhouse-compiler-lib) endif () + if (ENABLE_CLICKHOUSE_ODBC_BRIDGE) + target_link_libraries (clickhouse clickhouse-odbc-bridge-lib) + endif() set (CLICKHOUSE_BUNDLE) if (ENABLE_CLICKHOUSE_SERVER) @@ -135,6 +140,12 @@ else () install (FILES ${CMAKE_CURRENT_BINARY_DIR}/clickhouse-obfuscator DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse) list(APPEND CLICKHOUSE_BUNDLE clickhouse-obfuscator) endif () + if (ENABLE_CLICKHOUSE_ODBC_BRIDGE) + add_custom_target (clickhouse-odbc-bridge ALL COMMAND ${CMAKE_COMMAND} -E create_symlink clickhouse clickhouse-odbc-bridge DEPENDS clickhouse) + install (FILES ${CMAKE_CURRENT_BINARY_DIR}/clickhouse-odbc-bridge DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse) + list(APPEND CLICKHOUSE_BUNDLE clickhouse-odbc-bridge) + endif () + # install always because depian package want this files: add_custom_target (clickhouse-clang ALL COMMAND ${CMAKE_COMMAND} -E create_symlink clickhouse clickhouse-clang DEPENDS clickhouse) @@ -152,6 +163,6 @@ else () endif () -if (USE_EMBEDDED_COMPILER AND ENABLE_CLICKHOUSE_SERVER) +if (TARGET clickhouse-server AND TARGET copy-headers) add_dependencies(clickhouse-server copy-headers) endif () diff --git a/dbms/programs/clang/CMakeLists.txt b/dbms/programs/clang/CMakeLists.txt index 00d7215e74c..4844cb37c93 100644 --- a/dbms/programs/clang/CMakeLists.txt +++ b/dbms/programs/clang/CMakeLists.txt @@ -13,11 +13,12 @@ if (CLICKHOUSE_SPLIT_BINARY) endif () endif () -set(TMP_HEADERS_DIR "${CMAKE_CURRENT_BINARY_DIR}/headers") +set(TMP_HEADERS_DIR "${CMAKE_CURRENT_BINARY_DIR}/${INTERNAL_COMPILER_HEADERS_RELATIVE}") # Make and install empty dir for debian package if compiler disabled add_custom_target(make-headers-directory ALL COMMAND 
${CMAKE_COMMAND} -E make_directory ${TMP_HEADERS_DIR}) -install(DIRECTORY ${TMP_HEADERS_DIR} DESTINATION ${CMAKE_INSTALL_DATAROOTDIR}/clickhouse COMPONENT clickhouse) -if (USE_EMBEDDED_COMPILER) +install(DIRECTORY ${TMP_HEADERS_DIR} DESTINATION ${CMAKE_INSTALL_DATAROOTDIR}/clickhouse/${INTERNAL_COMPILER_HEADERS_DIR} COMPONENT clickhouse) +# TODO: fix on macos copy_headers.sh: sed --posix +if (USE_EMBEDDED_COMPILER AND NOT APPLE) add_custom_target(copy-headers ALL env CLANG=${CMAKE_CURRENT_BINARY_DIR}/../clickhouse-clang BUILD_PATH=${ClickHouse_BINARY_DIR} DESTDIR=${ClickHouse_SOURCE_DIR} ${ClickHouse_SOURCE_DIR}/copy_headers.sh ${ClickHouse_SOURCE_DIR} ${TMP_HEADERS_DIR} DEPENDS clickhouse-clang WORKING_DIRECTORY ${ClickHouse_SOURCE_DIR} SOURCES ${ClickHouse_SOURCE_DIR}/copy_headers.sh) if (USE_INTERNAL_LLVM_LIBRARY) diff --git a/dbms/programs/clang/Compiler-5.0.0/CMakeLists.txt b/dbms/programs/clang/Compiler-5.0.0/CMakeLists.txt index e07a570a434..d02d266d5a5 100644 --- a/dbms/programs/clang/Compiler-5.0.0/CMakeLists.txt +++ b/dbms/programs/clang/Compiler-5.0.0/CMakeLists.txt @@ -43,4 +43,7 @@ LLVMSupport #PollyPPCG PUBLIC ${ZLIB_LIBRARIES} ${EXECINFO_LIBRARY} Threads::Threads +${MALLOC_LIBRARIES} +${GLIBC_COMPATIBILITY_LIBRARIES} +${MEMCPY_LIBRARIES} ) diff --git a/dbms/programs/clang/Compiler-6.0.0/CMakeLists.txt b/dbms/programs/clang/Compiler-6.0.0/CMakeLists.txt index 4f5e703bd63..701b99d08e3 100644 --- a/dbms/programs/clang/Compiler-6.0.0/CMakeLists.txt +++ b/dbms/programs/clang/Compiler-6.0.0/CMakeLists.txt @@ -43,4 +43,7 @@ ${REQUIRED_LLVM_LIBRARIES} #PollyPPCG PUBLIC ${ZLIB_LIBRARIES} ${EXECINFO_LIBRARY} Threads::Threads +${MALLOC_LIBRARIES} +${GLIBC_COMPATIBILITY_LIBRARIES} +${MEMCPY_LIBRARIES} ) diff --git a/dbms/programs/clang/Compiler-7.0.0/CMakeLists.txt b/dbms/programs/clang/Compiler-7.0.0/CMakeLists.txt index f46e8ef0dc1..081037cdeed 100644 --- a/dbms/programs/clang/Compiler-7.0.0/CMakeLists.txt +++ 
b/dbms/programs/clang/Compiler-7.0.0/CMakeLists.txt @@ -39,4 +39,7 @@ lldCore ${REQUIRED_LLVM_LIBRARIES} PUBLIC ${ZLIB_LIBRARIES} ${EXECINFO_LIBRARY} Threads::Threads +${MALLOC_LIBRARIES} +${GLIBC_COMPATIBILITY_LIBRARIES} +${MEMCPY_LIBRARIES} ) diff --git a/dbms/programs/client/CMakeLists.txt b/dbms/programs/client/CMakeLists.txt index c7d2311b11e..bb71d7e113d 100644 --- a/dbms/programs/client/CMakeLists.txt +++ b/dbms/programs/client/CMakeLists.txt @@ -1,6 +1,8 @@ add_library (clickhouse-client-lib Client.cpp) target_link_libraries (clickhouse-client-lib clickhouse_functions clickhouse_aggregate_functions ${LINE_EDITING_LIBS} ${Boost_PROGRAM_OPTIONS_LIBRARY}) -target_include_directories (clickhouse-client-lib SYSTEM PRIVATE ${READLINE_INCLUDE_DIR}) +if (READLINE_INCLUDE_DIR) + target_include_directories (clickhouse-client-lib SYSTEM PRIVATE ${READLINE_INCLUDE_DIR}) +endif () if (CLICKHOUSE_SPLIT_BINARY) add_executable (clickhouse-client clickhouse-client.cpp) diff --git a/dbms/programs/client/Client.cpp b/dbms/programs/client/Client.cpp index 6666ad80efa..369168a46ac 100644 --- a/dbms/programs/client/Client.cpp +++ b/dbms/programs/client/Client.cpp @@ -28,6 +28,7 @@ #include #include #include +#include #include #include #include @@ -86,9 +87,106 @@ namespace ErrorCodes extern const int UNKNOWN_PACKET_FROM_SERVER; extern const int UNEXPECTED_PACKET_FROM_SERVER; extern const int CLIENT_OUTPUT_FORMAT_SPECIFIED; + extern const int LOGICAL_ERROR; } +/// Checks expected server and client error codes in testmode. +/// To enable it add special comment after the query: "-- { serverError 60 }" or "-- { clientError 20 }". +class TestHint +{ +public: + TestHint(bool enabled_, const String & query) + : enabled(enabled_), + server_error(0), + client_error(0) + { + if (!enabled_) + return; + + size_t pos = query.find("--"); + if (pos != String::npos && query.find("--", pos + 2) != String::npos) + return; /// It's not last comment. Hint belongs to commented query. 
+ + if (pos != String::npos) + { + pos = query.find('{', pos + 2); + if (pos != String::npos) + { + String hint = query.substr(pos + 1); + pos = hint.find('}'); + hint.resize(pos); + parse(hint); + } + } + } + + /// @returns true if it's possible to continue without reconnect + bool checkActual(int & actual_server_error, int & actual_client_error, + bool & got_exception, std::unique_ptr & last_exception) const + { + if (!enabled) + return true; + + if (allErrorsExpected(actual_server_error, actual_client_error)) + { + got_exception = false; + last_exception.reset(); + actual_server_error = 0; + actual_client_error = 0; + return false; + } + + if (lostExpectedError(actual_server_error, actual_client_error)) + { + std::cerr << "Success when error expected. It expects server error " + << server_error << ", client error " << client_error << "." << std::endl; + got_exception = true; + last_exception = std::make_unique("Success when error expected", ErrorCodes::LOGICAL_ERROR); /// return error to OS + return false; + } + + return true; + } + + int serverError() const { return server_error; } + int clientError() const { return client_error; } + +private: + bool enabled; + int server_error; + int client_error; + + void parse(const String & hint) + { + std::stringstream ss; + ss << hint; + while (!ss.eof()) + { + String item; + ss >> item; + if (item.empty()) + break; + + if (item == "serverError") + ss >> server_error; + else if (item == "clientError") + ss >> client_error; + } + } + + bool allErrorsExpected(int actual_server_error, int actual_client_error) const + { + return (server_error || client_error) && (server_error == actual_server_error) && (client_error == actual_client_error); + } + + bool lostExpectedError(int actual_server_error, int actual_client_error) const + { + return (server_error && !actual_server_error) || (client_error && !actual_client_error); + } +}; + + class Client : public Poco::Util::Application { public: @@ -107,6 +205,7 @@ private: bool 
is_interactive = true; /// Use either readline interface or batch mode. bool need_render_progress = true; /// Render query execution progress. bool echo_queries = false; /// Print queries before execution in batch mode. + bool ignore_error = false; /// In case of errors, don't print error message, continue to next query. Only applicable for non-interactive mode. bool print_time_to_stderr = false; /// Output execution time to stderr in batch mode. bool stdin_is_not_tty = false; /// stdin is not a terminal. @@ -162,6 +261,10 @@ private: /// If the last query resulted in exception. bool got_exception = false; + int expected_server_error = 0; + int expected_client_error = 0; + int actual_server_error = 0; + int actual_client_error = 0; String server_version; String server_display_name; @@ -383,9 +486,9 @@ private: { need_render_progress = config().getBool("progress", false); echo_queries = config().getBool("echo", false); + ignore_error = config().getBool("ignore-error", false); } - connection_parameters = ConnectionParameters(config()); connect(); /// Initialize DateLUT here to avoid counting time spent here as query execution time. @@ -503,6 +606,8 @@ private: void connect() { + connection_parameters = ConnectionParameters(config()); + if (is_interactive) std::cout << "Connecting to " << (!connection_parameters.default_database.empty() ? "database " + connection_parameters.default_database + " at " : "") @@ -524,6 +629,7 @@ private: String server_name; UInt64 server_version_major = 0; UInt64 server_version_minor = 0; + UInt64 server_version_patch = 0; UInt64 server_revision = 0; if (max_client_network_bandwidth) @@ -532,9 +638,9 @@ private: connection->setThrottler(throttler); } - connection->getServerVersion(server_name, server_version_major, server_version_minor, server_revision); + connection->getServerVersion(server_name, server_version_major, server_version_minor, server_version_patch, server_revision); - server_version = toString(server_version_major) + "." 
+ toString(server_version_minor) + "." + toString(server_revision); + server_version = toString(server_version_major) + "." + toString(server_version_minor) + "." + toString(server_version_patch); if (server_display_name = connection->getServerDisplayName(); server_display_name.length() == 0) { @@ -545,6 +651,7 @@ private: { std::cout << "Connected to " << server_name << " server version " << server_version + << " revision " << server_revision << "." << std::endl << std::endl; } } @@ -626,10 +733,14 @@ private: } catch (const Exception & e) { - std::cerr << std::endl - << "Exception on client:" << std::endl - << "Code: " << e.code() << ". " << e.displayText() << std::endl - << std::endl; + actual_client_error = e.code(); + if (!actual_client_error || actual_client_error != expected_client_error) + { + std::cerr << std::endl + << "Exception on client:" << std::endl + << "Code: " << e.code() << ". " << e.displayText() << std::endl + << std::endl; + } /// Client-side exception during query execution can result in the loss of /// sync in the connection protocol. @@ -667,7 +778,7 @@ private: bool process(const String & text) { - const auto ignore_error = config().getBool("ignore-error", false); + const bool test_mode = config().has("testmode"); if (config().has("multiquery")) { /// Several queries separated by ';'. @@ -711,6 +822,10 @@ private: while (isWhitespaceASCII(*begin) || *begin == ';') ++begin; + TestHint test_hint(test_mode, query); + expected_client_error = test_hint.clientError(); + expected_server_error = test_hint.serverError(); + try { if (!processSingleQuery(query, ast) && !ignore_error) @@ -718,10 +833,16 @@ private: } catch (...) 
{ - std::cerr << "Error on processing query: " << query << std::endl << getCurrentExceptionMessage(true); + last_exception = std::make_unique(getCurrentExceptionMessage(true), getCurrentExceptionCode()); + actual_client_error = last_exception->code(); + if (!ignore_error && (!actual_client_error || actual_client_error != expected_client_error)) + std::cerr << "Error on processing query: " << query << std::endl << last_exception->message(); got_exception = true; } + if (!test_hint.checkActual(actual_server_error, actual_client_error, got_exception, last_exception)) + connection->forceConnected(); + if (got_exception && !ignore_error) { if (is_interactive) @@ -1391,6 +1512,14 @@ private: resetOutput(); got_exception = true; + actual_server_error = e.code(); + if (expected_server_error) + { + if (actual_server_error == expected_server_error) + return; + std::cerr << "Expected error code: " << expected_server_error << " but got: " << actual_server_error << "." << std::endl; + } + std::string text = e.displayText(); auto embedded_stack_trace_pos = text.find("Stack trace"); @@ -1425,10 +1554,7 @@ private: void showClientVersion() { - std::cout << "ClickHouse client version " << DBMS_VERSION_MAJOR - << "." << DBMS_VERSION_MINOR - << "." << ClickHouseRevision::get() - << "." << std::endl; + std::cout << DBMS_NAME << " client version " << VERSION_STRING << "." 
<< std::endl; } public: @@ -1524,13 +1650,15 @@ public: ("pager", po::value(), "pager") ("multiline,m", "multiline") ("multiquery,n", "multiquery") - ("ignore-error", "Do not stop processing in multiquery mode") ("format,f", po::value(), "default output format") + ("testmode,T", "enable test hints in comments") + ("ignore-error", "do not stop processing in multiquery mode") ("vertical,E", "vertical output format, same as --format=Vertical or FORMAT Vertical or \\G at end of command") ("time,t", "print query execution time to stderr in non-interactive mode (for benchmarks)") ("stacktrace", "print stack traces of exceptions") ("progress", "print progress even in non-interactive mode") ("version,V", "print version information and exit") + ("version-clean", "print version in machine-readable format and exit") ("echo", "in batch mode, print query before execution") ("max_client_network_bandwidth", po::value(), "the maximum speed of data exchange over the network for the client in bytes per second.") ("compression", po::value(), "enable or disable compression") @@ -1562,6 +1690,12 @@ public: exit(0); } + if (options.count("version-clean")) + { + std::cout << VERSION_STRING; + exit(0); + } + /// Output of help message. if (options.count("help") || (options.count("host") && options["host"].as() == "elp")) /// If user writes -help instead of --help. 
@@ -1635,6 +1769,8 @@ public: config().setBool("multiline", true); if (options.count("multiquery")) config().setBool("multiquery", true); + if (options.count("testmode")) + config().setBool("testmode", true); if (options.count("ignore-error")) config().setBool("ignore-error", true); if (options.count("format")) diff --git a/dbms/programs/client/clickhouse-client.xml b/dbms/programs/client/clickhouse-client.xml index 3cd05d7fec9..083f035d908 100644 --- a/dbms/programs/client/clickhouse-client.xml +++ b/dbms/programs/client/clickhouse-client.xml @@ -15,17 +15,18 @@ {display_name} :) - {display_name} \e[1;32m:)\e[0m - {display_name} \e[1;31m:)\e[0m + {display_name} \x01\e[1;32m\x02:)\x01\e[0m\x02 + {display_name} \x01\e[1;31m\x02:)\x01\e[0m\x02 diff --git a/dbms/programs/local/LocalServer.cpp b/dbms/programs/local/LocalServer.cpp index fff89dcd7d5..4528ad40128 100644 --- a/dbms/programs/local/LocalServer.cpp +++ b/dbms/programs/local/LocalServer.cpp @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include @@ -359,10 +360,7 @@ void LocalServer::setupUsers() static void showClientVersion() { - std::cout << "ClickHouse client version " << DBMS_VERSION_MAJOR - << "." << DBMS_VERSION_MINOR - << "." << ClickHouseRevision::get() - << "." << std::endl; + std::cout << DBMS_NAME << " client version " << VERSION_STRING << "." 
<< std::endl; } std::string LocalServer::getHelpHeader() const diff --git a/dbms/programs/main.cpp b/dbms/programs/main.cpp index aba03a87a83..3bf1fa1d6ed 100644 --- a/dbms/programs/main.cpp +++ b/dbms/programs/main.cpp @@ -56,6 +56,10 @@ int mainEntryClickHouseClusterCopier(int argc, char ** argv); #if ENABLE_CLICKHOUSE_OBFUSCATOR int mainEntryClickHouseObfuscator(int argc, char ** argv); #endif +#if ENABLE_CLICKHOUSE_ODBC_BRIDGE || !defined(ENABLE_CLICKHOUSE_ODBC_BRIDGE) +int mainEntryClickHouseODBCBridge(int argc, char ** argv); +#endif + #if USE_EMBEDDED_COMPILER int mainEntryClickHouseClang(int argc, char ** argv); @@ -101,6 +105,10 @@ std::pair clickhouse_applications[] = #if ENABLE_CLICKHOUSE_OBFUSCATOR {"obfuscator", mainEntryClickHouseObfuscator}, #endif +#if ENABLE_CLICKHOUSE_ODBC_BRIDGE || !defined(ENABLE_CLICKHOUSE_ODBC_BRIDGE) + {"odbc-bridge", mainEntryClickHouseODBCBridge}, +#endif + #if USE_EMBEDDED_COMPILER {"clang", mainEntryClickHouseClang}, {"clang++", mainEntryClickHouseClang}, diff --git a/dbms/programs/obfuscator/Obfuscator.cpp b/dbms/programs/obfuscator/Obfuscator.cpp index 854771b3b26..3ba6d76179e 100644 --- a/dbms/programs/obfuscator/Obfuscator.cpp +++ b/dbms/programs/obfuscator/Obfuscator.cpp @@ -58,13 +58,13 @@ It is designed to retain the following properties of data: Most of the properties above are viable for performance testing: - reading data, filtering, aggregation and sorting will work at almost the same speed - as on original data due to saved cardinalities, magnitudes, compression ratios, etc. + as on original data due to saved cardinalities, magnitudes, compression ratios, etc. It works in deterministic fashion: you define a seed value and transform is totally determined by input data and by seed. Some transforms are one to one and could be reversed, so you need to have large enough seed and keep it in secret. 
It use some cryptographic primitives to transform data, but from the cryptographic point of view, - it doesn't do anything properly and you should never consider the result as secure, unless you have other reasons for it. + it doesn't do anything properly and you should never consider the result as secure, unless you have other reasons for it. It may retain some data you don't want to publish. @@ -74,7 +74,7 @@ So, the user will be able to count exact ratio of mobile traffic. Another example, suppose you have some private data in your table, like user email and you don't want to publish any single email address. If your table is large enough and contain multiple different emails and there is no email that have very high frequency than all others, - it will perfectly anonymize all data. But if you have small amount of different values in a column, it can possibly reproduce some of them. + it will perfectly anonymize all data. But if you have small amount of different values in a column, it can possibly reproduce some of them. And you should take care and look at exact algorithm, how this tool works, and probably fine tune some of it command line parameters. This tool works fine only with reasonable amount of data (at least 1000s of rows). 
diff --git a/dbms/programs/odbc-bridge/CMakeLists.txt b/dbms/programs/odbc-bridge/CMakeLists.txt new file mode 100644 index 00000000000..dcdfa7009d4 --- /dev/null +++ b/dbms/programs/odbc-bridge/CMakeLists.txt @@ -0,0 +1,13 @@ +add_library (clickhouse-odbc-bridge-lib + Handlers.cpp + HandlerFactory.cpp + ODBCBridge.cpp + ) + +target_link_libraries (clickhouse-odbc-bridge-lib clickhouse_common_io daemon) +target_include_directories (clickhouse-odbc-bridge-lib PUBLIC ${ClickHouse_SOURCE_DIR}/libs/libdaemon/include) + +if (CLICKHOUSE_SPLIT_BINARY) + add_executable (clickhouse-odbc-bridge odbc-bridge.cpp) + target_link_libraries (clickhouse-odbc-bridge clickhouse-odbc-bridge-lib) +endif () diff --git a/dbms/programs/odbc-bridge/HandlerFactory.cpp b/dbms/programs/odbc-bridge/HandlerFactory.cpp new file mode 100644 index 00000000000..9a3824cef79 --- /dev/null +++ b/dbms/programs/odbc-bridge/HandlerFactory.cpp @@ -0,0 +1,23 @@ +#include "HandlerFactory.h" +#include + +#include +#include +#include + +namespace DB +{ +Poco::Net::HTTPRequestHandler * HandlerFactory::createRequestHandler(const Poco::Net::HTTPServerRequest & request) +{ + const auto & uri = request.getURI(); + LOG_TRACE(log, "Request URI: " + uri); + + if (uri == "/ping" && request.getMethod() == Poco::Net::HTTPRequest::HTTP_GET) + return new PingHandler(keep_alive_timeout); + + if (request.getMethod() == Poco::Net::HTTPRequest::HTTP_POST) + return new ODBCHandler(pool_map, keep_alive_timeout, context); + + return nullptr; +} +} diff --git a/dbms/programs/odbc-bridge/HandlerFactory.h b/dbms/programs/odbc-bridge/HandlerFactory.h new file mode 100644 index 00000000000..92a0267a16c --- /dev/null +++ b/dbms/programs/odbc-bridge/HandlerFactory.h @@ -0,0 +1,37 @@ +#pragma once +#include +#include +#include +#include +#include "Handlers.h" + +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wunused-parameter" + #include +#pragma GCC diagnostic pop + + +namespace DB +{ +/** Factory for '/ping' and '/' 
handlers. + * Also stores Session pools for ODBC connections + */ +class HandlerFactory : public Poco::Net::HTTPRequestHandlerFactory +{ +public: + HandlerFactory(const std::string & name_, size_t keep_alive_timeout_, std::shared_ptr context_) + : log(&Poco::Logger::get(name_)), name(name_), keep_alive_timeout(keep_alive_timeout_), context(context_) + { + pool_map = std::make_shared(); + } + + Poco::Net::HTTPRequestHandler * createRequestHandler(const Poco::Net::HTTPServerRequest & request) override; + +private: + Poco::Logger * log; + std::string name; + size_t keep_alive_timeout; + std::shared_ptr context; + std::shared_ptr pool_map; +}; +} diff --git a/dbms/programs/odbc-bridge/Handlers.cpp b/dbms/programs/odbc-bridge/Handlers.cpp new file mode 100644 index 00000000000..9e8cb5dee1a --- /dev/null +++ b/dbms/programs/odbc-bridge/Handlers.cpp @@ -0,0 +1,146 @@ +#include "Handlers.h" +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +namespace DB +{ +namespace ErrorCodes +{ + extern const int BAD_REQUEST_PARAMETER; +} + +namespace +{ + std::unique_ptr parseColumns(std::string && column_string) + { + std::unique_ptr sample_block = std::make_unique(); + auto names_and_types = NamesAndTypesList::parse(column_string); + for (const NameAndTypePair & column_data : names_and_types) + sample_block->insert({column_data.type, column_data.name}); + return sample_block; + } +} + + +ODBCHandler::PoolPtr ODBCHandler::getPool(const std::string & connection_str) +{ + std::lock_guard lock(mutex); + if (!pool_map->count(connection_str)) + { + pool_map->emplace(connection_str, createAndCheckResizePocoSessionPool([connection_str] { + return std::make_shared("ODBC", connection_str); + })); + } + return pool_map->at(connection_str); +} + +void ODBCHandler::handleRequest(Poco::Net::HTTPServerRequest & request, Poco::Net::HTTPServerResponse & response) +{ + 
Poco::Net::HTMLForm params(request, request.stream()); + LOG_TRACE(log, "Request URI: " + request.getURI()); + + auto process_error = [&response, this](const std::string & message) { + response.setStatusAndReason(Poco::Net::HTTPResponse::HTTP_INTERNAL_SERVER_ERROR); + if (!response.sent()) + response.send() << message << std::endl; + LOG_WARNING(log, message); + }; + + if (!params.has("query")) + { + process_error("No 'query' in request body"); + return; + } + + if (!params.has("columns")) + { + process_error("No 'columns' in request URL"); + return; + } + + if (!params.has("connection_string")) + { + process_error("No 'connection_string' in request URL"); + return; + } + + size_t max_block_size = DEFAULT_BLOCK_SIZE; + if (params.has("max_block_size")) + { + std::string max_block_size_str = params.get("max_block_size", ""); + if (max_block_size_str.empty()) + { + process_error("Empty max_block_size specified"); + return; + } + max_block_size = parse(max_block_size_str); + } + + std::string columns = params.get("columns"); + std::unique_ptr sample_block; + try + { + sample_block = parseColumns(std::move(columns)); + } + catch (const Exception & ex) + { + process_error("Invalid 'columns' parameter in request body '" + ex.message() + "'"); + LOG_WARNING(log, ex.getStackTrace().toString()); + return; + } + + std::string format = params.get("format", "RowBinary"); + std::string query = params.get("query"); + LOG_TRACE(log, "Query: " << query); + + std::string connection_string = params.get("connection_string"); + LOG_TRACE(log, "Connection string: '" << connection_string << "'"); + + WriteBufferFromHTTPServerResponse out(request, response, keep_alive_timeout); + try + { + BlockOutputStreamPtr writer = FormatFactory::instance().getOutput(format, out, *sample_block, *context); + auto pool = getPool(connection_string); + ODBCBlockInputStream inp(pool->get(), query, *sample_block, max_block_size); + copyData(inp, *writer); + } + catch (...) 
+ { + auto message = getCurrentExceptionMessage(true); + response.setStatusAndReason( + Poco::Net::HTTPResponse::HTTP_INTERNAL_SERVER_ERROR); // can't call process_error, bacause of too soon response sending + writeStringBinary(message, out); + tryLogCurrentException(log); + } +} + +void PingHandler::handleRequest(Poco::Net::HTTPServerRequest & /*request*/, Poco::Net::HTTPServerResponse & response) +{ + try + { + setResponseDefaultHeaders(response, keep_alive_timeout); + const char * data = "Ok.\n"; + response.sendBuffer(data, strlen(data)); + } + catch (...) + { + tryLogCurrentException("PingHandler"); + } +} +} diff --git a/dbms/programs/odbc-bridge/Handlers.h b/dbms/programs/odbc-bridge/Handlers.h new file mode 100644 index 00000000000..a8cb65015d7 --- /dev/null +++ b/dbms/programs/odbc-bridge/Handlers.h @@ -0,0 +1,60 @@ +#pragma once + +#include +#include +#include + +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wunused-parameter" + #include +#pragma GCC diagnostic pop + +namespace DB +{ +/** Main handler for requests to ODBC driver + * requires connection_string and columns in request params + * and also query in request body + * response in RowBinary format + */ +class ODBCHandler : public Poco::Net::HTTPRequestHandler +{ +public: + using PoolPtr = std::shared_ptr; + using PoolMap = std::unordered_map; + + ODBCHandler(std::shared_ptr pool_map_, + size_t keep_alive_timeout_, + std::shared_ptr context_) + : log(&Poco::Logger::get("ODBCHandler")) + , pool_map(pool_map_) + , keep_alive_timeout(keep_alive_timeout_) + , context(context_) + { + } + + void handleRequest(Poco::Net::HTTPServerRequest & request, Poco::Net::HTTPServerResponse & response) override; + +private: + Poco::Logger * log; + + std::shared_ptr pool_map; + size_t keep_alive_timeout; + std::shared_ptr context; + + static inline std::mutex mutex; + + PoolPtr getPool(const std::string & connection_str); +}; + +/** Simple ping handler, answers "Ok." 
to GET request + */ +class PingHandler : public Poco::Net::HTTPRequestHandler +{ +public: + PingHandler(size_t keep_alive_timeout_) : keep_alive_timeout(keep_alive_timeout_) {} + void handleRequest(Poco::Net::HTTPServerRequest & request, Poco::Net::HTTPServerResponse & response) override; + +private: + size_t keep_alive_timeout; +}; +} diff --git a/dbms/programs/odbc-bridge/ODBCBridge.cpp b/dbms/programs/odbc-bridge/ODBCBridge.cpp new file mode 100644 index 00000000000..bab58250fa4 --- /dev/null +++ b/dbms/programs/odbc-bridge/ODBCBridge.cpp @@ -0,0 +1,205 @@ +#include "ODBCBridge.h" +#include "HandlerFactory.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace DB +{ +namespace ErrorCodes +{ + extern const int ARGUMENT_OUT_OF_BOUND; +} + +namespace +{ + Poco::Net::SocketAddress makeSocketAddress(const std::string & host, UInt16 port, Poco::Logger * log) + { + Poco::Net::SocketAddress socket_address; + try + { + socket_address = Poco::Net::SocketAddress(host, port); + } + catch (const Poco::Net::DNSException & e) + { + const auto code = e.code(); + if (code == EAI_FAMILY +#if defined(EAI_ADDRFAMILY) + || code == EAI_ADDRFAMILY +#endif + ) + { + LOG_ERROR(log, + "Cannot resolve listen_host (" << host << "), error " << e.code() << ": " << e.message() + << ". " + "If it is an IPv6 address and your host has disabled IPv6, then consider to " + "specify IPv4 address to listen in element of configuration " + "file. 
Example: 0.0.0.0"); + } + + throw; + } + return socket_address; + } + + Poco::Net::SocketAddress socketBindListen(Poco::Net::ServerSocket & socket, const std::string & host, UInt16 port, Poco::Logger * log) + { + auto address = makeSocketAddress(host, port, log); +#if POCO_VERSION < 0x01080000 + socket.bind(address, /* reuseAddress = */ true); +#else + socket.bind(address, /* reuseAddress = */ true, /* reusePort = */ false); +#endif + + socket.listen(/* backlog = */ 64); + + return address; + }; +} + +void ODBCBridge::handleHelp(const std::string &, const std::string &) +{ + Poco::Util::HelpFormatter helpFormatter(options()); + helpFormatter.setCommand(commandName()); + helpFormatter.setHeader("HTTP-proxy for odbc requests"); + helpFormatter.setUsage("--http-port "); + helpFormatter.format(std::cerr); + + stopOptionsProcessing(); +} + + +void ODBCBridge::defineOptions(Poco::Util::OptionSet & options) +{ + options.addOption(Poco::Util::Option("http-port", "", "port to listen").argument("http-port", true).binding("http-port")); + options.addOption( + Poco::Util::Option("listen-host", "", "hostname to listen, default localhost").argument("listen-host").binding("listen-host")); + options.addOption( + Poco::Util::Option("http-timeout", "", "http timout for socket, default 1800").argument("http-timeout").binding("http-timeout")); + + options.addOption(Poco::Util::Option("max-server-connections", "", "max connections to server, default 1024") + .argument("max-server-connections") + .binding("max-server-connections")); + options.addOption(Poco::Util::Option("keep-alive-timeout", "", "keepalive timeout, default 10") + .argument("keep-alive-timeout") + .binding("keep-alive-timeout")); + + options.addOption(Poco::Util::Option("log-level", "", "sets log level, default info").argument("log-level").binding("logger.level")); + + options.addOption( + Poco::Util::Option("log-path", "", "log path for all logs, default console").argument("log-path").binding("logger.log")); + + 
options.addOption(Poco::Util::Option("err-log-path", "", "err log path for all logs, default no") + .argument("err-log-path") + .binding("logger.errorlog")); + + using Me = std::decay_t; + options.addOption(Poco::Util::Option("help", "", "produce this help message") + .binding("help") + .callback(Poco::Util::OptionCallback(this, &Me::handleHelp))); + + ServerApplication::defineOptions(options); /// Don't need complex BaseDaemon's .xml config +} + +void ODBCBridge::initialize(Application & self) +{ + BaseDaemon::closeFDs(); + is_help = config().has("help"); + + if (is_help) + return; + + if (!config().has("logger.log")) + config().setBool("logger.console", true); + + config().setString("logger", "ODBCBridge"); + + buildLoggers(config()); + log = &logger(); + hostname = config().getString("listen-host", "localhost"); + port = config().getUInt("http-port"); + if (port > 0xFFFF) + throw Exception("Out of range 'http-port': " + std::to_string(port), ErrorCodes::ARGUMENT_OUT_OF_BOUND); + + http_timeout = config().getUInt("http-timeout", DEFAULT_HTTP_READ_BUFFER_TIMEOUT); + max_server_connections = config().getUInt("max-server-connections", 1024); + keep_alive_timeout = config().getUInt("keep-alive-timeout", 10); + + initializeTerminationAndSignalProcessing(); + + ServerApplication::initialize(self); +} + +void ODBCBridge::uninitialize() +{ + BaseDaemon::uninitialize(); +} + +int ODBCBridge::main(const std::vector & /*args*/) +{ + if (is_help) + return Application::EXIT_OK; + + LOG_INFO(log, "Starting up"); + Poco::Net::ServerSocket socket; + auto address = socketBindListen(socket, hostname, port, log); + socket.setReceiveTimeout(http_timeout); + socket.setSendTimeout(http_timeout); + Poco::ThreadPool server_pool(3, max_server_connections); + Poco::Net::HTTPServerParams::Ptr http_params = new Poco::Net::HTTPServerParams; + http_params->setTimeout(http_timeout); + http_params->setKeepAliveTimeout(keep_alive_timeout); + + context = std::make_shared(Context::createGlobal()); 
+ context->setGlobalContext(*context); + + auto server = Poco::Net::HTTPServer( + new HandlerFactory("ODBCRequestHandlerFactory-factory", keep_alive_timeout, context), server_pool, socket, http_params); + server.start(); + + LOG_INFO(log, "Listening http://" + address.toString()); + + SCOPE_EXIT({ + LOG_DEBUG(log, "Received termination signal."); + LOG_DEBUG(log, "Waiting for current connections to close."); + server.stop(); + for (size_t count : ext::range(1, 6)) + { + if (server.currentConnections() == 0) + break; + LOG_DEBUG(log, "Waiting for " << server.currentConnections() << " connections, try " << count); + std::this_thread::sleep_for(std::chrono::milliseconds(1000)); + } + }); + + waitForTerminationRequest(); + return Application::EXIT_OK; +} +} + +int mainEntryClickHouseODBCBridge(int argc, char ** argv) +{ + DB::ODBCBridge app; + try + { + return app.run(argc, argv); + } + catch (...) + { + std::cerr << DB::getCurrentExceptionMessage(true) << "\n"; + auto code = DB::getCurrentExceptionCode(); + return code ? code : 1; + } +} diff --git a/dbms/programs/odbc-bridge/ODBCBridge.h b/dbms/programs/odbc-bridge/ODBCBridge.h new file mode 100644 index 00000000000..4ae11ad7301 --- /dev/null +++ b/dbms/programs/odbc-bridge/ODBCBridge.h @@ -0,0 +1,41 @@ +#pragma once + +#include +#include +#include + +namespace DB +{ +/** Class represents clickhouse-odbc-bridge server, which listen + * incoming HTTP POST and GET requests on specified port and host. 
+ * Has two handlers '/' for all incoming POST requests to ODBC driver + * and /ping for GET request about service status + */ +class ODBCBridge : public BaseDaemon +{ +public: + void defineOptions(Poco::Util::OptionSet & options) override; + +protected: + void initialize(Application & self) override; + + void uninitialize() override; + + int main(const std::vector & args) override; + +private: + void handleHelp(const std::string &, const std::string &); + + bool is_help; + std::string hostname; + size_t port; + size_t http_timeout; + std::string log_level; + size_t max_server_connections; + size_t keep_alive_timeout; + + Poco::Logger * log; + + std::shared_ptr context; /// need for settings only +}; +} diff --git a/dbms/programs/odbc-bridge/README.md b/dbms/programs/odbc-bridge/README.md new file mode 100644 index 00000000000..91a6e476733 --- /dev/null +++ b/dbms/programs/odbc-bridge/README.md @@ -0,0 +1,38 @@ +# clickhouse-odbc-bridge + +Simple HTTP-server which works like a proxy for ODBC driver. The main motivation +was possible segfaults or other faults in ODBC implementations, which can +crash whole clickhouse-server process. + +This tool works via HTTP, not via pipes, shared memory, or TCP because: +- It's simpler to implement +- It's simpler to debug +- jdbc-bridge can be implemented in the same way + +## Usage + +`clickhouse-server` uses this tool inside odbc table function and StorageODBC. +However it can be used as standalone tool from command line with the following +parameters in POST-request URL: +- `connection_string` -- ODBC connection string. +- `columns` -- columns in ClickHouse NamesAndTypesList format, name in backticks, + type as string. Name and type are space separated, rows separated with + newline. +- `max_block_size` -- optional parameter, sets maximum size of single block. +Query is sent in post body. Response is returned in RowBinary format. 
+ +## Example: + +```bash +$ clickhouse-odbc-bridge --http-port 9018 --daemon + +$ curl -d "query=SELECT PageID, ImpID, AdType FROM Keys ORDER BY PageID, ImpID" --data-urlencode "connection_string=DSN=ClickHouse;DATABASE=stat" --data-urlencode "columns=columns format version: 1 +3 columns: +\`PageID\` String +\`ImpID\` String +\`AdType\` String +" "http://localhost:9018/" > result.txt + +$ cat result.txt +12246623837185725195925621517 +``` diff --git a/dbms/programs/odbc-bridge/odbc-bridge.cpp b/dbms/programs/odbc-bridge/odbc-bridge.cpp new file mode 100644 index 00000000000..af42eef8647 --- /dev/null +++ b/dbms/programs/odbc-bridge/odbc-bridge.cpp @@ -0,0 +1,2 @@ +int mainEntryClickHouseODBCBridge(int argc, char ** argv); +int main(int argc_, char ** argv_) { return mainEntryClickHouseODBCBridge(argc_, argv_); } diff --git a/dbms/programs/performance-test/PerformanceTest.cpp b/dbms/programs/performance-test/PerformanceTest.cpp index 1f7421566a4..cf55173ad3a 100644 --- a/dbms/programs/performance-test/PerformanceTest.cpp +++ b/dbms/programs/performance-test/PerformanceTest.cpp @@ -521,11 +521,12 @@ public: std::string name; UInt64 version_major; UInt64 version_minor; + UInt64 version_patch; UInt64 version_revision; - connection.getServerVersion(name, version_major, version_minor, version_revision); + connection.getServerVersion(name, version_major, version_minor, version_patch, version_revision); std::stringstream ss; - ss << version_major << "." << version_minor << "." << version_revision; + ss << version_major << "." << version_minor << "." 
<< version_patch; server_version = ss.str(); processTestsConfigurations(input_files); diff --git a/dbms/programs/server/CMakeLists.txt b/dbms/programs/server/CMakeLists.txt index 74297d29864..c146f40d281 100644 --- a/dbms/programs/server/CMakeLists.txt +++ b/dbms/programs/server/CMakeLists.txt @@ -19,7 +19,7 @@ if (CLICKHOUSE_SPLIT_BINARY) install (TARGETS clickhouse-server ${CLICKHOUSE_ALL_TARGETS} RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse) endif () -if (NOT APPLE AND NOT ARCH_FREEBSD) +if (OS_LINUX) set (GLIBC_MAX_REQUIRED 2.4) add_test(NAME GLIBC_required_version COMMAND bash -c "readelf -s ${CMAKE_CURRENT_BINARY_DIR}/../clickhouse-server | grep '@GLIBC' | grep -oP 'GLIBC_[\\d\\.]+' | sort | uniq | sort -r | perl -lnE 'exit 1 if $_ gt q{GLIBC_${GLIBC_MAX_REQUIRED}}'") endif () diff --git a/dbms/programs/server/InterserverIOHTTPHandler.cpp b/dbms/programs/server/InterserverIOHTTPHandler.cpp index 3cdbaa69b64..39d214503ba 100644 --- a/dbms/programs/server/InterserverIOHTTPHandler.cpp +++ b/dbms/programs/server/InterserverIOHTTPHandler.cpp @@ -1,3 +1,4 @@ +#include #include #include @@ -23,14 +24,40 @@ namespace ErrorCodes extern const int TOO_MANY_SIMULTANEOUS_QUERIES; } +std::pair InterserverIOHTTPHandler::checkAuthentication(Poco::Net::HTTPServerRequest & request) const +{ + const auto & config = server.config(); + + if (config.has("interserver_http_credentials.user")) + { + if (!request.hasCredentials()) + return {"Server requires HTTP Basic authentification, but client doesn't provide it", false}; + String scheme, info; + request.getCredentials(scheme, info); + + if (scheme != "Basic") + return {"Server requires HTTP Basic authentification but client provides another method", false}; + + String user = config.getString("interserver_http_credentials.user"); + String password = config.getString("interserver_http_credentials.password", ""); + + Poco::Net::HTTPBasicCredentials credentials(info); + if (std::make_pair(user, password) != 
std::make_pair(credentials.getUsername(), credentials.getPassword())) + return {"Incorrect user or password in HTTP Basic authentification", false}; + } + else if (request.hasCredentials()) + { + return {"Client requires HTTP Basic authentification, but server doesn't provide it", false}; + } + return {"", true}; +} + void InterserverIOHTTPHandler::processQuery(Poco::Net::HTTPServerRequest & request, Poco::Net::HTTPServerResponse & response) { HTMLForm params(request); LOG_TRACE(log, "Request URI: " << request.getURI()); - /// NOTE: You can do authentication here if you need to. - String endpoint_name = params.get("endpoint"); bool compress = params.get("compress") == "true"; @@ -65,8 +92,18 @@ void InterserverIOHTTPHandler::handleRequest(Poco::Net::HTTPServerRequest & requ try { - processQuery(request, response); - LOG_INFO(log, "Done processing query"); + if (auto [msg, success] = checkAuthentication(request); success) + { + processQuery(request, response); + LOG_INFO(log, "Done processing query"); + } + else + { + response.setStatusAndReason(Poco::Net::HTTPServerResponse::HTTP_UNAUTHORIZED); + if (!response.sent()) + response.send() << msg << std::endl; + LOG_WARNING(log, "Query processing failed request: '" << request.getURI() << "' authentification failed"); + } } catch (Exception & e) { diff --git a/dbms/programs/server/InterserverIOHTTPHandler.h b/dbms/programs/server/InterserverIOHTTPHandler.h index bf9fef59982..fbaf432d4f9 100644 --- a/dbms/programs/server/InterserverIOHTTPHandler.h +++ b/dbms/programs/server/InterserverIOHTTPHandler.h @@ -34,6 +34,8 @@ private: CurrentMetrics::Increment metric_increment{CurrentMetrics::InterserverConnection}; void processQuery(Poco::Net::HTTPServerRequest & request, Poco::Net::HTTPServerResponse & response); + + std::pair checkAuthentication(Poco::Net::HTTPServerRequest & request) const; }; } diff --git a/dbms/programs/server/Server.cpp b/dbms/programs/server/Server.cpp index 26c979e8e1c..3a55add70af 100644 --- 
a/dbms/programs/server/Server.cpp +++ b/dbms/programs/server/Server.cpp @@ -3,6 +3,7 @@ #include #include #include +#include #include #include #include @@ -58,6 +59,7 @@ namespace ErrorCodes extern const int NO_ELEMENTS_IN_CONFIG; extern const int SUPPORT_IS_DISABLED; extern const int ARGUMENT_OUT_OF_BOUND; + extern const int EXCESSIVE_ELEMENT_IN_CONFIG; } @@ -209,25 +211,49 @@ int Server::main(const std::vector & /*args*/) Poco::File(user_files_path).createDirectories(); } - if (config().has("interserver_http_port")) + if (config().has("interserver_http_port") && config().has("interserver_https_port")) + throw Exception("Both http and https interserver ports are specified", ErrorCodes::EXCESSIVE_ELEMENT_IN_CONFIG); + + static const auto interserver_tags = { - String this_host = config().getString("interserver_http_host", ""); + std::make_tuple("interserver_http_host", "interserver_http_port", "http"), + std::make_tuple("interserver_https_host", "interserver_https_port", "https") + }; - if (this_host.empty()) + for (auto [host_tag, port_tag, scheme] : interserver_tags) + { + if (config().has(port_tag)) { - this_host = getFQDNOrHostName(); - LOG_DEBUG(log, - "Configuration parameter 'interserver_http_host' doesn't exist or exists and empty. Will use '" + this_host - + "' as replica host."); + String this_host = config().getString(host_tag, ""); + + if (this_host.empty()) + { + this_host = getFQDNOrHostName(); + LOG_DEBUG(log, + "Configuration parameter '" + String(host_tag) + "' doesn't exist or exists and empty. 
Will use '" + this_host + + "' as replica host."); + } + + String port_str = config().getString(port_tag); + int port = parse(port_str); + + if (port < 0 || port > 0xFFFF) + throw Exception("Out of range '" + String(port_tag) + "': " + toString(port), ErrorCodes::ARGUMENT_OUT_OF_BOUND); + + global_context->setInterserverIOAddress(this_host, port); + global_context->setInterserverScheme(scheme); } + } - String port_str = config().getString("interserver_http_port"); - int port = parse(port_str); + if (config().has("interserver_http_credentials")) + { + String user = config().getString("interserver_http_credentials.user", ""); + String password = config().getString("interserver_http_credentials.password", ""); - if (port < 0 || port > 0xFFFF) - throw Exception("Out of range 'interserver_http_port': " + toString(port), ErrorCodes::ARGUMENT_OUT_OF_BOUND); + if (user.empty()) + throw Exception("Configuration parameter interserver_http_credentials user can't be empty", ErrorCodes::NO_ELEMENTS_IN_CONFIG); - global_context->setInterserverIOAddress(this_host, port); + global_context->setInterserverCredentials(user, password); } if (config().has("macros")) @@ -276,6 +302,9 @@ int Server::main(const std::vector & /*args*/) if (config().has("max_table_size_to_drop")) global_context->setMaxTableSizeToDrop(config().getUInt64("max_table_size_to_drop")); + if (config().has("max_partition_size_to_drop")) + global_context->setMaxPartitionSizeToDrop(config().getUInt64("max_partition_size_to_drop")); + /// Size of cache for uncompressed blocks. Zero means disabled. 
size_t uncompressed_cache_size = config().getUInt64("uncompressed_cache_size", 0); if (uncompressed_cache_size) @@ -349,7 +378,7 @@ int Server::main(const std::vector & /*args*/) Poco::ThreadPool server_pool(3, config().getUInt("max_connections", 1024)); Poco::Net::HTTPServerParams::Ptr http_params = new Poco::Net::HTTPServerParams; - http_params->setTimeout(settings.receive_timeout); + http_params->setTimeout(settings.http_receive_timeout); http_params->setKeepAliveTimeout(keep_alive_timeout); std::vector> servers; @@ -512,6 +541,27 @@ int Server::main(const std::vector & /*args*/) LOG_INFO(log, "Listening interserver http: " + address.toString()); } + + if (config().has("interserver_https_port")) + { +#if USE_POCO_NETSSL + initSSL(); + Poco::Net::SecureServerSocket socket; + auto address = socket_bind_listen(socket, listen_host, config().getInt("interserver_https_port"), /* secure = */ true); + socket.setReceiveTimeout(settings.http_receive_timeout); + socket.setSendTimeout(settings.http_send_timeout); + servers.emplace_back(new Poco::Net::HTTPServer( + new InterserverIOHTTPHandlerFactory(*this, "InterserverIOHTTPHandler-factory"), + server_pool, + socket, + http_params)); + + LOG_INFO(log, "Listening interserver https: " + address.toString()); +#else + throw Exception{"SSL support for TCP protocol is disabled because Poco library was built without NetSSL support.", + ErrorCodes::SUPPORT_IS_DISABLED}; +#endif + } } catch (const Poco::Net::NetException & e) { diff --git a/dbms/programs/server/TCPHandler.cpp b/dbms/programs/server/TCPHandler.cpp index b6961c12a4d..c46b4c4e713 100644 --- a/dbms/programs/server/TCPHandler.cpp +++ b/dbms/programs/server/TCPHandler.cpp @@ -1,39 +1,34 @@ #include - +#include #include +#include #include #include #include - #include - #include #include #include #include #include - #include - #include #include #include #include #include #include - +#include #include #include - +#include +#include #include +#include +#include #include 
"TCPHandler.h" -#include -#include - -#include -#include namespace DB @@ -547,6 +542,7 @@ void TCPHandler::receiveHello() readStringBinary(client_name, *in); readVarUInt(client_version_major, *in); readVarUInt(client_version_minor, *in); + // NOTE For backward compatibility of the protocol, client cannot send its version_patch. readVarUInt(client_revision, *in); readStringBinary(default_database, *in); readStringBinary(user, *in); @@ -555,7 +551,8 @@ void TCPHandler::receiveHello() LOG_DEBUG(log, "Connected " << client_name << " version " << client_version_major << "." << client_version_minor - << "." << client_revision + << "." << client_version_patch + << ", revision: " << client_revision << (!default_database.empty() ? ", database: " + default_database : "") << (!user.empty() ? ", user: " + user : "") << "."); @@ -572,13 +569,11 @@ void TCPHandler::sendHello() writeVarUInt(DBMS_VERSION_MINOR, *out); writeVarUInt(ClickHouseRevision::get(), *out); if (client_revision >= DBMS_MIN_REVISION_WITH_SERVER_TIMEZONE) - { writeStringBinary(DateLUT::instance().getTimeZone(), *out); - } if (client_revision >= DBMS_MIN_REVISION_WITH_SERVER_DISPLAY_NAME) - { writeStringBinary(server_display_name, *out); - } + if (client_revision >= DBMS_MIN_REVISION_WITH_VERSION_PATCH) + writeVarUInt(DBMS_VERSION_PATCH, *out); out->next(); } @@ -651,6 +646,7 @@ void TCPHandler::receiveQuery() client_info.client_name = client_name; client_info.client_version_major = client_version_major; client_info.client_version_minor = client_version_minor; + client_info.client_version_patch = client_version_patch; client_info.client_revision = client_revision; } diff --git a/dbms/programs/server/TCPHandler.h b/dbms/programs/server/TCPHandler.h index 6e2ff7d5bee..1969d02b48b 100644 --- a/dbms/programs/server/TCPHandler.h +++ b/dbms/programs/server/TCPHandler.h @@ -103,6 +103,7 @@ private: String client_name; UInt64 client_version_major = 0; UInt64 client_version_minor = 0; + UInt64 client_version_patch = 0; 
UInt64 client_revision = 0; Context connection_context; diff --git a/dbms/programs/server/config.xml b/dbms/programs/server/config.xml index 7dd7a00517e..e461d49d522 100644 --- a/dbms/programs/server/config.xml +++ b/dbms/programs/server/config.xml @@ -322,10 +322,12 @@ + diff --git a/dbms/programs/server/users.xml b/dbms/programs/server/users.xml index 41ac6057e9a..6f746baf2a9 100644 --- a/dbms/programs/server/users.xml +++ b/dbms/programs/server/users.xml @@ -55,7 +55,8 @@ 127.0.0.1 Each element of list has one of the following forms: - IP-address or network mask. Examples: 213.180.204.3 or 10.0.0.1/8 or 2a02:6b8::3 or 2a02:6b8::3/64. + IP-address or network mask. Examples: 213.180.204.3 or 10.0.0.1/8 or 10.0.0.1/255.255.255.0 + 2a02:6b8::3 or 2a02:6b8::3/64 or 2a02:6b8::3/ffff:ffff:ffff:ffff::. Hostname. Example: server01.yandex.ru. To check access, DNS query is performed, and all received addresses compared to peer address. Regular expression for host names. Example, ^server\d\d-\d\d-\d\.yandex\.ru$ diff --git a/dbms/src/AggregateFunctions/AggregateFunctionArray.cpp b/dbms/src/AggregateFunctions/AggregateFunctionArray.cpp index f42c5b6d142..9cb7d03bf69 100644 --- a/dbms/src/AggregateFunctions/AggregateFunctionArray.cpp +++ b/dbms/src/AggregateFunctions/AggregateFunctionArray.cpp @@ -18,6 +18,9 @@ public: DataTypes transformArguments(const DataTypes & arguments) const override { + if (0 == arguments.size()) + throw Exception("-Array aggregate functions require at least one argument", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH); + DataTypes nested_arguments; for (const auto & type : arguments) { diff --git a/dbms/src/AggregateFunctions/AggregateFunctionBitwise.cpp b/dbms/src/AggregateFunctions/AggregateFunctionBitwise.cpp index 762baf2451b..8c188bcbb8e 100644 --- a/dbms/src/AggregateFunctions/AggregateFunctionBitwise.cpp +++ b/dbms/src/AggregateFunctions/AggregateFunctionBitwise.cpp @@ -38,9 +38,9 @@ void 
registerAggregateFunctionsBitwise(AggregateFunctionFactory & factory) factory.registerFunction("groupBitXor", createAggregateFunctionBitwise); /// Aliases for compatibility with MySQL. - factory.registerFunction("BIT_OR", createAggregateFunctionBitwise, AggregateFunctionFactory::CaseInsensitive); - factory.registerFunction("BIT_AND", createAggregateFunctionBitwise, AggregateFunctionFactory::CaseInsensitive); - factory.registerFunction("BIT_XOR", createAggregateFunctionBitwise, AggregateFunctionFactory::CaseInsensitive); + factory.registerAlias("BIT_OR", "groupBitOr", AggregateFunctionFactory::CaseInsensitive); + factory.registerAlias("BIT_AND", "groupBitAnd", AggregateFunctionFactory::CaseInsensitive); + factory.registerAlias("BIT_XOR", "groupBitXor", AggregateFunctionFactory::CaseInsensitive); } } diff --git a/dbms/src/AggregateFunctions/AggregateFunctionCombinatorFactory.h b/dbms/src/AggregateFunctions/AggregateFunctionCombinatorFactory.h index 4c70cc6c068..579951cecb1 100644 --- a/dbms/src/AggregateFunctions/AggregateFunctionCombinatorFactory.h +++ b/dbms/src/AggregateFunctions/AggregateFunctionCombinatorFactory.h @@ -15,6 +15,10 @@ namespace DB */ class AggregateFunctionCombinatorFactory final: public ext::singleton { +private: + using Dict = std::unordered_map; + Dict dict; + public: /// Not thread safe. You must register before using tryGet. void registerCombinator(const AggregateFunctionCombinatorPtr & value); @@ -22,8 +26,10 @@ public: /// Example: if the name is 'avgIf', it will return combinator -If. 
AggregateFunctionCombinatorPtr tryFindSuffix(const std::string & name) const; -private: - std::unordered_map dict; + const Dict & getAllAggregateFunctionCombinators() const + { + return dict; + } }; } diff --git a/dbms/src/AggregateFunctions/AggregateFunctionFactory.cpp b/dbms/src/AggregateFunctions/AggregateFunctionFactory.cpp index eca854a031b..7876f0dcffb 100644 --- a/dbms/src/AggregateFunctions/AggregateFunctionFactory.cpp +++ b/dbms/src/AggregateFunctions/AggregateFunctionFactory.cpp @@ -13,6 +13,7 @@ #include #include +#include namespace DB @@ -41,6 +42,20 @@ void AggregateFunctionFactory::registerFunction(const String & name, Creator cre ErrorCodes::LOGICAL_ERROR); } +static DataTypes convertTypesWithDictionaryToNested(const DataTypes & types) +{ + DataTypes res_types; + res_types.reserve(types.size()); + for (const auto & type : types) + { + if (auto * type_with_dict = typeid_cast(type.get())) + res_types.push_back(type_with_dict->getDictionaryType()); + else + res_types.push_back(type); + } + + return res_types; +} AggregateFunctionPtr AggregateFunctionFactory::get( const String & name, @@ -48,6 +63,8 @@ AggregateFunctionPtr AggregateFunctionFactory::get( const Array & parameters, int recursion_level) const { + auto type_without_dictionary = convertTypesWithDictionaryToNested(argument_types); + /// If one of types is Nullable, we apply aggregate function combinator "Null". 
if (std::any_of(argument_types.begin(), argument_types.end(), @@ -57,7 +74,7 @@ AggregateFunctionPtr AggregateFunctionFactory::get( if (!combinator) throw Exception("Logical error: cannot find aggregate function combinator to apply a function to Nullable arguments.", ErrorCodes::LOGICAL_ERROR); - DataTypes nested_types = combinator->transformArguments(argument_types); + DataTypes nested_types = combinator->transformArguments(type_without_dictionary); AggregateFunctionPtr nested_function; @@ -70,7 +87,7 @@ AggregateFunctionPtr AggregateFunctionFactory::get( return combinator->transformAggregateFunction(nested_function, argument_types, parameters); } - auto res = getImpl(name, argument_types, parameters, recursion_level); + auto res = getImpl(name, type_without_dictionary, parameters, recursion_level); if (!res) throw Exception("Logical error: AggregateFunctionFactory returned nullptr", ErrorCodes::LOGICAL_ERROR); return res; @@ -78,11 +95,12 @@ AggregateFunctionPtr AggregateFunctionFactory::get( AggregateFunctionPtr AggregateFunctionFactory::getImpl( - const String & name, + const String & name_param, const DataTypes & argument_types, const Array & parameters, int recursion_level) const { + String name = getAliasToOrName(name_param); /// Find by exact match. 
auto it = aggregate_functions.find(name); if (it != aggregate_functions.end()) @@ -103,8 +121,8 @@ AggregateFunctionPtr AggregateFunctionFactory::getImpl( if (AggregateFunctionCombinatorPtr combinator = AggregateFunctionCombinatorFactory::instance().tryFindSuffix(name)) { - if (combinator->getName() == "Null") - throw Exception("Aggregate function combinator 'Null' is only for internal usage", ErrorCodes::UNKNOWN_AGGREGATE_FUNCTION); + if (combinator->isForInternalUsageOnly()) + throw Exception("Aggregate function combinator '" + combinator->getName() + "' is only for internal usage", ErrorCodes::UNKNOWN_AGGREGATE_FUNCTION); String nested_name = name.substr(0, name.size() - combinator->getName().size()); DataTypes nested_types = combinator->transformArguments(argument_types); @@ -126,10 +144,11 @@ AggregateFunctionPtr AggregateFunctionFactory::tryGet(const String & name, const bool AggregateFunctionFactory::isAggregateFunctionName(const String & name, int recursion_level) const { - if (aggregate_functions.count(name)) + if (aggregate_functions.count(name) || isAlias(name)) return true; - if (recursion_level == 0 && case_insensitive_aggregate_functions.count(Poco::toLower(name))) + String name_lowercase = Poco::toLower(name); + if (recursion_level == 0 && (case_insensitive_aggregate_functions.count(name_lowercase) || isAlias(name_lowercase))) return true; if (AggregateFunctionCombinatorPtr combinator = AggregateFunctionCombinatorFactory::instance().tryFindSuffix(name)) diff --git a/dbms/src/AggregateFunctions/AggregateFunctionFactory.h b/dbms/src/AggregateFunctions/AggregateFunctionFactory.h index bc36e76c11f..92598e52509 100644 --- a/dbms/src/AggregateFunctions/AggregateFunctionFactory.h +++ b/dbms/src/AggregateFunctions/AggregateFunctionFactory.h @@ -1,6 +1,7 @@ #pragma once #include +#include #include @@ -20,27 +21,18 @@ class IDataType; using DataTypePtr = std::shared_ptr; using DataTypes = std::vector; +/** Creator have arguments: name of aggregate function, 
types of arguments, values of parameters. + * Parameters are for "parametric" aggregate functions. + * For example, in quantileWeighted(0.9)(x, weight), 0.9 is "parameter" and x, weight are "arguments". + */ +using AggregateFunctionCreator = std::function; + /** Creates an aggregate function by name. */ -class AggregateFunctionFactory final : public ext::singleton +class AggregateFunctionFactory final : public ext::singleton, public IFactoryWithAliases { - friend class StorageSystemFunctions; - public: - /** Creator have arguments: name of aggregate function, types of arguments, values of parameters. - * Parameters are for "parametric" aggregate functions. - * For example, in quantileWeighted(0.9)(x, weight), 0.9 is "parameter" and x, weight are "arguments". - */ - using Creator = std::function; - - /// For compatibility with SQL, it's possible to specify that certain aggregate function name is case insensitive. - enum CaseSensitiveness - { - CaseSensitive, - CaseInsensitive - }; - /// Register a function by its name. /// No locking, you must register all functions before usage of get. void registerFunction( @@ -77,6 +69,13 @@ private: /// Case insensitive aggregate functions will be additionally added here with lowercased name. 
AggregateFunctions case_insensitive_aggregate_functions; + + const AggregateFunctions & getCreatorMap() const override { return aggregate_functions; } + + const AggregateFunctions & getCaseInsensitiveCreatorMap() const override { return case_insensitive_aggregate_functions; } + + String getFactoryName() const override { return "AggregateFunctionFactory"; } + }; } diff --git a/dbms/src/AggregateFunctions/AggregateFunctionHistogram.cpp b/dbms/src/AggregateFunctions/AggregateFunctionHistogram.cpp new file mode 100644 index 00000000000..de58d7a36d3 --- /dev/null +++ b/dbms/src/AggregateFunctions/AggregateFunctionHistogram.cpp @@ -0,0 +1,56 @@ +#include +#include +#include +#include + +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; + extern const int ILLEGAL_TYPE_OF_ARGUMENT; + extern const int BAD_ARGUMENTS; + extern const int UNSUPPORTED_PARAMETER; + extern const int PARAMETER_OUT_OF_BOUND; +} + +namespace +{ + +AggregateFunctionPtr createAggregateFunctionHistogram(const std::string & name, const DataTypes & arguments, const Array & params) +{ + if (params.size() != 1) + throw Exception("Function " + name + " requires single parameter: bins count", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH); + + if (params[0].getType() != Field::Types::UInt64) + throw Exception("Invalid type for bins count", ErrorCodes::UNSUPPORTED_PARAMETER); + + UInt32 bins_count = applyVisitor(FieldVisitorConvertToNumber(), params[0]); + + auto limit = AggregateFunctionHistogramData::bins_count_limit; + if (bins_count > limit) + throw Exception("Unsupported bins count. 
Should not be greater than " + std::to_string(limit), ErrorCodes::PARAMETER_OUT_OF_BOUND); + + if (bins_count == 0) + throw Exception("Bin count should be positive", ErrorCodes::BAD_ARGUMENTS); + + assertUnary(name, arguments); + AggregateFunctionPtr res(createWithNumericType(*arguments[0], bins_count)); + + if (!res) + throw Exception("Illegal type " + arguments[0]->getName() + " of argument for aggregate function " + name, ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); + + return res; +} + +} + +void registerAggregateFunctionHistogram(AggregateFunctionFactory & factory) +{ + factory.registerFunction("histogram", createAggregateFunctionHistogram); +} + +} diff --git a/dbms/src/AggregateFunctions/AggregateFunctionHistogram.h b/dbms/src/AggregateFunctions/AggregateFunctionHistogram.h new file mode 100644 index 00000000000..be149c4898d --- /dev/null +++ b/dbms/src/AggregateFunctions/AggregateFunctionHistogram.h @@ -0,0 +1,376 @@ +#pragma once + +#include +#include + +#include +#include +#include + +#include +#include +#include + +#include +#include +#include + +#include + +#include +#include +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int TOO_LARGE_ARRAY_SIZE; + extern const int INCORRECT_DATA; +} + +/** + * distance compression algorithm implementation + * http://jmlr.org/papers/volume11/ben-haim10a/ben-haim10a.pdf + */ +class AggregateFunctionHistogramData +{ +public: + using Mean = Float64; + using Weight = Float64; + + constexpr static size_t bins_count_limit = 250; + +private: + struct WeightedValue + { + Mean mean; + Weight weight; + + WeightedValue operator+ (const WeightedValue& other) + { + return {mean + other.weight * (other.mean - mean) / (other.weight + weight), other.weight + weight}; + } + }; + +private: + // quantity of stored weighted-values + UInt32 size; + + // calculated lower and upper bounds of seen points + Mean lower_bound; + Mean upper_bound; + + // Weighted values representation of histogram. 
+ WeightedValue points[0]; + +private: + void sort() + { + std::sort(points, points + size, + [](const WeightedValue & first, const WeightedValue & second) + { + return first.mean < second.mean; + }); + } + + template + struct PriorityQueueStorage + { + size_t size = 0; + T * data_ptr; + + PriorityQueueStorage(T * value) + : data_ptr(value) + { + } + + void push_back(T val) + { + data_ptr[size] = std::move(val); + ++size; + } + + void pop_back() { --size; } + T * begin() { return data_ptr; } + T * end() const { return data_ptr + size; } + bool empty() const { return size == 0; } + T & front() { return *data_ptr; } + const T & front() const { return *data_ptr; } + + using value_type = T; + using reference = T&; + using const_reference = const T&; + using size_type = size_t; + }; + + /** + * Repeatedly fuse most close values until max_bins bins left + */ + void compress(UInt32 max_bins) + { + sort(); + auto new_size = size; + if (size <= max_bins) + return; + + // Maintain doubly-linked list of "active" points + // and store neighbour pairs in priority queue by distance + UInt32 previous[size + 1]; + UInt32 next[size + 1]; + bool active[size + 1]; + std::fill(active, active + size, true); + active[size] = false; + + auto delete_node = [&](UInt32 i) + { + previous[next[i]] = previous[i]; + next[previous[i]] = next[i]; + active[i] = false; + }; + + for (size_t i = 0; i <= size; ++i) + { + previous[i] = i - 1; + next[i] = i + 1; + } + + next[size] = 0; + previous[0] = size; + + using QueueItem = std::pair; + + QueueItem storage[2 * size - max_bins]; + + std::priority_queue< + QueueItem, + PriorityQueueStorage, + std::greater> + queue{std::greater(), + PriorityQueueStorage(storage)}; + + auto quality = [&](UInt32 i) { return points[next[i]].mean - points[i].mean; }; + + for (size_t i = 0; i + 1 < size; ++i) + queue.push({quality(i), i}); + + while (new_size > max_bins && !queue.empty()) + { + auto min_item = queue.top(); + queue.pop(); + auto left = min_item.second; + 
auto right = next[left]; + + if (!active[left] || !active[right] || quality(left) > min_item.first) + continue; + + points[left] = points[left] + points[right]; + + delete_node(right); + if (active[next[left]]) + queue.push({quality(left), left}); + if (active[previous[left]]) + queue.push({quality(previous[left]), previous[left]}); + + --new_size; + } + + size_t left = 0; + for (size_t right = 0; right < size; ++right) + { + if (active[right]) + { + points[left] = points[right]; + ++left; + } + } + size = new_size; + } + + /*** + * Delete too close points from histogram. + * Assumes that points are sorted. + */ + void unique() + { + if (size == 0) + return; + + size_t left = 0; + + for (auto right = left + 1; right < size; ++right) + { + // Fuse points if their text representations differ only in last digit + auto min_diff = 10 * (points[left].mean + points[right].mean) * std::numeric_limits::epsilon(); + if (points[left].mean + min_diff >= points[right].mean) + { + points[left] = points[left] + points[right]; + } + else + { + ++left; + points[left] = points[right]; + } + } + size = left + 1; + } + +public: + AggregateFunctionHistogramData() + : size(0) + , lower_bound(std::numeric_limits::max()) + , upper_bound(std::numeric_limits::lowest()) + { + static_assert(offsetof(AggregateFunctionHistogramData, points) == sizeof(AggregateFunctionHistogramData), "points should be last member"); + } + + static size_t structSize(size_t max_bins) + { + return sizeof(AggregateFunctionHistogramData) + max_bins * 2 * sizeof(WeightedValue); + } + + void insertResultInto(ColumnVector & to_lower, ColumnVector & to_upper, ColumnVector & to_weights, UInt32 max_bins) + { + compress(max_bins); + unique(); + + for (size_t i = 0; i < size; ++i) + { + to_lower.insert((i == 0) ? lower_bound : (points[i].mean + points[i - 1].mean) / 2); + to_upper.insert((i + 1 == size) ? 
upper_bound : (points[i].mean + points[i + 1].mean) / 2); + + // linear density approximation + Weight lower_weight = (i == 0) ? points[i].weight : ((points[i - 1].weight) + points[i].weight * 3) / 4; + Weight upper_weight = (i + 1 == size) ? points[i].weight : (points[i + 1].weight + points[i].weight * 3) / 4; + to_weights.insert((lower_weight + upper_weight) / 2); + } + } + + void add(Mean value, Weight weight, UInt32 max_bins) + { + // nans break sort and compression + // infs don't fit in bins partition method + if (!isFinite(value)) + throw Exception("Invalid value (inf or nan) for aggregation by 'histogram' function", ErrorCodes::INCORRECT_DATA); + + points[size] = {value, weight}; + ++size; + lower_bound = std::min(lower_bound, value); + upper_bound = std::max(upper_bound, value); + + if (size >= max_bins * 2) + compress(max_bins); + } + + void merge(const AggregateFunctionHistogramData& other, UInt32 max_bins) + { + lower_bound = std::min(lower_bound, other.lower_bound); + upper_bound = std::max(lower_bound, other.upper_bound); + for (size_t i = 0; i < other.size; i++) + { + add(other.points[i].mean, other.points[i].weight, max_bins); + } + } + + void write(WriteBuffer & buf) const + { + buf.write(reinterpret_cast(&lower_bound), sizeof(lower_bound)); + buf.write(reinterpret_cast(&upper_bound), sizeof(upper_bound)); + + writeVarUInt(size, buf); + buf.write(reinterpret_cast(points), size * sizeof(WeightedValue)); + } + + void read(ReadBuffer & buf, UInt32 max_bins) + { + buf.read(reinterpret_cast(&lower_bound), sizeof(lower_bound)); + buf.read(reinterpret_cast(&upper_bound), sizeof(upper_bound)); + + readVarUInt(size, buf); + + if (size > max_bins * 2) + throw Exception("Too many bins", ErrorCodes::TOO_LARGE_ARRAY_SIZE); + + buf.read(reinterpret_cast(points), size * sizeof(WeightedValue)); + } +}; + +template +class AggregateFunctionHistogram final: public IAggregateFunctionDataHelper> +{ +private: + using Data = AggregateFunctionHistogramData; + + const 
UInt32 max_bins; + +public: + AggregateFunctionHistogram(UInt32 max_bins) + : max_bins(max_bins) + { + } + + size_t sizeOfData() const override + { + return Data::structSize(max_bins); + } + DataTypePtr getReturnType() const override + { + DataTypes types; + auto mean = std::make_shared>(); + auto weight = std::make_shared>(); + + // lower bound + types.emplace_back(mean); + // upper bound + types.emplace_back(mean); + // weight + types.emplace_back(weight); + + auto tuple = std::make_shared(types); + return std::make_shared(tuple); + } + + void add(AggregateDataPtr place, const IColumn ** columns, size_t row_num, Arena *) const override + { + auto val = static_cast &>(*columns[0]).getData()[row_num]; + this->data(place).add(static_cast(val), 1, max_bins); + } + + void merge(AggregateDataPtr place, ConstAggregateDataPtr rhs, Arena *) const override + { + this->data(place).merge(this->data(rhs), max_bins); + } + + void serialize(ConstAggregateDataPtr place, WriteBuffer & buf) const override + { + this->data(place).write(buf); + } + + void deserialize(AggregateDataPtr place, ReadBuffer & buf, Arena *) const override + { + this->data(place).read(buf, max_bins); + } + + void insertResultInto(ConstAggregateDataPtr place, IColumn & to) const override + { + auto& data = this->data(const_cast(place)); + + auto & to_array = static_cast(to); + ColumnArray::Offsets & offsets_to = to_array.getOffsets(); + auto & to_tuple = static_cast(to_array.getData()); + + auto & to_lower = static_cast &>(to_tuple.getColumn(0)); + auto & to_upper = static_cast &>(to_tuple.getColumn(1)); + auto & to_weights = static_cast &>(to_tuple.getColumn(2)); + data.insertResultInto(to_lower, to_upper, to_weights, max_bins); + + offsets_to.push_back(to_tuple.size()); + } + + const char * getHeaderFilePath() const override { return __FILE__; } + + String getName() const override { return "histogram"; } +}; + +} diff --git a/dbms/src/AggregateFunctions/AggregateFunctionMaxIntersections.h 
b/dbms/src/AggregateFunctions/AggregateFunctionMaxIntersections.h index 11ec01e52e4..f20dc94e145 100644 --- a/dbms/src/AggregateFunctions/AggregateFunctionMaxIntersections.h +++ b/dbms/src/AggregateFunctions/AggregateFunctionMaxIntersections.h @@ -137,7 +137,8 @@ public: /// const_cast because we will sort the array auto & array = const_cast::Array &>(this->data(place).value); - std::sort(array.begin(), array.end(), [](const auto & a, const auto & b) { return a.first < b.first; }); + /// Sort by position; for equal position, sort by weight to get deterministic result. + std::sort(array.begin(), array.end()); for (const auto & point_weight : array) { diff --git a/dbms/src/AggregateFunctions/AggregateFunctionMinMaxAny.h b/dbms/src/AggregateFunctions/AggregateFunctionMinMaxAny.h index 4b92a6231fe..322307c2bcf 100644 --- a/dbms/src/AggregateFunctions/AggregateFunctionMinMaxAny.h +++ b/dbms/src/AggregateFunctions/AggregateFunctionMinMaxAny.h @@ -646,7 +646,7 @@ struct AggregateFunctionAnyHeavyData : Data } else { - if (counter < to.counter) + if ((!this->has() && to.has()) || counter < to.counter) { this->change(to, arena); return true; diff --git a/dbms/src/AggregateFunctions/AggregateFunctionNull.cpp b/dbms/src/AggregateFunctions/AggregateFunctionNull.cpp index 46a46a2370a..6ce7d94d970 100644 --- a/dbms/src/AggregateFunctions/AggregateFunctionNull.cpp +++ b/dbms/src/AggregateFunctions/AggregateFunctionNull.cpp @@ -18,6 +18,8 @@ class AggregateFunctionCombinatorNull final : public IAggregateFunctionCombinato public: String getName() const override { return "Null"; } + bool isForInternalUsageOnly() const override { return true; } + DataTypes transformArguments(const DataTypes & arguments) const override { size_t size = arguments.size(); diff --git a/dbms/src/AggregateFunctions/AggregateFunctionQuantile.cpp b/dbms/src/AggregateFunctions/AggregateFunctionQuantile.cpp index 250ee422e8b..62455af6353 100644 --- a/dbms/src/AggregateFunctions/AggregateFunctionQuantile.cpp +++ 
b/dbms/src/AggregateFunctions/AggregateFunctionQuantile.cpp @@ -93,30 +93,14 @@ void registerAggregateFunctionsQuantile(AggregateFunctionFactory & factory) createAggregateFunctionQuantile); /// 'median' is an alias for 'quantile' - - factory.registerFunction("median", - createAggregateFunctionQuantile); - - factory.registerFunction("medianDeterministic", - createAggregateFunctionQuantile); - - factory.registerFunction("medianExact", - createAggregateFunctionQuantile); - - factory.registerFunction("medianExactWeighted", - createAggregateFunctionQuantile); - - factory.registerFunction("medianTiming", - createAggregateFunctionQuantile); - - factory.registerFunction("medianTimingWeighted", - createAggregateFunctionQuantile); - - factory.registerFunction("medianTDigest", - createAggregateFunctionQuantile); - - factory.registerFunction("medianTDigestWeighted", - createAggregateFunctionQuantile); + factory.registerAlias("median", NameQuantile::name); + factory.registerAlias("medianDeterministic", NameQuantileDeterministic::name); + factory.registerAlias("medianExact", NameQuantileExact::name); + factory.registerAlias("medianExactWeighted", NameQuantileExactWeighted::name); + factory.registerAlias("medianTiming", NameQuantileTiming::name); + factory.registerAlias("medianTimingWeighted", NameQuantileTimingWeighted::name); + factory.registerAlias("medianTDigest", NameQuantileTDigest::name); + factory.registerAlias("medianTDigestWeighted", NameQuantileTDigestWeighted::name); } } diff --git a/dbms/src/AggregateFunctions/AggregateFunctionUniq.cpp b/dbms/src/AggregateFunctions/AggregateFunctionUniq.cpp index d6831ce1c43..882cedca1ef 100644 --- a/dbms/src/AggregateFunctions/AggregateFunctionUniq.cpp +++ b/dbms/src/AggregateFunctions/AggregateFunctionUniq.cpp @@ -24,10 +24,10 @@ namespace ErrorCodes namespace { + /** `DataForVariadic` is a data structure that will be used for `uniq` aggregate function of multiple arguments. 
* It differs, for example, in that it uses a trivial hash function, since `uniq` of many arguments first hashes them out itself. */ - template AggregateFunctionPtr createAggregateFunctionUniq(const std::string & name, const DataTypes & argument_types, const Array & params) { @@ -37,6 +37,8 @@ AggregateFunctionPtr createAggregateFunctionUniq(const std::string & name, const throw Exception("Incorrect number of arguments for aggregate function " + name, ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH); + bool use_exact_hash_function = !isAllArgumentsContiguousInMemory(argument_types); + if (argument_types.size() == 1) { const IDataType & argument_type = *argument_types[0]; @@ -51,25 +53,25 @@ AggregateFunctionPtr createAggregateFunctionUniq(const std::string & name, const return std::make_shared>(); else if (typeid_cast(&argument_type) || typeid_cast(&argument_type)) return std::make_shared>(); - else if (typeid_cast(&argument_type)) - return std::make_shared>(argument_types); else if (typeid_cast(&argument_type)) return std::make_shared>(); - } - else - { - /// If there are several arguments, then no tuples allowed among them. - for (const auto & type : argument_types) - if (typeid_cast(type.get())) - throw Exception("Tuple argument of function " + name + " must be the only argument", - ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); + else if (typeid_cast(&argument_type)) + { + if (use_exact_hash_function) + return std::make_shared>(argument_types); + else + return std::make_shared>(argument_types); + } } /// "Variadic" method also works as a fallback generic case for single argument. - return std::make_shared>(argument_types); + if (use_exact_hash_function) + return std::make_shared>(argument_types); + else + return std::make_shared>(argument_types); } -template