diff --git a/.github/ISSUE_TEMPLATE/40_bug-report.md b/.github/ISSUE_TEMPLATE/40_bug-report.md
index 4dfd19266d0..97137366189 100644
--- a/.github/ISSUE_TEMPLATE/40_bug-report.md
+++ b/.github/ISSUE_TEMPLATE/40_bug-report.md
@@ -7,7 +7,7 @@ assignees: ''
 
 ---
 
-(you don't have to strictly follow this form)
+You have to provide the following information whenever possible.
 
 **Describe the bug**
 A clear and concise description of what works not as it is supposed to.
diff --git a/.gitignore b/.gitignore
index d33dbf0600d..1db6e0a78c9 100644
--- a/.gitignore
+++ b/.gitignore
@@ -27,6 +27,7 @@
 /docs/zh/single.md
 /docs/ja/single.md
 /docs/fa/single.md
+/docs/en/development/cmake-in-clickhouse.md
 
 # callgrind files
 callgrind.out.*
diff --git a/.gitmodules b/.gitmodules
index de7250166b8..66a2370f0da 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -17,6 +17,7 @@
 [submodule "contrib/zlib-ng"]
 	path = contrib/zlib-ng
 	url = https://github.com/ClickHouse-Extras/zlib-ng.git
+	branch = clickhouse-new
 [submodule "contrib/googletest"]
 	path = contrib/googletest
 	url = https://github.com/google/googletest.git
@@ -133,7 +134,7 @@
 	url = https://github.com/unicode-org/icu.git
 [submodule "contrib/flatbuffers"]
 	path = contrib/flatbuffers
-	url = https://github.com/google/flatbuffers.git
+	url = https://github.com/ClickHouse-Extras/flatbuffers.git
 [submodule "contrib/libc-headers"]
 	path = contrib/libc-headers
 	url = https://github.com/ClickHouse-Extras/libc-headers.git
@@ -221,6 +222,9 @@
 [submodule "contrib/NuRaft"]
 	path = contrib/NuRaft
 	url = https://github.com/ClickHouse-Extras/NuRaft.git
+[submodule "contrib/nanodbc"]
+	path = contrib/nanodbc
+	url = https://github.com/ClickHouse-Extras/nanodbc.git
 [submodule "contrib/datasketches-cpp"]
 	path = contrib/datasketches-cpp
 	url = https://github.com/ClickHouse-Extras/datasketches-cpp.git
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 8590fefa66d..cc1ec835a7b 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,156 @@
+## ClickHouse release 21.4
+
+### ClickHouse release 21.4.1 2021-04-12
+
+#### Backward Incompatible Change
+
+* The `toStartOfInterval` function will align hour intervals to midnight (in previous versions they were aligned to the start of the Unix epoch). For example, `toStartOfInterval(x, INTERVAL 11 HOUR)` will split every day into three intervals: `00:00:00..10:59:59`, `11:00:00..21:59:59` and `22:00:00..23:59:59`. This behaviour is better suited for practical needs. This closes [#9510](https://github.com/ClickHouse/ClickHouse/issues/9510). [#22060](https://github.com/ClickHouse/ClickHouse/pull/22060) ([alexey-milovidov](https://github.com/alexey-milovidov)).
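The alignment change above is plain integer arithmetic. A minimal sketch of the two behaviours (illustrative C++, not ClickHouse code; it assumes UTC timestamps in seconds and ignores time zones):

```cpp
#include <cstdint>
#include <iostream>

// Old behaviour: floor the timestamp to a multiple of the interval,
// counting from the Unix epoch.
int64_t floorToIntervalEpochAligned(int64_t t, int64_t interval)
{
    return t - t % interval;
}

// New behaviour: floor the offset within the current day, so interval
// boundaries restart at every midnight.
int64_t floorToIntervalDayAligned(int64_t t, int64_t interval)
{
    const int64_t day = 86400;
    int64_t day_start = t - t % day; // midnight of the current day
    int64_t offset = t - day_start;  // seconds elapsed since midnight
    return day_start + offset - offset % interval;
}

int main()
{
    const int64_t interval = 11 * 3600;       // INTERVAL 11 HOUR
    const int64_t t = 1617321600 + 12 * 3600; // 2021-04-02 12:00:00 UTC
    std::cout << floorToIntervalEpochAligned(t, interval) << '\n'  // 06:00:00 that day
              << floorToIntervalDayAligned(t, interval) << '\n';   // 11:00:00 that day
}
```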
+* `Age` and `Precision` in graphite rollup configs should increase from retention to retention. Now this is checked, and a wrong config raises an exception. [#21496](https://github.com/ClickHouse/ClickHouse/pull/21496) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
+* Fix `cutToFirstSignificantSubdomainCustom()`/`firstSignificantSubdomainCustom()` returning a wrong result for 3+ level domains present in the custom top-level domain list. For input domains matching these custom top-level domains, the third-level domain was considered to be the first significant one. This is now fixed. This change may introduce incompatibility if the function is used in e.g. the sharding key. [#21946](https://github.com/ClickHouse/ClickHouse/pull/21946) ([Azat Khuzhin](https://github.com/azat)).
+* Column `keys` in table `system.dictionaries` was replaced with columns `key.names` and `key.types`. Columns `key.names`, `key.types`, `attribute.names`, `attribute.types` from the `system.dictionaries` table do not require the dictionary to be loaded. [#21884](https://github.com/ClickHouse/ClickHouse/pull/21884) ([Maksim Kita](https://github.com/kitaisreal)).
+* Now replicas that are processing the `ALTER TABLE ATTACH PART[ITION]` command search in their `detached/` folders before fetching the data from other replicas. As an implementation detail, a new command `ATTACH_PART` is introduced in the replicated log. Parts are searched for and compared by their checksums. [#18978](https://github.com/ClickHouse/ClickHouse/pull/18978) ([Mike Kot](https://github.com/myrrc)). **Note**:
+  * `ATTACH PART[ITION]` queries may not work during a cluster upgrade.
+  * It's not possible to roll back to an older ClickHouse version after executing an `ALTER ... ATTACH` query in the new version, as the old servers would fail to process the `ATTACH_PART` entry in the replicated log.
+* In this version, an empty `<remote_url_allow_hosts>` element will block all access to remote hosts, while in previous versions it did nothing. If you want to keep the old behaviour and you have an empty `remote_url_allow_hosts` element in the configuration file, remove it. [#20058](https://github.com/ClickHouse/ClickHouse/pull/20058) ([Vladimir Chebotarev](https://github.com/excitoon)).
+
+
+#### New Feature
+
+* Extended the range of `DateTime64` to support dates from year 1925 to 2283. Improved support of `DateTime` around the zero date (`1970-01-01`). [#9404](https://github.com/ClickHouse/ClickHouse/pull/9404) ([alexey-milovidov](https://github.com/alexey-milovidov), [Vasily Nemkov](https://github.com/Enmk)). Not all time and date functions work for the extended range of dates.
+* Added support of Kerberos authentication for preconfigured users and HTTP requests (GSS-SPNEGO). [#14995](https://github.com/ClickHouse/ClickHouse/pull/14995) ([Denis Glazachev](https://github.com/traceon)).
+* Add `prefer_column_name_to_alias` setting to use original column names instead of aliases. It is needed for better compatibility with common databases' aliasing rules. This is for [#9715](https://github.com/ClickHouse/ClickHouse/issues/9715) and [#9887](https://github.com/ClickHouse/ClickHouse/issues/9887). [#22044](https://github.com/ClickHouse/ClickHouse/pull/22044) ([Amos Bird](https://github.com/amosbird)).
+* Added functions `dictGetChildren(dictionary, key)`, `dictGetDescendants(dictionary, key, level)`. Function `dictGetChildren` returns all children as an array of indexes. It is an inverse transformation for `dictGetHierarchy`. Function `dictGetDescendants` returns all descendants as if `dictGetChildren` were applied `level` times recursively. A zero `level` value is equivalent to infinity. Closes [#14656](https://github.com/ClickHouse/ClickHouse/issues/14656). [#22096](https://github.com/ClickHouse/ClickHouse/pull/22096) ([Maksim Kita](https://github.com/kitaisreal)).
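To make the `dictGetChildren`/`dictGetDescendants` relationship concrete, here is a rough sketch of the semantics described above (illustrative only; the hierarchy is a made-up in-memory map, not a ClickHouse dictionary, and is assumed to be acyclic):

```cpp
#include <cstdint>
#include <unordered_map>
#include <vector>

// parent key -> direct children, i.e. the inverse of the key -> parent
// relation that dictGetHierarchy walks.
using Hierarchy = std::unordered_map<uint64_t, std::vector<uint64_t>>;

// dictGetChildren(key) would be h[key]; descendants applies that step
// `level` times, where level == 0 means "no depth limit".
std::vector<uint64_t> descendants(const Hierarchy & h, uint64_t key, size_t level)
{
    std::vector<uint64_t> result;
    std::vector<uint64_t> frontier{key};
    for (size_t depth = 0; (level == 0 || depth < level) && !frontier.empty(); ++depth)
    {
        std::vector<uint64_t> next;
        for (uint64_t node : frontier)
            if (auto it = h.find(node); it != h.end())
                for (uint64_t child : it->second)
                {
                    result.push_back(child);
                    next.push_back(child);
                }
        frontier = std::move(next);
    }
    return result;
}
```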
+* Added `executable_pool` dictionary source. Closes [#14528](https://github.com/ClickHouse/ClickHouse/issues/14528). [#21321](https://github.com/ClickHouse/ClickHouse/pull/21321) ([Maksim Kita](https://github.com/kitaisreal)).
+* Added table function `dictionary`. It works the same way as the `Dictionary` engine. Closes [#21560](https://github.com/ClickHouse/ClickHouse/issues/21560). [#21910](https://github.com/ClickHouse/ClickHouse/pull/21910) ([Maksim Kita](https://github.com/kitaisreal)).
+* Support `Nullable` type for `PolygonDictionary` attribute. [#21890](https://github.com/ClickHouse/ClickHouse/pull/21890) ([Maksim Kita](https://github.com/kitaisreal)).
+* Functions `dictGet`, `dictHas` use the current database name if it is not specified for dictionaries created with DDL. Closes [#21632](https://github.com/ClickHouse/ClickHouse/issues/21632). [#21859](https://github.com/ClickHouse/ClickHouse/pull/21859) ([Maksim Kita](https://github.com/kitaisreal)).
+* Added function `dictGetOrNull`. It works like `dictGet`, but returns `NULL` if the key was not found in the dictionary. Closes [#22375](https://github.com/ClickHouse/ClickHouse/issues/22375). [#22413](https://github.com/ClickHouse/ClickHouse/pull/22413) ([Maksim Kita](https://github.com/kitaisreal)).
+* Added async update in `ComplexKeyCache`, `SSDCache`, `SSDComplexKeyCache` dictionaries. Added support for `Nullable` type in `Cache`, `ComplexKeyCache`, `SSDCache`, `SSDComplexKeyCache` dictionaries. Added support for fetching multiple attributes with the `dictGet`, `dictGetOrDefault` functions. Fixes [#21517](https://github.com/ClickHouse/ClickHouse/issues/21517). [#20595](https://github.com/ClickHouse/ClickHouse/pull/20595) ([Maksim Kita](https://github.com/kitaisreal)).
+* Support `dictHas` function for `RangeHashedDictionary`. Fixes [#6680](https://github.com/ClickHouse/ClickHouse/issues/6680). [#19816](https://github.com/ClickHouse/ClickHouse/pull/19816) ([Maksim Kita](https://github.com/kitaisreal)).
+* Add function `timezoneOf` that returns the timezone name of the `DateTime` or `DateTime64` data types. This does not close [#9959](https://github.com/ClickHouse/ClickHouse/issues/9959). Fix inconsistencies in function names: add aliases `timezone` and `timeZone` as well as `toTimezone` and `toTimeZone` and `timezoneOf` and `timeZoneOf`. [#22001](https://github.com/ClickHouse/ClickHouse/pull/22001) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Add new optional clause `GRANTEES` for `CREATE/ALTER USER` commands. It specifies users or roles which are allowed to receive grants from this user, on the condition that this user also has all the required access granted with grant option. By default `GRANTEES ANY` is used, which means a user with grant option can grant to anyone. Syntax: `CREATE USER ... GRANTEES {user | role | ANY | NONE} [,...] [EXCEPT {user | role} [,...]]`. [#21641](https://github.com/ClickHouse/ClickHouse/pull/21641) ([Vitaly Baranov](https://github.com/vitlibar)).
+* Add new column `slowdowns_count` to `system.clusters`. When hedged requests are used, it shows how many times we switched to another replica because this replica was responding slowly. Also show the actual value of `errors_count` in `system.clusters`. [#21480](https://github.com/ClickHouse/ClickHouse/pull/21480) ([Kruglov Pavel](https://github.com/Avogar)).
+* Add `_partition_id` virtual column for `MergeTree*` engines. Allow to prune partitions by `_partition_id`. Add `partitionID()` function to calculate the partition id string. [#21401](https://github.com/ClickHouse/ClickHouse/pull/21401) ([Amos Bird](https://github.com/amosbird)).
+* Add function `isIPAddressInRange` to test if an IPv4 or IPv6 address is contained in a given CIDR network prefix. [#21329](https://github.com/ClickHouse/ClickHouse/pull/21329) ([PHO](https://github.com/depressed-pho)).
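For IPv4, the containment test of `isIPAddressInRange` boils down to a mask comparison. A hypothetical helper (the real function also parses textual addresses and handles IPv6):

```cpp
#include <cstdint>

// An IPv4 address lies inside a CIDR prefix when its top `prefix_len`
// bits match the network address.
bool ipv4InRange(uint32_t addr, uint32_t network, unsigned prefix_len)
{
    if (prefix_len == 0)
        return true; // x.x.x.x/0 matches everything (also avoids shifting by 32)
    uint32_t mask = ~uint32_t(0) << (32 - prefix_len);
    return (addr & mask) == (network & mask);
}
```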
+* Added new SQL command `ALTER TABLE 'table_name' UNFREEZE [PARTITION 'part_expr'] WITH NAME 'backup_name'`. This command is needed to properly remove 'frozen' partitions from all disks. [#21142](https://github.com/ClickHouse/ClickHouse/pull/21142) ([Pavel Kovalenko](https://github.com/Jokser)).
+* Support implicit key type conversion for JOIN. [#19885](https://github.com/ClickHouse/ClickHouse/pull/19885) ([Vladimir](https://github.com/vdimir)).
+
+#### Experimental Feature
+
+* Support `RANGE OFFSET` frame (for window functions) for floating point types. Implement `lagInFrame`/`leadInFrame` window functions, which are analogous to `lag`/`lead`, but respect the window frame. They are identical when the frame is `between unbounded preceding and unbounded following`. This closes [#5485](https://github.com/ClickHouse/ClickHouse/issues/5485). [#21895](https://github.com/ClickHouse/ClickHouse/pull/21895) ([Alexander Kuzmenkov](https://github.com/akuzm)).
+* Zero-copy replication for `ReplicatedMergeTree` over S3 storage. [#16240](https://github.com/ClickHouse/ClickHouse/pull/16240) ([ianton-ru](https://github.com/ianton-ru)).
+* Added the possibility to migrate an existing S3 disk to the schema with backup-restore capabilities. [#22070](https://github.com/ClickHouse/ClickHouse/pull/22070) ([Pavel Kovalenko](https://github.com/Jokser)).
+
+#### Performance Improvement
+
+* Supported parallel formatting in `clickhouse-local` and everywhere else. [#21630](https://github.com/ClickHouse/ClickHouse/pull/21630) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
+* Support parallel parsing for `CSVWithNames` and `TSVWithNames` formats. This closes [#21085](https://github.com/ClickHouse/ClickHouse/issues/21085). [#21149](https://github.com/ClickHouse/ClickHouse/pull/21149) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
+* Enable reading with mmap IO for file ranges from 64 MiB (the setting `min_bytes_to_use_mmap_io`). It may lead to a moderate performance improvement. [#22326](https://github.com/ClickHouse/ClickHouse/pull/22326) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Add a cache for files read with the `min_bytes_to_use_mmap_io` setting. It gives a significant (2x and more) performance improvement when the value of the setting is small, by avoiding frequent mmap/munmap calls and the consequent page faults. Note that mmap IO has major drawbacks that make it less reliable in production (e.g. hangs or SIGBUS on faulty disks; less controllable memory usage). Nevertheless, it is good in benchmarks. [#22206](https://github.com/ClickHouse/ClickHouse/pull/22206) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Avoid unnecessary data copying when using codec `NONE`. Please note that codec `NONE` is mostly useless - it's recommended to always use compression (`LZ4` is the default). Despite the common belief, disabling compression may not improve performance (the opposite effect is possible). The `NONE` codec is useful in some cases: when data is incompressible, or for synthetic benchmarks. [#22145](https://github.com/ClickHouse/ClickHouse/pull/22145) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Faster `GROUP BY` with small `max_rows_to_group_by` and `group_by_overflow_mode='any'`. [#21856](https://github.com/ClickHouse/ClickHouse/pull/21856) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Optimize performance of queries like `SELECT ... FINAL ... WHERE`. Now in queries with `FINAL` it's allowed to move columns that are in the sorting key to `PREWHERE`. [#21830](https://github.com/ClickHouse/ClickHouse/pull/21830) ([foolchi](https://github.com/foolchi)).
+* Improved performance by replacing `memcpy` with another implementation. This closes [#18583](https://github.com/ClickHouse/ClickHouse/issues/18583). [#21520](https://github.com/ClickHouse/ClickHouse/pull/21520) ([alexey-milovidov](https://github.com/alexey-milovidov)).
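As a toy illustration of why a custom `memcpy` can beat the libc call for workloads dominated by small copies (this is not the implementation the entry refers to): special-case small sizes inline and only fall back to libc for large blocks.

```cpp
#include <cstddef>
#include <cstring>

inline void * inlineMemcpy(void * dst, const void * src, size_t size)
{
    if (size <= 16)
    {
        if (size >= 8)
        {
            // Copy as two possibly overlapping 8-byte chunks: both loads
            // happen before the stores, so overlap within [0, size) is fine.
            unsigned long long head, tail;
            std::memcpy(&head, src, 8);
            std::memcpy(&tail, static_cast<const char *>(src) + size - 8, 8);
            std::memcpy(dst, &head, 8);
            std::memcpy(static_cast<char *>(dst) + size - 8, &tail, 8);
            return dst;
        }
        for (size_t i = 0; i < size; ++i) // sizes 0..7: plain byte loop
            static_cast<char *>(dst)[i] = static_cast<const char *>(src)[i];
        return dst;
    }
    return std::memcpy(dst, src, size); // large copies: let libc handle it
}
```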
+* Improve performance of aggregation in the order of the sorting key (when the setting `optimize_aggregation_in_order` is enabled). [#19401](https://github.com/ClickHouse/ClickHouse/pull/19401) ([Anton Popov](https://github.com/CurtizJ)).
+
+#### Improvement
+
+* Add a connection pool for the PostgreSQL table/database engine and dictionary source. Should fix [#21444](https://github.com/ClickHouse/ClickHouse/issues/21444). [#21839](https://github.com/ClickHouse/ClickHouse/pull/21839) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Support non-default table schema for postgres storage/table-function. Closes [#21701](https://github.com/ClickHouse/ClickHouse/issues/21701). [#21711](https://github.com/ClickHouse/ClickHouse/pull/21711) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Support replicas priority for postgres dictionary source. [#21710](https://github.com/ClickHouse/ClickHouse/pull/21710) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Introduce a new merge tree setting `min_bytes_to_rebalance_partition_over_jbod` which allows assigning new parts to different disks of a JBOD volume in a balanced way. [#16481](https://github.com/ClickHouse/ClickHouse/pull/16481) ([Amos Bird](https://github.com/amosbird)).
+* Added `Grant`, `Revoke` and `System` values of the `query_kind` column for corresponding queries in `system.query_log`. [#21102](https://github.com/ClickHouse/ClickHouse/pull/21102) ([Vasily Nemkov](https://github.com/Enmk)).
+* Allow customizing timeouts for HTTP connections used for replication independently from other HTTP timeouts. [#20088](https://github.com/ClickHouse/ClickHouse/pull/20088) ([nvartolomei](https://github.com/nvartolomei)).
+* Better exception message in the client in case of an exception while the server is writing blocks. In previous versions the client could get a misleading message like `Data compressed with different methods`. [#22427](https://github.com/ClickHouse/ClickHouse/pull/22427) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Fix error `Directory tmp_fetch_XXX already exists`, which could happen after a failed part fetch. The temporary fetch directory is now deleted if it already exists. Fixes [#14197](https://github.com/ClickHouse/ClickHouse/issues/14197). [#22411](https://github.com/ClickHouse/ClickHouse/pull/22411) ([nvartolomei](https://github.com/nvartolomei)).
+* Fix MSan report for function `range` with `UInt256` argument (support for large integers is experimental). This closes [#22157](https://github.com/ClickHouse/ClickHouse/issues/22157). [#22387](https://github.com/ClickHouse/ClickHouse/pull/22387) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Add `current_database` column to `system.processes` table. It contains the current database of the query. [#22365](https://github.com/ClickHouse/ClickHouse/pull/22365) ([Alexander Kuzmenkov](https://github.com/akuzm)).
+* Add case-insensitive history search/navigation and subword movement features to `clickhouse-client`. [#22105](https://github.com/ClickHouse/ClickHouse/pull/22105) ([Amos Bird](https://github.com/amosbird)).
+* If a tuple of NULLs, e.g. `(NULL, NULL)`, is on the left hand side of the `IN` operator with tuples of non-NULLs on the right hand side, e.g. `SELECT (NULL, NULL) IN ((0, 0), (3, 1))`, return 0 instead of throwing an exception about incompatible types. The expression may also appear due to optimization of something like `SELECT (NULL, NULL) = (8, 0) OR (NULL, NULL) = (3, 2) OR (NULL, NULL) = (0, 0) OR (NULL, NULL) = (3, 1)`. This closes [#22017](https://github.com/ClickHouse/ClickHouse/issues/22017). [#22063](https://github.com/ClickHouse/ClickHouse/pull/22063) ([alexey-milovidov](https://github.com/alexey-milovidov)).
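A sketch of the three-valued logic behind the behaviour above (illustrative, not ClickHouse internals): a comparison involving NULL is "unknown", and a membership test that is never positively confirmed ends up as 0.

```cpp
#include <optional>

enum class Ternary { False, True, Unknown };

// SQL-style equality: NULL = anything is Unknown, never True.
Ternary equals(std::optional<int> a, std::optional<int> b)
{
    if (!a || !b)
        return Ternary::Unknown;
    return *a == *b ? Ternary::True : Ternary::False;
}
// Since (NULL, NULL) never compares equal to any right-hand tuple,
// `(NULL, NULL) IN (...)` can safely evaluate to 0.
```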
+* Update the used version of simdjson to 0.9.1. This fixes [#21984](https://github.com/ClickHouse/ClickHouse/issues/21984). [#22057](https://github.com/ClickHouse/ClickHouse/pull/22057) ([Vitaly Baranov](https://github.com/vitlibar)).
+* Added case-insensitive aliases for `CONNECTION_ID()` and `VERSION()` functions. This fixes [#22028](https://github.com/ClickHouse/ClickHouse/issues/22028). [#22042](https://github.com/ClickHouse/ClickHouse/pull/22042) ([Eugene Klimov](https://github.com/Slach)).
+* Add option `strict_increase` to the `windowFunnel` function to count each event only once (resolves [#21835](https://github.com/ClickHouse/ClickHouse/issues/21835)). [#22025](https://github.com/ClickHouse/ClickHouse/pull/22025) ([Vladimir](https://github.com/vdimir)).
+* If the partition key of a `MergeTree` table does not include `Date` or `DateTime` columns but includes exactly one `DateTime64` column, expose its values in the `min_time` and `max_time` columns in the `system.parts` and `system.parts_columns` tables. Add `min_time` and `max_time` columns to the `system.parts_columns` table (this was inconsistent with the `system.parts` table). This closes [#18244](https://github.com/ClickHouse/ClickHouse/issues/18244). [#22011](https://github.com/ClickHouse/ClickHouse/pull/22011) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Supported the `replication_alter_partitions_sync=1` setting in `clickhouse-copier` for moving partitions from the helping table to the destination. Decreased default timeouts. Fixes [#21911](https://github.com/ClickHouse/ClickHouse/issues/21911). [#21912](https://github.com/ClickHouse/ClickHouse/pull/21912) ([turbo jason](https://github.com/songenjie)).
+* Show the path to the data directory of `EmbeddedRocksDB` tables in system tables. [#21903](https://github.com/ClickHouse/ClickHouse/pull/21903) ([tavplubix](https://github.com/tavplubix)).
+* Add profile event `HedgedRequestsChangeReplica`, change the read data timeout from seconds to milliseconds. [#21886](https://github.com/ClickHouse/ClickHouse/pull/21886) ([Kruglov Pavel](https://github.com/Avogar)).
+* DiskS3 (experimental feature under development). Fixed a bug that made it impossible to move a directory if the destination is not empty and a cache disk is used. [#21837](https://github.com/ClickHouse/ClickHouse/pull/21837) ([Pavel Kovalenko](https://github.com/Jokser)).
+* Better formatting for `Array` and `Map` data types in the Web UI. [#21798](https://github.com/ClickHouse/ClickHouse/pull/21798) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Update clusters only if their configurations were updated. [#21685](https://github.com/ClickHouse/ClickHouse/pull/21685) ([Kruglov Pavel](https://github.com/Avogar)).
+* Propagate query and session settings for distributed DDL queries. Set `distributed_ddl_entry_format_version` to 2 to enable this. Added `distributed_ddl_output_mode` setting. Supported modes: `none`, `throw` (default), `null_status_on_timeout` and `never_throw`. Miscellaneous fixes and improvements for the `Replicated` database engine. [#21535](https://github.com/ClickHouse/ClickHouse/pull/21535) ([tavplubix](https://github.com/tavplubix)).
+* If `PODArray` was instantiated with an element size that is neither a fraction nor a multiple of 16, a buffer overflow was possible. No bugs in current releases exist. [#21533](https://github.com/ClickHouse/ClickHouse/pull/21533) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Add `last_error_time`/`last_error_message`/`last_error_stacktrace`/`remote` columns for `system.errors`. [#21529](https://github.com/ClickHouse/ClickHouse/pull/21529) ([Azat Khuzhin](https://github.com/azat)).
+* Add aliases `simpleJSONExtract/simpleJSONHas` to `visitParam/visitParamExtract{UInt, Int, Bool, Float, Raw, String}`. Fixes #21383. [#21519](https://github.com/ClickHouse/ClickHouse/pull/21519) ([fastio](https://github.com/fastio)).
+* Add setting `optimize_skip_unused_shards_limit` to limit the number of sharding key values for `optimize_skip_unused_shards`. [#21512](https://github.com/ClickHouse/ClickHouse/pull/21512) ([Azat Khuzhin](https://github.com/azat)).
+* Improve `clickhouse-format` to not throw an exception when there are extra spaces or a comment after the last query, and to throw an exception early with a readable message when formatting `ASTInsertQuery` with data. [#21311](https://github.com/ClickHouse/ClickHouse/pull/21311) ([flynn](https://github.com/ucasFL)).
+* Improve support of integer keys in data type `Map`. [#21157](https://github.com/ClickHouse/ClickHouse/pull/21157) ([Anton Popov](https://github.com/CurtizJ)).
+* MaterializeMySQL: attempt to reconnect to MySQL if the connection is lost. [#20961](https://github.com/ClickHouse/ClickHouse/pull/20961) ([Håvard Kvålen](https://github.com/havardk)).
+* Support more cases to rewrite `CROSS JOIN` to `INNER JOIN`. [#20392](https://github.com/ClickHouse/ClickHouse/pull/20392) ([Vladimir](https://github.com/vdimir)).
+* Do not create empty parts on INSERT when the `optimize_on_insert` setting is enabled. Fixes [#20304](https://github.com/ClickHouse/ClickHouse/issues/20304). [#20387](https://github.com/ClickHouse/ClickHouse/pull/20387) ([Kruglov Pavel](https://github.com/Avogar)).
+* `MaterializeMySQL`: add minmax skipping index for `_version` column. [#20382](https://github.com/ClickHouse/ClickHouse/pull/20382) ([Stig Bakken](https://github.com/stigsb)).
+* Add option `--backslash` for `clickhouse-format`, which can add a backslash at the end of each line of the formatted query. [#21494](https://github.com/ClickHouse/ClickHouse/pull/21494) ([flynn](https://github.com/ucasFL)).
+* Now ClickHouse will not throw a `LOGICAL_ERROR` exception when we try to mutate an already covered part. Fixes [#22013](https://github.com/ClickHouse/ClickHouse/issues/22013). [#22291](https://github.com/ClickHouse/ClickHouse/pull/22291) ([alesapin](https://github.com/alesapin)).
+
+#### Bug Fix
+
+* Remove socket from epoll before cancelling packet receiver in `HedgedConnections` to prevent a possible race. Fixes [#22161](https://github.com/ClickHouse/ClickHouse/issues/22161). [#22443](https://github.com/ClickHouse/ClickHouse/pull/22443) ([Kruglov Pavel](https://github.com/Avogar)).
+* Add (missing) memory accounting in parallel parsing routines. In previous versions OOM was possible when the result set contained very large blocks of data. This closes [#22008](https://github.com/ClickHouse/ClickHouse/issues/22008). [#22425](https://github.com/ClickHouse/ClickHouse/pull/22425) ([alexey-milovidov](https://github.com/alexey-milovidov)).
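A minimal sketch of what "memory accounting" means here (all names invented for illustration): allocations made by parsing threads are charged against a shared limit, so an oversized result fails the query instead of taking the whole server down.

```cpp
#include <atomic>
#include <cstddef>
#include <stdexcept>

class SimpleMemoryTracker
{
public:
    explicit SimpleMemoryTracker(size_t limit_) : limit(limit_) {}

    // Called before each allocation in a parsing thread.
    void alloc(size_t bytes)
    {
        if (used.fetch_add(bytes, std::memory_order_relaxed) + bytes > limit)
        {
            used.fetch_sub(bytes, std::memory_order_relaxed);
            throw std::runtime_error("Memory limit exceeded");
        }
    }

    void free(size_t bytes) { used.fetch_sub(bytes, std::memory_order_relaxed); }

private:
    std::atomic<size_t> used{0};
    const size_t limit;
};
```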
+* Fix an exception which may happen when a `SELECT` has a constant `WHERE` condition and the source table has columns whose names are digits. [#22270](https://github.com/ClickHouse/ClickHouse/pull/22270) ([LiuNeng](https://github.com/liuneng1994)).
+* Fix query cancellation with `use_hedged_requests=0` and `async_socket_for_remote=1`. [#22183](https://github.com/ClickHouse/ClickHouse/pull/22183) ([Azat Khuzhin](https://github.com/azat)).
+* Fix uncaught exception in `InterserverIOHTTPHandler`. [#22146](https://github.com/ClickHouse/ClickHouse/pull/22146) ([Azat Khuzhin](https://github.com/azat)).
+* Fix docker entrypoint in case `http_port` is not in the config. [#22132](https://github.com/ClickHouse/ClickHouse/pull/22132) ([Ewout](https://github.com/devwout)).
+* Fix error `Invalid number of rows in Chunk` in `JOIN` with `TOTALS` and `arrayJoin`. Closes [#19303](https://github.com/ClickHouse/ClickHouse/issues/19303). [#22129](https://github.com/ClickHouse/ClickHouse/pull/22129) ([Vladimir](https://github.com/vdimir)).
+* Fix the name of the background thread pool used to poll messages from Kafka. With the broken thread pool name, the Kafka engine would not consume messages from the message queue. [#22122](https://github.com/ClickHouse/ClickHouse/pull/22122) ([fastio](https://github.com/fastio)).
+* Fix waiting for `OPTIMIZE` and `ALTER` queries for `ReplicatedMergeTree` table engines. Now the query will not hang when the table is detached or restarted. [#22118](https://github.com/ClickHouse/ClickHouse/pull/22118) ([alesapin](https://github.com/alesapin)).
+* Disable `async_socket_for_remote`/`use_hedged_requests` for buggy Linux kernels. [#22109](https://github.com/ClickHouse/ClickHouse/pull/22109) ([Azat Khuzhin](https://github.com/azat)).
+* Docker entrypoint: avoid chown of `.` in case when `LOG_PATH` is empty. Closes [#22100](https://github.com/ClickHouse/ClickHouse/issues/22100). [#22102](https://github.com/ClickHouse/ClickHouse/pull/22102) ([filimonov](https://github.com/filimonov)).
+* The function `decrypt` was lacking a check for the minimal size of data encrypted in `AEAD` mode. This closes [#21897](https://github.com/ClickHouse/ClickHouse/issues/21897). [#22064](https://github.com/ClickHouse/ClickHouse/pull/22064) ([alexey-milovidov](https://github.com/alexey-milovidov)).
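The missing check can be illustrated like this (hypothetical helper; the tag size depends on the mode, 16 bytes being typical for AES-GCM): in AEAD modes the ciphertext carries a fixed-size authentication tag, so any input shorter than the tag is malformed and must be rejected before decryption is attempted.

```cpp
#include <cstddef>
#include <stdexcept>

void checkAeadInputSize(size_t input_size, size_t tag_size = 16)
{
    if (input_size < tag_size)
        throw std::invalid_argument("Ciphertext is too short to contain an AEAD tag");
}
```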
+* In a rare case, a merge for `CollapsingMergeTree` may create a granule with `index_granularity + 1` rows. Because of this, an internal check added in [#18928](https://github.com/ClickHouse/ClickHouse/issues/18928) (affects 21.2 and 21.3) may fail with the error `Incomplete granules are not allowed while blocks are granules size`. This error prevented parts from merging. [#21976](https://github.com/ClickHouse/ClickHouse/pull/21976) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Reverted [#15454](https://github.com/ClickHouse/ClickHouse/issues/15454), which could cause a significant increase in memory usage while loading external dictionaries of hashed type. This closes [#21935](https://github.com/ClickHouse/ClickHouse/issues/21935). [#21948](https://github.com/ClickHouse/ClickHouse/pull/21948) ([Maksim Kita](https://github.com/kitaisreal)).
+* Prevent hedged connections overlaps (`Unknown packet 9 from server` error). [#21941](https://github.com/ClickHouse/ClickHouse/pull/21941) ([Azat Khuzhin](https://github.com/azat)).
+* Fix reading the HTTP POST request with "multipart/form-data" content type in some cases. [#21936](https://github.com/ClickHouse/ClickHouse/pull/21936) ([Ivan](https://github.com/abyss7)).
+* Fix wrong `ORDER BY` results when a query contains window functions and the optimization for reading in primary key order is applied. Fixes [#21828](https://github.com/ClickHouse/ClickHouse/issues/21828). [#21915](https://github.com/ClickHouse/ClickHouse/pull/21915) ([Alexander Kuzmenkov](https://github.com/akuzm)).
+* Fix deadlock in the first catboost model execution. Closes [#13832](https://github.com/ClickHouse/ClickHouse/issues/13832). [#21844](https://github.com/ClickHouse/ClickHouse/pull/21844) ([Kruglov Pavel](https://github.com/Avogar)).
+* Fix incorrect query result (and possible crash) which could happen when a `WHERE` or `HAVING` condition is pushed before `GROUP BY`. Fixes [#21773](https://github.com/ClickHouse/ClickHouse/issues/21773). [#21841](https://github.com/ClickHouse/ClickHouse/pull/21841) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Better error handling and logging in `WriteBufferFromS3`. [#21836](https://github.com/ClickHouse/ClickHouse/pull/21836) ([Pavel Kovalenko](https://github.com/Jokser)).
+* Fix possible crashes in aggregate functions with the combinator `Distinct` while using two-level aggregation. This is a follow-up fix of [#18365](https://github.com/ClickHouse/ClickHouse/pull/18365). It could only be reproduced in a production environment. [#21818](https://github.com/ClickHouse/ClickHouse/pull/21818) ([Amos Bird](https://github.com/amosbird)).
+* Fix scalar subquery index analysis. This fixes [#21717](https://github.com/ClickHouse/ClickHouse/issues/21717), which was introduced in [#18896](https://github.com/ClickHouse/ClickHouse/pull/18896). [#21766](https://github.com/ClickHouse/ClickHouse/pull/21766) ([Amos Bird](https://github.com/amosbird)).
+* Fix a bug for `ReplicatedMerge` table engines when an `ALTER MODIFY COLUMN` query doesn't change the type of a `Decimal` column if its size (32 bit or 64 bit) doesn't change. [#21728](https://github.com/ClickHouse/ClickHouse/pull/21728) ([alesapin](https://github.com/alesapin)).
+* Fix possible infinite waiting when concurrent `OPTIMIZE` and `DROP` are run for `ReplicatedMergeTree`. [#21716](https://github.com/ClickHouse/ClickHouse/pull/21716) ([Azat Khuzhin](https://github.com/azat)).
+* Fix function `arrayElement` with type `Map` for constant integer arguments. [#21699](https://github.com/ClickHouse/ClickHouse/pull/21699) ([Anton Popov](https://github.com/CurtizJ)).
+* Fix SIGSEGV on non-existing attributes from `ip_trie` with `access_to_key_from_attributes`. [#21692](https://github.com/ClickHouse/ClickHouse/pull/21692) ([Azat Khuzhin](https://github.com/azat)).
+* The server now starts accepting connections only after `DDLWorker` and dictionaries initialization. [#21676](https://github.com/ClickHouse/ClickHouse/pull/21676) ([Azat Khuzhin](https://github.com/azat)).
+* Add type conversion for keys of tables of type `Join` (previously this led to SIGSEGV). [#21646](https://github.com/ClickHouse/ClickHouse/pull/21646) ([Azat Khuzhin](https://github.com/azat)).
+* Fix cancellation of distributed requests (for example, a simple select from multiple shards with limit, i.e. `select * from remote('127.{2,3}', system.numbers) limit 100`) with `async_socket_for_remote=1`. [#21643](https://github.com/ClickHouse/ClickHouse/pull/21643) ([Azat Khuzhin](https://github.com/azat)).
+* Fix `fsync_part_directory` for horizontal merge. [#21642](https://github.com/ClickHouse/ClickHouse/pull/21642) ([Azat Khuzhin](https://github.com/azat)).
+* Remove unknown columns from the joined table in `WHERE` for queries to external database engines (MySQL, PostgreSQL). Closes [#14614](https://github.com/ClickHouse/ClickHouse/issues/14614), closes [#19288](https://github.com/ClickHouse/ClickHouse/issues/19288) (dup), closes [#19645](https://github.com/ClickHouse/ClickHouse/issues/19645) (dup). [#21640](https://github.com/ClickHouse/ClickHouse/pull/21640) ([Vladimir](https://github.com/vdimir)).
+* `std::terminate` was called if there was an error writing data into S3. [#21624](https://github.com/ClickHouse/ClickHouse/pull/21624) ([Vladimir](https://github.com/vdimir)).
+* Fix possible error `Cannot find column` when `optimize_skip_unused_shards` is enabled and zero shards are used. [#21579](https://github.com/ClickHouse/ClickHouse/pull/21579) ([Azat Khuzhin](https://github.com/azat)).
+* If a query had a constant `WHERE` condition and the setting `optimize_skip_unused_shards` was enabled, all shards could be skipped and the query could return an incorrect empty result. [#21550](https://github.com/ClickHouse/ClickHouse/pull/21550) ([Amos Bird](https://github.com/amosbird)).
+* Fix table function `clusterAllReplicas` returning a wrong `_shard_num`. Closes [#21481](https://github.com/ClickHouse/ClickHouse/issues/21481). [#21498](https://github.com/ClickHouse/ClickHouse/pull/21498) ([flynn](https://github.com/ucasFL)).
+* Fix the S3 table holding old credentials after a config update. [#21457](https://github.com/ClickHouse/ClickHouse/pull/21457) ([Grigory Pervakov](https://github.com/GrigoryPervakov)).
+* Fixed a race on the SSL object inside `SecureSocket` in Poco. [#21456](https://github.com/ClickHouse/ClickHouse/pull/21456) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
+* Fix `Avro` format parsing for `Kafka`. Fixes [#21437](https://github.com/ClickHouse/ClickHouse/issues/21437). [#21438](https://github.com/ClickHouse/ClickHouse/pull/21438) ([Ilya Golshtein](https://github.com/ilejn)).
+* Fix receive and send timeouts and non-blocking read in secure socket. [#21429](https://github.com/ClickHouse/ClickHouse/pull/21429) ([Kruglov Pavel](https://github.com/Avogar)).
+* The `force_drop_table` flag didn't work for `MATERIALIZED VIEW`; it's fixed. Fixes [#18943](https://github.com/ClickHouse/ClickHouse/issues/18943). [#20626](https://github.com/ClickHouse/ClickHouse/pull/20626) ([tavplubix](https://github.com/tavplubix)).
+* Fix name clashes in `PredicateRewriteVisitor`. They caused incorrect `WHERE` filtration after a full join. Closes [#20497](https://github.com/ClickHouse/ClickHouse/issues/20497). [#20622](https://github.com/ClickHouse/ClickHouse/pull/20622) ([Vladimir](https://github.com/vdimir)).
+
+#### Build/Testing/Packaging Improvement
+
+* Add [Jepsen](https://github.com/jepsen-io/jepsen) tests for ClickHouse Keeper. [#21677](https://github.com/ClickHouse/ClickHouse/pull/21677) ([alesapin](https://github.com/alesapin)).
+* Run stateless tests in parallel in CI. Depends on [#22181](https://github.com/ClickHouse/ClickHouse/issues/22181). [#22300](https://github.com/ClickHouse/ClickHouse/pull/22300) ([alesapin](https://github.com/alesapin)).
+* Enable status check for [SQLancer](https://github.com/sqlancer/sqlancer) CI run. [#22015](https://github.com/ClickHouse/ClickHouse/pull/22015) ([Ilya Yatsishin](https://github.com/qoega)).
+* Multiple preparations for PowerPC builds: Enable the bundled openldap on `ppc64le`. [#22487](https://github.com/ClickHouse/ClickHouse/pull/22487) ([Kfir Itzhak](https://github.com/mastertheknife)). Enable compiling on `ppc64le` with Clang. [#22476](https://github.com/ClickHouse/ClickHouse/pull/22476) ([Kfir Itzhak](https://github.com/mastertheknife)). Fix compiling boost on `ppc64le`. [#22474](https://github.com/ClickHouse/ClickHouse/pull/22474) ([Kfir Itzhak](https://github.com/mastertheknife)). Fix the CMake error about the internal CMake variable `CMAKE_ASM_COMPILE_OBJECT` not set on `ppc64le`. [#22469](https://github.com/ClickHouse/ClickHouse/pull/22469) ([Kfir Itzhak](https://github.com/mastertheknife)). Fix Fedora/RHEL/CentOS not finding `libclang_rt.builtins` on `ppc64le`. [#22458](https://github.com/ClickHouse/ClickHouse/pull/22458) ([Kfir Itzhak](https://github.com/mastertheknife)). Enable building with `jemalloc` on `ppc64le`. [#22447](https://github.com/ClickHouse/ClickHouse/pull/22447) ([Kfir Itzhak](https://github.com/mastertheknife)). Fix ClickHouse's config embedding and cctz's timezone embedding on `ppc64le`. [#22445](https://github.com/ClickHouse/ClickHouse/pull/22445) ([Kfir Itzhak](https://github.com/mastertheknife)). Fixed compiling on `ppc64le` and use the correct instruction pointer register on `ppc64le`. [#22430](https://github.com/ClickHouse/ClickHouse/pull/22430) ([Kfir Itzhak](https://github.com/mastertheknife)).
+* Re-enable the S3 (AWS) library on `aarch64`. [#22484](https://github.com/ClickHouse/ClickHouse/pull/22484) ([Kfir Itzhak](https://github.com/mastertheknife)).
+* Add `tzdata` to Docker containers because reading `ORC` formats requires it. This closes [#14156](https://github.com/ClickHouse/ClickHouse/issues/14156). [#22000](https://github.com/ClickHouse/ClickHouse/pull/22000) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Introduce two arguments for the `clickhouse-server` image Dockerfile: `deb_location` and `single_binary_location`. [#21977](https://github.com/ClickHouse/ClickHouse/pull/21977) ([filimonov](https://github.com/filimonov)).
+* Allow to use clang-tidy with release builds by enabling assertions if it is used. [#21914](https://github.com/ClickHouse/ClickHouse/pull/21914) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Add llvm-12 binary names to search for in CMake scripts. Implicit constant conversions to mute clang warnings. Updated submodules to build with CMake 3.19. Mute recursion in macro expansion in the `readpassphrase` library. Deprecated `-fuse-ld` changed to `--ld-path` for clang. [#21597](https://github.com/ClickHouse/ClickHouse/pull/21597) ([Ilya Yatsishin](https://github.com/qoega)).
+* Updated `docker/test/testflows/runner/dockerd-entrypoint.sh` to use the Yandex dockerhub-proxy, because Docker Hub has enabled very restrictive rate limits. [#21551](https://github.com/ClickHouse/ClickHouse/pull/21551) ([vzakaznikov](https://github.com/vzakaznikov)).
+* Fix macOS shared lib build. [#20184](https://github.com/ClickHouse/ClickHouse/pull/20184) ([nvartolomei](https://github.com/nvartolomei)).
+* Add `ctime` option to `zookeeper-dump-tree`. It allows to dump node creation time. [#21842](https://github.com/ClickHouse/ClickHouse/pull/21842) ([Ilya](https://github.com/HumanUser)).
+
+
 ## ClickHouse release 21.3 (LTS)
 
 ### ClickHouse release v21.3, 2021-03-12
diff --git a/CMakeLists.txt b/CMakeLists.txt
index c4d429c565f..2c3fa088995 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -68,17 +68,30 @@ endif ()
 include (cmake/find/ccache.cmake)
 
-option(ENABLE_CHECK_HEAVY_BUILDS "Don't allow C++ translation units to compile too long or to take too much memory while compiling" OFF)
+# Take care to add prlimit in the command line before ccache, or else ccache thinks that
+# prlimit is the compiler, and clang++ is its input file, and refuses to work with
+# multiple inputs, e.g. in the ccache log:
+# [2021-03-31T18:06:32.655327 36900] Command line: /usr/bin/ccache prlimit --as=10000000000 --data=5000000000 --cpu=600 /usr/bin/clang++-11 - ...... std=gnu++2a -MD -MT src/CMakeFiles/dbms.dir/Storages/MergeTree/IMergeTreeDataPart.cpp.o -MF src/CMakeFiles/dbms.dir/Storages/MergeTree/IMergeTreeDataPart.cpp.o.d -o src/CMakeFiles/dbms.dir/Storages/MergeTree/IMergeTreeDataPart.cpp.o -c ../src/Storages/MergeTree/IMergeTreeDataPart.cpp
+#
+# [2021-03-31T18:06:32.656704 36900] Multiple input files: /usr/bin/clang++-11 and ../src/Storages/MergeTree/IMergeTreeDataPart.cpp
+#
+# Another way would be to use the --ccache-skip option before clang++-11 to make
+# ccache ignore it.
+option(ENABLE_CHECK_HEAVY_BUILDS "Don't allow C++ translation units to compile too long or to take too much memory while compiling." OFF)
 if (ENABLE_CHECK_HEAVY_BUILDS)
     # set DATA (since RSS does not work since 2.6.x+) to 2G
     set (RLIMIT_DATA 5000000000)
     # set VIRT (RLIMIT_AS) to 10G (DATA*10)
     set (RLIMIT_AS 10000000000)
+    # set CPU time limit to 600 seconds
+    set (RLIMIT_CPU 600)
+
     # gcc10/clang -fsanitize=memory is too heavy
     if (SANITIZE STREQUAL "memory" OR COMPILER_GCC)
         set (RLIMIT_DATA 10000000000)
     endif()
-    set (CMAKE_CXX_COMPILER_LAUNCHER prlimit --as=${RLIMIT_AS} --data=${RLIMIT_DATA} --cpu=600)
+
+    set (CMAKE_CXX_COMPILER_LAUNCHER prlimit --as=${RLIMIT_AS} --data=${RLIMIT_DATA} --cpu=${RLIMIT_CPU} ${CMAKE_CXX_COMPILER_LAUNCHER})
 endif ()
 
 if (NOT CMAKE_BUILD_TYPE OR CMAKE_BUILD_TYPE STREQUAL "None")
@@ -154,9 +167,10 @@ endif ()
 # If turned `ON`, assumes the user has either the system GTest library or the bundled one.
 option(ENABLE_TESTS "Provide unit_test_dbms target with Google.Test unit tests" ON)
+option(ENABLE_EXAMPLES "Build all example programs in 'examples' subdirectories" OFF)
 
-if (OS_LINUX AND NOT UNBUNDLED AND MAKE_STATIC_LIBRARIES AND NOT SPLIT_SHARED_LIBRARIES AND CMAKE_VERSION VERSION_GREATER "3.9.0")
-    # Only for Linux, x86_64.
+if (OS_LINUX AND (ARCH_AMD64 OR ARCH_AARCH64) AND NOT UNBUNDLED AND MAKE_STATIC_LIBRARIES AND NOT SPLIT_SHARED_LIBRARIES AND CMAKE_VERSION VERSION_GREATER "3.9.0")
+    # Only for Linux, x86_64 or aarch64.
     option(GLIBC_COMPATIBILITY "Enable compatibility with older glibc libraries." ON)
 elseif(GLIBC_COMPATIBILITY)
     message (${RECONFIGURE_MESSAGE_LEVEL} "Glibc compatibility cannot be enabled in current configuration")
@@ -244,12 +258,17 @@ endif()
 include(cmake/cpu_features.cmake)
 
-option(ARCH_NATIVE "Add -march=native compiler flag")
+option(ARCH_NATIVE "Add -march=native compiler flag. This makes your binaries non-portable, but more performant code may be generated.")
 
 if (ARCH_NATIVE)
     set (COMPILER_FLAGS "${COMPILER_FLAGS} -march=native")
 endif ()
 
+# Asynchronous unwind tables are needed for the Query Profiler.
+# They are already enabled by default on some platforms, but possibly not on all platforms.
+# Enable them explicitly.
+set (COMPILER_FLAGS "${COMPILER_FLAGS} -fasynchronous-unwind-tables") + if (${CMAKE_VERSION} VERSION_LESS "3.12.4") # CMake < 3.12 doesn't support setting 20 as a C++ standard version. # We will add C++ standard controlling flag in CMAKE_CXX_FLAGS manually for now. @@ -277,6 +296,12 @@ if (COMPILER_GCC OR COMPILER_CLANG) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsized-deallocation") endif () +# falign-functions=32 prevents from random performance regressions with the code change. Thus, providing more stable +# benchmarks. +if (COMPILER_GCC OR COMPILER_CLANG) + set(COMPILER_FLAGS "${COMPILER_FLAGS} -falign-functions=32") +endif () + # Compiler-specific coverage flags e.g. -fcoverage-mapping for gcc option(WITH_COVERAGE "Profile the resulting binary/binaries" OFF) @@ -475,6 +500,7 @@ include (cmake/find/krb5.cmake) include (cmake/find/libgsasl.cmake) include (cmake/find/cyrus-sasl.cmake) include (cmake/find/rdkafka.cmake) +include (cmake/find/libuv.cmake) # for amqpcpp and cassandra include (cmake/find/amqpcpp.cmake) include (cmake/find/capnp.cmake) include (cmake/find/llvm.cmake) @@ -497,6 +523,7 @@ include (cmake/find/fast_float.cmake) include (cmake/find/rapidjson.cmake) include (cmake/find/fastops.cmake) include (cmake/find/odbc.cmake) +include (cmake/find/nanodbc.cmake) include (cmake/find/rocksdb.cmake) include (cmake/find/libpqxx.cmake) include (cmake/find/nuraft.cmake) @@ -566,6 +593,9 @@ include_directories(${ConfigIncludePath}) # Add as many warnings as possible for our own code. include (cmake/warnings.cmake) +# Check if needed compiler flags are supported +include (cmake/check_flags.cmake) + add_subdirectory (base) add_subdirectory (src) add_subdirectory (programs) diff --git a/base/bridge/IBridge.cpp b/base/bridge/IBridge.cpp index 348b0fd7190..b2ec53158b1 100644 --- a/base/bridge/IBridge.cpp +++ b/base/bridge/IBridge.cpp @@ -159,17 +159,12 @@ void IBridge::initialize(Application & self) if (port > 0xFFFF) throw Exception("Out of range 'http-port': " + std::to_string(port), ErrorCodes::ARGUMENT_OUT_OF_BOUND); - http_timeout = config().getUInt("http-timeout", DEFAULT_HTTP_READ_BUFFER_TIMEOUT); + http_timeout = config().getUInt64("http-timeout", DEFAULT_HTTP_READ_BUFFER_TIMEOUT); max_server_connections = config().getUInt("max-server-connections", 1024); - keep_alive_timeout = config().getUInt("keep-alive-timeout", 10); + keep_alive_timeout = config().getUInt64("keep-alive-timeout", 10); initializeTerminationAndSignalProcessing(); -#if USE_ODBC - if (bridgeName() == "ODBCBridge") - Poco::Data::ODBC::Connector::registerConnector(); -#endif - ServerApplication::initialize(self); // NOLINT } @@ -200,8 +195,8 @@ int IBridge::main(const std::vector & /*args*/) http_params->setKeepAliveTimeout(keep_alive_timeout); auto shared_context = Context::createShared(); - Context context(Context::createGlobal(shared_context.get())); - context.makeGlobalContext(); + auto context = Context::createGlobal(shared_context.get()); + context->makeGlobalContext(); if (config().has("query_masking_rules")) SensitiveDataMasker::setInstance(std::make_unique(config(), "query_masking_rules")); diff --git a/base/bridge/IBridge.h b/base/bridge/IBridge.h index f9bb00f9e48..c64003d9959 100644 --- a/base/bridge/IBridge.h +++ b/base/bridge/IBridge.h @@ -2,10 +2,11 @@ #include #include -#include -#include #include +#include +#include + namespace DB { @@ -29,9 +30,9 @@ protected: int main(const std::vector & args) override; - virtual const std::string bridgeName() const = 0; + virtual std::string bridgeName() const = 
0; - virtual HandlerFactoryPtr getHandlerFactoryPtr(Context & context) const = 0; + virtual HandlerFactoryPtr getHandlerFactoryPtr(ContextPtr context) const = 0; size_t keep_alive_timeout; diff --git a/src/Common/BorrowedObjectPool.h b/base/common/BorrowedObjectPool.h similarity index 99% rename from src/Common/BorrowedObjectPool.h rename to base/common/BorrowedObjectPool.h index d5263cf92a8..6a90a7e7122 100644 --- a/src/Common/BorrowedObjectPool.h +++ b/base/common/BorrowedObjectPool.h @@ -7,8 +7,7 @@ #include #include - -#include +#include /** Pool for limited size objects that cannot be used from different threads simultaneously. * The main use case is to have fixed size of objects that can be reused in difference threads during their lifetime diff --git a/base/common/CMakeLists.txt b/base/common/CMakeLists.txt index 7dfb9bc10c0..e5e18669ebe 100644 --- a/base/common/CMakeLists.txt +++ b/base/common/CMakeLists.txt @@ -29,7 +29,7 @@ elseif (ENABLE_READLINE) endif () if (USE_DEBUG_HELPERS) - set (INCLUDE_DEBUG_HELPERS "-include ${ClickHouse_SOURCE_DIR}/base/common/iostream_debug_helpers.h") + set (INCLUDE_DEBUG_HELPERS "-include \"${ClickHouse_SOURCE_DIR}/base/common/iostream_debug_helpers.h\"") set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${INCLUDE_DEBUG_HELPERS}") endif () @@ -45,7 +45,7 @@ if (USE_INTERNAL_CCTZ) set_source_files_properties(DateLUTImpl.cpp PROPERTIES COMPILE_DEFINITIONS USE_INTERNAL_CCTZ) endif() -target_include_directories(common PUBLIC .. ${CMAKE_CURRENT_BINARY_DIR}/..) +target_include_directories(common PUBLIC .. "${CMAKE_CURRENT_BINARY_DIR}/..") if (OS_DARWIN AND NOT MAKE_STATIC_LIBRARIES) target_link_libraries(common PUBLIC -Wl,-U,_inside_main) diff --git a/base/common/DateLUTImpl.h b/base/common/DateLUTImpl.h index 363f281584e..9e60181e802 100644 --- a/base/common/DateLUTImpl.h +++ b/base/common/DateLUTImpl.h @@ -25,7 +25,7 @@ #if defined(__PPC__) -#if !__clang__ +#if !defined(__clang__) #pragma GCC diagnostic ignored "-Wmaybe-uninitialized" #endif #endif @@ -1266,7 +1266,7 @@ public: }; #if defined(__PPC__) -#if !__clang__ +#if !defined(__clang__) #pragma GCC diagnostic pop #endif #endif diff --git a/base/common/DecomposedFloat.h b/base/common/DecomposedFloat.h new file mode 100644 index 00000000000..078ba823c15 --- /dev/null +++ b/base/common/DecomposedFloat.h @@ -0,0 +1,216 @@ +#pragma once + +#include +#include +#include +#include + + +/// Allows to check the internals of IEEE-754 floating point number. + +template struct FloatTraits; + +template <> +struct FloatTraits +{ + using UInt = uint32_t; + static constexpr size_t bits = 32; + static constexpr size_t exponent_bits = 8; + static constexpr size_t mantissa_bits = bits - exponent_bits - 1; +}; + +template <> +struct FloatTraits +{ + using UInt = uint64_t; + static constexpr size_t bits = 64; + static constexpr size_t exponent_bits = 11; + static constexpr size_t mantissa_bits = bits - exponent_bits - 1; +}; + + +/// x = sign * (2 ^ normalized_exponent) * (1 + mantissa * 2 ^ -mantissa_bits) +/// x = sign * (2 ^ normalized_exponent + mantissa * 2 ^ (normalized_exponent - mantissa_bits)) +template +struct DecomposedFloat +{ + using Traits = FloatTraits; + + DecomposedFloat(T x) + { + memcpy(&x_uint, &x, sizeof(x)); + } + + typename Traits::UInt x_uint; + + bool is_negative() const + { + return x_uint >> (Traits::bits - 1); + } + + /// Returns 0 for both +0. and -0. + int sign() const + { + return (exponent() == 0 && mantissa() == 0) + ? 0 + : (is_negative() + ? 
-1 + : 1); + } + + uint16_t exponent() const + { + return (x_uint >> (Traits::mantissa_bits)) & (((1ull << (Traits::exponent_bits + 1)) - 1) >> 1); + } + + int16_t normalized_exponent() const + { + return int16_t(exponent()) - ((1ull << (Traits::exponent_bits - 1)) - 1); + } + + uint64_t mantissa() const + { + return x_uint & ((1ull << Traits::mantissa_bits) - 1); + } + + int64_t mantissa_with_sign() const + { + return is_negative() ? -mantissa() : mantissa(); + } + + /// NOTE Probably floating point instructions can be better. + bool is_integer_in_representable_range() const + { + return x_uint == 0 + || (normalized_exponent() >= 0 /// The number is not less than one + /// The number is inside the range where every integer has exact representation in float + && normalized_exponent() <= static_cast(Traits::mantissa_bits) + /// After multiplying by 2^exp, the fractional part becomes zero, which means the number is an integer + && ((mantissa() & ((1ULL << (Traits::mantissa_bits - normalized_exponent())) - 1)) == 0)); + } + + + /// Compare float with integer of arbitrary width (both signed and unsigned are supported). Assuming two's complement arithmetic. + /// Infinities are compared correctly. NaNs are treated similarly to infinities, so they can be less than all numbers. + /// (note that we need total order) + template + int compare(Int rhs) + { + if (rhs == 0) + return sign(); + + /// Different signs + if (is_negative() && rhs > 0) + return -1; + if (!is_negative() && rhs < 0) + return 1; + + /// Fractional number with magnitude less than one + if (normalized_exponent() < 0) + { + if (!is_negative()) + return rhs > 0 ? -1 : 1; + else + return rhs >= 0 ? -1 : 1; + } + + /// The case of the most negative integer + if constexpr (is_signed_v) + { + if (rhs == std::numeric_limits::lowest()) + { + assert(is_negative()); + + if (normalized_exponent() < static_cast(8 * sizeof(Int) - is_signed_v)) + return 1; + if (normalized_exponent() > static_cast(8 * sizeof(Int) - is_signed_v)) + return -1; + + if (mantissa() == 0) + return 0; + else + return -1; + } + } + + /// Too large number: abs(float) > abs(rhs). Also the case with infinities and NaN. + if (normalized_exponent() >= static_cast(8 * sizeof(Int) - is_signed_v)) + return is_negative() ? -1 : 1; + + using UInt = make_unsigned_t; + UInt uint_rhs = rhs < 0 ? -rhs : rhs; + + /// Smaller octave: abs(rhs) < abs(float) + if (uint_rhs < (static_cast(1) << normalized_exponent())) + return is_negative() ? -1 : 1; + + /// Larger octave: abs(rhs) > abs(float) + if (normalized_exponent() + 1 < static_cast(8 * sizeof(Int) - is_signed_v) + && uint_rhs >= (static_cast(1) << (normalized_exponent() + 1))) + return is_negative() ? 1 : -1; + + /// The same octave + /// uint_rhs == 2 ^ normalized_exponent + mantissa * 2 ^ (normalized_exponent - mantissa_bits) + + bool large_and_always_integer = normalized_exponent() >= static_cast(Traits::mantissa_bits); + + typename Traits::UInt a = large_and_always_integer + ? mantissa() << (normalized_exponent() - Traits::mantissa_bits) + : mantissa() >> (Traits::mantissa_bits - normalized_exponent()); + + typename Traits::UInt b = uint_rhs - (static_cast(1) << normalized_exponent()); + + if (a < b) + return is_negative() ? 1 : -1; + if (a > b) + return is_negative() ? -1 : 1; + + /// Float has no fractional part means that the numbers are equal.
+ if (large_and_always_integer || (mantissa() & ((1ULL << (Traits::mantissa_bits - normalized_exponent())) - 1)) == 0) + return 0; + else + /// Float has fractional part means its abs value is larger. + return is_negative() ? -1 : 1; + } + + + template + bool equals(Int rhs) + { + return compare(rhs) == 0; + } + + template + bool notEquals(Int rhs) + { + return compare(rhs) != 0; + } + + template + bool less(Int rhs) + { + return compare(rhs) < 0; + } + + template + bool greater(Int rhs) + { + return compare(rhs) > 0; + } + + template + bool lessOrEquals(Int rhs) + { + return compare(rhs) <= 0; + } + + template + bool greaterOrEquals(Int rhs) + { + return compare(rhs) >= 0; + } +}; + + +using DecomposedFloat64 = DecomposedFloat; +using DecomposedFloat32 = DecomposedFloat; diff --git a/src/Common/MoveOrCopyIfThrow.h b/base/common/MoveOrCopyIfThrow.h similarity index 100% rename from src/Common/MoveOrCopyIfThrow.h rename to base/common/MoveOrCopyIfThrow.h diff --git a/base/common/ReplxxLineReader.cpp b/base/common/ReplxxLineReader.cpp index fcd1610e589..7893e56d751 100644 --- a/base/common/ReplxxLineReader.cpp +++ b/base/common/ReplxxLineReader.cpp @@ -91,6 +91,10 @@ ReplxxLineReader::ReplxxLineReader( /// it also binded to M-p/M-n). rx.bind_key(Replxx::KEY::meta('N'), [this](char32_t code) { return rx.invoke(Replxx::ACTION::COMPLETE_NEXT, code); }); rx.bind_key(Replxx::KEY::meta('P'), [this](char32_t code) { return rx.invoke(Replxx::ACTION::COMPLETE_PREVIOUS, code); }); + /// By default M-BACKSPACE is KILL_TO_WHITESPACE_ON_LEFT, while in readline it is backward-kill-word + rx.bind_key(Replxx::KEY::meta(Replxx::KEY::BACKSPACE), [this](char32_t code) { return rx.invoke(Replxx::ACTION::KILL_TO_BEGINING_OF_WORD, code); }); + /// By default C-w is KILL_TO_BEGINING_OF_WORD, while in readline it is unix-word-rubout + rx.bind_key(Replxx::KEY::control('W'), [this](char32_t code) { return rx.invoke(Replxx::ACTION::KILL_TO_WHITESPACE_ON_LEFT, code); }); rx.bind_key(Replxx::KEY::meta('E'), [this](char32_t) { openEditor(); return Replxx::ACTION_RESULT::CONTINUE; }); } diff --git a/base/common/arithmeticOverflow.h b/base/common/arithmeticOverflow.h index c170d214636..175e75a62f4 100644 --- a/base/common/arithmeticOverflow.h +++ b/base/common/arithmeticOverflow.h @@ -56,27 +56,33 @@ namespace common } template <> - inline bool addOverflow(__int128 x, __int128 y, __int128 & res) + inline bool addOverflow(Int128 x, Int128 y, Int128 & res) { - static constexpr __int128 min_int128 = minInt128(); - static constexpr __int128 max_int128 = maxInt128(); res = addIgnoreOverflow(x, y); - return (y > 0 && x > max_int128 - y) || (y < 0 && x < min_int128 - y); + return (y > 0 && x > std::numeric_limits::max() - y) || + (y < 0 && x < std::numeric_limits::min() - y); } template <> - inline bool addOverflow(wInt256 x, wInt256 y, wInt256 & res) + inline bool addOverflow(UInt128 x, UInt128 y, UInt128 & res) { res = addIgnoreOverflow(x, y); - return (y > 0 && x > std::numeric_limits::max() - y) || - (y < 0 && x < std::numeric_limits::min() - y); + return x > std::numeric_limits::max() - y; } template <> - inline bool addOverflow(wUInt256 x, wUInt256 y, wUInt256 & res) + inline bool addOverflow(Int256 x, Int256 y, Int256 & res) { res = addIgnoreOverflow(x, y); - return x > std::numeric_limits::max() - y; + return (y > 0 && x > std::numeric_limits::max() - y) || + (y < 0 && x < std::numeric_limits::min() - y); + } + + template <> + inline bool addOverflow(UInt256 x, UInt256 y, UInt256 & res) + { + res = addIgnoreOverflow(x, 
y); + return x > std::numeric_limits::max() - y; } template @@ -104,24 +110,30 @@ namespace common } template <> - inline bool subOverflow(__int128 x, __int128 y, __int128 & res) + inline bool subOverflow(Int128 x, Int128 y, Int128 & res) { - static constexpr __int128 min_int128 = minInt128(); - static constexpr __int128 max_int128 = maxInt128(); res = subIgnoreOverflow(x, y); - return (y < 0 && x > max_int128 + y) || (y > 0 && x < min_int128 + y); + return (y < 0 && x > std::numeric_limits::max() + y) || + (y > 0 && x < std::numeric_limits::min() + y); } template <> - inline bool subOverflow(wInt256 x, wInt256 y, wInt256 & res) + inline bool subOverflow(UInt128 x, UInt128 y, UInt128 & res) { res = subIgnoreOverflow(x, y); - return (y < 0 && x > std::numeric_limits::max() + y) || - (y > 0 && x < std::numeric_limits::min() + y); + return x < y; } template <> - inline bool subOverflow(wUInt256 x, wUInt256 y, wUInt256 & res) + inline bool subOverflow(Int256 x, Int256 y, Int256 & res) + { + res = subIgnoreOverflow(x, y); + return (y < 0 && x > std::numeric_limits::max() + y) || + (y > 0 && x < std::numeric_limits::min() + y); + } + + template <> + inline bool subOverflow(UInt256 x, UInt256 y, UInt256 & res) { res = subIgnoreOverflow(x, y); return x < y; @@ -151,36 +163,33 @@ namespace common return __builtin_smulll_overflow(x, y, &res); } + /// Overflow check is not implemented for big integers. + template <> - inline bool mulOverflow(__int128 x, __int128 y, __int128 & res) + inline bool mulOverflow(Int128 x, Int128 y, Int128 & res) { res = mulIgnoreOverflow(x, y); - if (!x || !y) - return false; - - unsigned __int128 a = (x > 0) ? x : -x; - unsigned __int128 b = (y > 0) ? y : -y; - return mulIgnoreOverflow(a, b) / b != a; + return false; } template <> - inline bool mulOverflow(wInt256 x, wInt256 y, wInt256 & res) + inline bool mulOverflow(Int256 x, Int256 y, Int256 & res) { res = mulIgnoreOverflow(x, y); - if (!x || !y) - return false; - - wInt256 a = (x > 0) ? x : -x; - wInt256 b = (y > 0) ? y : -y; - return mulIgnoreOverflow(a, b) / b != a; + return false; } template <> - inline bool mulOverflow(wUInt256 x, wUInt256 y, wUInt256 & res) + inline bool mulOverflow(UInt128 x, UInt128 y, UInt128 & res) { res = mulIgnoreOverflow(x, y); - if (!x || !y) - return false; - return res / y != x; + return false; + } + + template <> + inline bool mulOverflow(UInt256 x, UInt256 y, UInt256 & res) + { + res = mulIgnoreOverflow(x, y); + return false; } } diff --git a/base/common/extended_types.h b/base/common/extended_types.h index 2ae70c0f432..79209568ef5 100644 --- a/base/common/extended_types.h +++ b/base/common/extended_types.h @@ -5,16 +5,14 @@ #include #include -using Int128 = __int128; -using wInt256 = wide::integer<256, signed>; -using wUInt256 = wide::integer<256, unsigned>; +using Int128 = wide::integer<128, signed>; +using UInt128 = wide::integer<128, unsigned>; +using Int256 = wide::integer<256, signed>; +using UInt256 = wide::integer<256, unsigned>; -static_assert(sizeof(wInt256) == 32); -static_assert(sizeof(wUInt256) == 32); - -static constexpr __int128 minInt128() { return static_cast(1) << 127; } -static constexpr __int128 maxInt128() { return (static_cast(1) << 127) - 1; } +static_assert(sizeof(Int256) == 32); +static_assert(sizeof(UInt256) == 32); /// The standard library type traits, such as std::is_arithmetic, with one exception /// (std::common_type), are "set in stone". Attempting to specialize them causes undefined behavior. 
@@ -26,7 +24,7 @@ struct is_signed }; template <> struct is_signed { static constexpr bool value = true; }; -template <> struct is_signed { static constexpr bool value = true; }; +template <> struct is_signed { static constexpr bool value = true; }; template inline constexpr bool is_signed_v = is_signed::value; @@ -37,7 +35,8 @@ struct is_unsigned static constexpr bool value = std::is_unsigned_v; }; -template <> struct is_unsigned { static constexpr bool value = true; }; +template <> struct is_unsigned { static constexpr bool value = true; }; +template <> struct is_unsigned { static constexpr bool value = true; }; template inline constexpr bool is_unsigned_v = is_unsigned::value; @@ -51,8 +50,9 @@ struct is_integer }; template <> struct is_integer { static constexpr bool value = true; }; -template <> struct is_integer { static constexpr bool value = true; }; -template <> struct is_integer { static constexpr bool value = true; }; +template <> struct is_integer { static constexpr bool value = true; }; +template <> struct is_integer { static constexpr bool value = true; }; +template <> struct is_integer { static constexpr bool value = true; }; template inline constexpr bool is_integer_v = is_integer::value; @@ -64,7 +64,11 @@ struct is_arithmetic static constexpr bool value = std::is_arithmetic_v; }; -template <> struct is_arithmetic<__int128> { static constexpr bool value = true; }; +template <> struct is_arithmetic { static constexpr bool value = true; }; +template <> struct is_arithmetic { static constexpr bool value = true; }; +template <> struct is_arithmetic { static constexpr bool value = true; }; +template <> struct is_arithmetic { static constexpr bool value = true; }; + template inline constexpr bool is_arithmetic_v = is_arithmetic::value; @@ -75,9 +79,10 @@ struct make_unsigned typedef std::make_unsigned_t type; }; -template <> struct make_unsigned { using type = unsigned __int128; }; -template <> struct make_unsigned { using type = wUInt256; }; -template <> struct make_unsigned { using type = wUInt256; }; +template <> struct make_unsigned { using type = UInt128; }; +template <> struct make_unsigned { using type = UInt128; }; +template <> struct make_unsigned { using type = UInt256; }; +template <> struct make_unsigned { using type = UInt256; }; template using make_unsigned_t = typename make_unsigned::type; @@ -87,8 +92,10 @@ struct make_signed typedef std::make_signed_t type; }; -template <> struct make_signed { using type = wInt256; }; -template <> struct make_signed { using type = wInt256; }; +template <> struct make_signed { using type = Int128; }; +template <> struct make_signed { using type = Int128; }; +template <> struct make_signed { using type = Int256; }; +template <> struct make_signed { using type = Int256; }; template using make_signed_t = typename make_signed::type; @@ -98,8 +105,10 @@ struct is_big_int static constexpr bool value = false; }; -template <> struct is_big_int { static constexpr bool value = true; }; -template <> struct is_big_int { static constexpr bool value = true; }; +template <> struct is_big_int { static constexpr bool value = true; }; +template <> struct is_big_int { static constexpr bool value = true; }; +template <> struct is_big_int { static constexpr bool value = true; }; +template <> struct is_big_int { static constexpr bool value = true; }; template inline constexpr bool is_big_int_v = is_big_int::value; diff --git a/base/common/getThreadId.cpp b/base/common/getThreadId.cpp index 700c51f21fc..054e9be9074 100644 --- 
a/base/common/getThreadId.cpp +++ b/base/common/getThreadId.cpp @@ -25,6 +25,10 @@ uint64_t getThreadId() current_tid = syscall(SYS_gettid); /// This call is always successful. - man gettid #elif defined(OS_FREEBSD) current_tid = pthread_getthreadid_np(); +#elif defined(OS_SUNOS) + // On Solaris-derived systems, this returns the ID of the LWP, analogous + // to a thread. + current_tid = static_cast(pthread_self()); #else if (0 != pthread_threadid_np(nullptr, ¤t_tid)) throw std::logic_error("pthread_threadid_np returned error"); diff --git a/base/common/itoa.h b/base/common/itoa.h index a02e7b68c05..4c86239de36 100644 --- a/base/common/itoa.h +++ b/base/common/itoa.h @@ -30,9 +30,8 @@ #include #include #include +#include -using int128_t = __int128; -using uint128_t = unsigned __int128; namespace impl { @@ -106,7 +105,7 @@ using UnsignedOfSize = typename SelectType uint16_t, uint32_t, uint64_t, - uint128_t + __uint128_t >::Result; /// Holds the result of dividing an unsigned N-byte variable by 10^N resulting in @@ -313,7 +312,8 @@ namespace convert } } -static inline int digits10(uint128_t x) +template +static inline int digits10(T x) { if (x < 10ULL) return 1; @@ -346,8 +346,11 @@ static inline int digits10(uint128_t x) return 12 + digits10(x / 1000000000000ULL); } -static inline char * writeUIntText(uint128_t x, char * p) +template +static inline char * writeUIntText(T x, char * p) { + static_assert(is_unsigned_v); + int len = digits10(x); auto pp = p + len; while (x >= 100) @@ -370,14 +373,28 @@ static inline char * writeLeadingMinus(char * pos) return pos + 1; } -static inline char * writeSIntText(int128_t x, char * pos) +template +static inline char * writeSIntText(T x, char * pos) { - static constexpr int128_t min_int128 = uint128_t(1) << 127; + static_assert(std::is_same_v || std::is_same_v); - if (unlikely(x == min_int128)) + using UnsignedT = make_unsigned_t; + static constexpr T min_int = UnsignedT(1) << (sizeof(T) * 8 - 1); + + if (unlikely(x == min_int)) { - memcpy(pos, "-170141183460469231731687303715884105728", 40); - return pos + 40; + if constexpr (std::is_same_v) + { + const char * res = "-170141183460469231731687303715884105728"; + memcpy(pos, res, strlen(res)); + return pos + strlen(res); + } + else if constexpr (std::is_same_v) + { + const char * res = "-57896044618658097711785492504343953926634992332820282019728792003956564819968"; + memcpy(pos, res, strlen(res)); + return pos + strlen(res); + } } if (x < 0) @@ -385,7 +402,7 @@ static inline char * writeSIntText(int128_t x, char * pos) x = -x; pos = writeLeadingMinus(pos); } - return writeUIntText(static_cast(x), pos); + return writeUIntText(UnsignedT(x), pos); } } @@ -403,13 +420,25 @@ inline char * itoa(char8_t i, char * p) } template <> -inline char * itoa(uint128_t i, char * p) +inline char * itoa(UInt128 i, char * p) { return impl::writeUIntText(i, p); } template <> -inline char * itoa(int128_t i, char * p) +inline char * itoa(Int128 i, char * p) +{ + return impl::writeSIntText(i, p); +} + +template <> +inline char * itoa(UInt256 i, char * p) +{ + return impl::writeUIntText(i, p); +} + +template <> +inline char * itoa(Int256 i, char * p) { return impl::writeSIntText(i, p); } diff --git a/base/common/strong_typedef.h b/base/common/strong_typedef.h index 77b83bfa6e5..a1e2b253aa7 100644 --- a/base/common/strong_typedef.h +++ b/base/common/strong_typedef.h @@ -4,7 +4,8 @@ #include #include -template + +template struct StrongTypedef { private: @@ -38,14 +39,16 @@ public: bool operator==(const Self & rhs) const { return t 
== rhs.t; } bool operator<(const Self & rhs) const { return t < rhs.t; } + bool operator>(const Self & rhs) const { return t > rhs.t; } T & toUnderType() { return t; } const T & toUnderType() const { return t; } }; + namespace std { - template + template struct hash> { size_t operator()(const StrongTypedef & x) const diff --git a/base/common/throwError.h b/base/common/throwError.h index b495a0fbc7a..dd352913e78 100644 --- a/base/common/throwError.h +++ b/base/common/throwError.h @@ -1,13 +1,15 @@ #pragma once + #include + /// Throw DB::Exception-like exception before its definition. /// DB::Exception derived from Poco::Exception derived from std::exception. -/// DB::Exception generally cought as Poco::Exception. std::exception generally has other catch blocks and could lead to other outcomes. +/// DB::Exception generally caught as Poco::Exception. std::exception generally has other catch blocks and could lead to other outcomes. /// DB::Exception is not defined yet. It'd better to throw Poco::Exception but we do not want to include any big header here, even . /// So we throw some std::exception instead in the hope its catch block is the same as DB::Exception one. template -inline void throwError(const T & err) +[[noreturn]] inline void throwError(const T & err) { throw std::runtime_error(err); } diff --git a/base/common/time.h b/base/common/time.h index 1bf588b7cb3..d0b8e94a9a5 100644 --- a/base/common/time.h +++ b/base/common/time.h @@ -2,7 +2,7 @@ #include -#if defined (OS_DARWIN) +#if defined (OS_DARWIN) || defined (OS_SUNOS) # define CLOCK_MONOTONIC_COARSE CLOCK_MONOTONIC #elif defined (OS_FREEBSD) # define CLOCK_MONOTONIC_COARSE CLOCK_MONOTONIC_FAST diff --git a/base/common/types.h b/base/common/types.h index bd5c28fe73b..e178653f7c6 100644 --- a/base/common/types.h +++ b/base/common/types.h @@ -13,7 +13,12 @@ using char8_t = unsigned char; #endif /// This is needed for more strict aliasing. https://godbolt.org/z/xpJBSb https://stackoverflow.com/a/57453713 +#if !defined(PVS_STUDIO) /// But PVS-Studio does not treat it correctly. 
using UInt8 = char8_t; +#else +using UInt8 = uint8_t; +#endif + using UInt16 = uint16_t; using UInt32 = uint32_t; using UInt64 = uint64_t; diff --git a/base/common/wide_integer.h b/base/common/wide_integer.h index c9d1eaa32aa..419b4e4558c 100644 --- a/base/common/wide_integer.h +++ b/base/common/wide_integer.h @@ -58,9 +58,11 @@ public: using signed_base_type = int64_t; // ctors - constexpr integer() noexcept; + constexpr integer() noexcept = default; + template constexpr integer(T rhs) noexcept; + template constexpr integer(std::initializer_list il) noexcept; @@ -108,9 +110,9 @@ public: constexpr explicit operator bool() const noexcept; template - using __integral_not_wide_integer_class = typename std::enable_if::value, T>::type; + using _integral_not_wide_integer_class = typename std::enable_if::value, T>::type; - template > + template > constexpr operator T() const noexcept; constexpr operator long double() const noexcept; @@ -119,25 +121,27 @@ public: struct _impl; + base_type items[_impl::item_count]; + private: template friend class integer; friend class std::numeric_limits>; friend class std::numeric_limits>; - - base_type items[_impl::item_count]; }; template static constexpr bool ArithmeticConcept() noexcept; + template -using __only_arithmetic = typename std::enable_if() && ArithmeticConcept()>::type; +using _only_arithmetic = typename std::enable_if() && ArithmeticConcept()>::type; template static constexpr bool IntegralConcept() noexcept; + template -using __only_integer = typename std::enable_if() && IntegralConcept()>::type; +using _only_integer = typename std::enable_if() && IntegralConcept()>::type; // Unary operators template @@ -153,54 +157,55 @@ constexpr integer operator+(const integer & lhs) noe template std::common_type_t, integer> constexpr operator*(const integer & lhs, const integer & rhs); -template > +template > std::common_type_t constexpr operator*(const Arithmetic & rhs, const Arithmetic2 & lhs); template std::common_type_t, integer> constexpr operator/(const integer & lhs, const integer & rhs); -template > +template > std::common_type_t constexpr operator/(const Arithmetic & rhs, const Arithmetic2 & lhs); template std::common_type_t, integer> constexpr operator+(const integer & lhs, const integer & rhs); -template > +template > std::common_type_t constexpr operator+(const Arithmetic & rhs, const Arithmetic2 & lhs); template std::common_type_t, integer> constexpr operator-(const integer & lhs, const integer & rhs); -template > +template > std::common_type_t constexpr operator-(const Arithmetic & rhs, const Arithmetic2 & lhs); template std::common_type_t, integer> constexpr operator%(const integer & lhs, const integer & rhs); -template > +template > std::common_type_t constexpr operator%(const Integral & rhs, const Integral2 & lhs); template std::common_type_t, integer> constexpr operator&(const integer & lhs, const integer & rhs); -template > +template > std::common_type_t constexpr operator&(const Integral & rhs, const Integral2 & lhs); template std::common_type_t, integer> constexpr operator|(const integer & lhs, const integer & rhs); -template > +template > std::common_type_t constexpr operator|(const Integral & rhs, const Integral2 & lhs); template std::common_type_t, integer> constexpr operator^(const integer & lhs, const integer & rhs); -template > +template > std::common_type_t constexpr operator^(const Integral & rhs, const Integral2 & lhs); // TODO: Integral template constexpr integer operator<<(const integer & lhs, int n) noexcept; + template 
constexpr integer operator>>(const integer & lhs, int n) noexcept; @@ -217,32 +222,32 @@ constexpr integer operator>>(const integer & lhs, In template constexpr bool operator<(const integer & lhs, const integer & rhs); -template > +template > constexpr bool operator<(const Arithmetic & rhs, const Arithmetic2 & lhs); template constexpr bool operator>(const integer & lhs, const integer & rhs); -template > +template > constexpr bool operator>(const Arithmetic & rhs, const Arithmetic2 & lhs); template constexpr bool operator<=(const integer & lhs, const integer & rhs); -template > +template > constexpr bool operator<=(const Arithmetic & rhs, const Arithmetic2 & lhs); template constexpr bool operator>=(const integer & lhs, const integer & rhs); -template > +template > constexpr bool operator>=(const Arithmetic & rhs, const Arithmetic2 & lhs); template constexpr bool operator==(const integer & lhs, const integer & rhs); -template > +template > constexpr bool operator==(const Arithmetic & rhs, const Arithmetic2 & lhs); template constexpr bool operator!=(const integer & lhs, const integer & rhs); -template > +template > constexpr bool operator!=(const Arithmetic & rhs, const Arithmetic2 & lhs); } diff --git a/base/common/wide_integer_impl.h b/base/common/wide_integer_impl.h index 5b981326e25..725caec6a3e 100644 --- a/base/common/wide_integer_impl.h +++ b/base/common/wide_integer_impl.h @@ -5,6 +5,7 @@ /// (See at http://www.boost.org/LICENSE_1_0.txt) #include "throwError.h" + #include #include #include @@ -81,7 +82,7 @@ public: res.items[T::_impl::big(0)] = std::numeric_limits::signed_base_type>::min(); return res; } - return 0; + return wide::integer(0); } static constexpr wide::integer max() noexcept @@ -176,7 +177,7 @@ struct integer::_impl constexpr static bool is_negative(const integer & n) noexcept { if constexpr (std::is_same_v) - return static_cast(n.items[big(0)]) < 0; + return static_cast(n.items[integer::_impl::big(0)]) < 0; else return false; } @@ -193,40 +194,36 @@ struct integer::_impl template constexpr static integer make_positive(const integer & n) noexcept { - return is_negative(n) ? operator_unary_minus(n) : n; + return is_negative(n) ? integer(operator_unary_minus(n)) : n; } template __attribute__((no_sanitize("undefined"))) constexpr static auto to_Integral(T f) noexcept { - if constexpr (std::is_same_v) - return f; - else if constexpr (std::is_signed_v) + if constexpr (std::is_signed_v) return static_cast(f); else return static_cast(f); } template - constexpr static void wide_integer_from_bultin(integer & self, Integral rhs) noexcept + constexpr static void wide_integer_from_builtin(integer & self, Integral rhs) noexcept { - self.items[0] = _impl::to_Integral(rhs); - if constexpr (std::is_same_v) - self.items[1] = rhs >> base_bits; + static_assert(sizeof(Integral) <= sizeof(base_type)); - constexpr const unsigned start = (sizeof(Integral) == 16) ? 2 : 1; + self.items[0] = _impl::to_Integral(rhs); if constexpr (std::is_signed_v) { if (rhs < 0) { - for (unsigned i = start; i < item_count; ++i) + for (size_t i = 1; i < item_count; ++i) self.items[i] = -1; return; } } - for (unsigned i = start; i < item_count; ++i) + for (size_t i = 1; i < item_count; ++i) self.items[i] = 0; } @@ -239,7 +236,8 @@ struct integer::_impl * a_(n - 1) = a_n * max_int + b2, a_n <= max_int <- base case. 
*/ template - constexpr static void set_multiplier(integer & self, T t) noexcept { + constexpr static void set_multiplier(integer & self, T t) noexcept + { constexpr uint64_t max_int = std::numeric_limits::max(); /// Implementation specific behaviour on overflow (if we don't check here, stack overflow will triggered in bigint_cast). @@ -260,7 +258,8 @@ struct integer::_impl self += static_cast(t - alpha * static_cast(max_int)); // += b_i } - constexpr static void wide_integer_from_bultin(integer& self, double rhs) noexcept { + constexpr static void wide_integer_from_builtin(integer& self, double rhs) noexcept + { constexpr int64_t max_int = std::numeric_limits::max(); constexpr int64_t min_int = std::numeric_limits::min(); @@ -271,9 +270,13 @@ struct integer::_impl /// As to_Integral does a static_cast to int64_t, it may result in UB. /// The necessary check here is that long double has enough significant (mantissa) bits to store the /// int64_t max value precisely. + + //TODO Be compatible with Apple aarch64 +#if not (defined(__APPLE__) && defined(__aarch64__)) static_assert(LDBL_MANT_DIG >= 64, "On your system long double has less than 64 precision bits," "which may result in UB when initializing double from int64_t"); +#endif if ((rhs > 0 && rhs < static_cast(max_int)) || (rhs < 0 && rhs > static_cast(min_int))) { @@ -379,13 +382,13 @@ struct integer::_impl if (bit_shift) lhs.items[big(items_shift)] |= std::numeric_limits::max() << (base_bits - bit_shift); - for (unsigned i = item_count - items_shift; i < items_shift; ++i) - lhs.items[little(i)] = std::numeric_limits::max(); + for (unsigned i = 0; i < items_shift; ++i) + lhs.items[big(i)] = std::numeric_limits::max(); } else { - for (unsigned i = item_count - items_shift; i < items_shift; ++i) - lhs.items[little(i)] = 0; + for (unsigned i = 0; i < items_shift; ++i) + lhs.items[big(i)] = 0; } return lhs; @@ -393,23 +396,23 @@ struct integer::_impl private: template - constexpr static base_type get_item(const T & x, unsigned number) + constexpr static base_type get_item(const T & x, unsigned idx) { if constexpr (IsWideInteger::value) { - if (number < T::_impl::item_count) - return x.items[number]; + if (idx < T::_impl::item_count) + return x.items[idx]; return 0; } else { if constexpr (sizeof(T) <= sizeof(base_type)) { - if (!number) + if (0 == idx) return x; } - else if (number * sizeof(base_type) < sizeof(T)) - return x >> (number * base_bits); // & std::numeric_limits::max() + else if (idx * sizeof(base_type) < sizeof(T)) + return x >> (idx * base_bits); // & std::numeric_limits::max() return 0; } } @@ -435,7 +438,7 @@ private: for (unsigned i = 1; i < item_count; ++i) { - if (underflows[i-1]) + if (underflows[i - 1]) { base_type & res_item = res.items[little(i)]; if (res_item == 0) @@ -468,7 +471,7 @@ private: for (unsigned i = 1; i < item_count; ++i) { - if (overflows[i-1]) + if (overflows[i - 1]) { base_type & res_item = res.items[little(i)]; ++res_item; @@ -528,6 +531,17 @@ private: res.items[little(2)] = r12 >> 64; return res; } + else if constexpr (Bits == 128 && sizeof(base_type) == 8) + { + using CompilerUInt128 = unsigned __int128; + CompilerUInt128 a = (CompilerUInt128(lhs.items[1]) << 64) + lhs.items[0]; + CompilerUInt128 b = (CompilerUInt128(rhs.items[1]) << 64) + rhs.items[0]; + CompilerUInt128 c = a * b; + integer res; + res.items[0] = c; + res.items[1] = c >> 64; + return res; + } else { integer res{}; @@ -653,7 +667,7 @@ public: } template - constexpr static bool operator_more(const integer & lhs, const T & rhs) 
noexcept + constexpr static bool operator_greater(const integer & lhs, const T & rhs) noexcept { if constexpr (should_keep_size()) { @@ -673,7 +687,7 @@ public: else { static_assert(IsWideInteger::value); - return std::common_type_t, T>::_impl::operator_more(T(lhs), rhs); + return std::common_type_t, T>::_impl::operator_greater(T(lhs), rhs); } } @@ -760,7 +774,6 @@ public: } } -private: template constexpr static bool is_zero(const T & x) { @@ -777,46 +790,65 @@ private: } /// returns quotient as result and remainder in numerator. - template - constexpr static T divide(T & numerator, T && denominator) + template + constexpr static integer divide(integer & numerator, integer denominator) { - if (is_zero(denominator)) - throwError("divide by zero"); + static_assert(std::is_unsigned_v); - T & n = numerator; - T & d = denominator; - T x = 1; - T quotient = 0; - - while (!operator_more(d, n) && operator_eq(operator_amp(shift_right(d, base_bits * item_count - 1), 1), 0)) + if constexpr (Bits == 128 && sizeof(base_type) == 8) { - x = shift_left(x, 1); - d = shift_left(d, 1); + using CompilerUInt128 = unsigned __int128; + + CompilerUInt128 a = (CompilerUInt128(numerator.items[1]) << 64) + numerator.items[0]; + CompilerUInt128 b = (CompilerUInt128(denominator.items[1]) << 64) + denominator.items[0]; + CompilerUInt128 c = a / b; + + integer res; + res.items[0] = c; + res.items[1] = c >> 64; + + CompilerUInt128 remainder = a - b * c; + numerator.items[0] = remainder; + numerator.items[1] = remainder >> 64; + + return res; } - while (!operator_eq(x, 0)) + if (is_zero(denominator)) + throwError("Division by zero"); + + integer x = 1; + integer quotient = 0; + + while (!operator_greater(denominator, numerator) && is_zero(operator_amp(shift_right(denominator, Bits2 - 1), 1))) { - if (!operator_more(d, n)) + x = shift_left(x, 1); + denominator = shift_left(denominator, 1); + } + + while (!is_zero(x)) + { + if (!operator_greater(denominator, numerator)) { - n = operator_minus(n, d); + numerator = operator_minus(numerator, denominator); quotient = operator_pipe(quotient, x); } x = shift_right(x, 1); - d = shift_right(d, 1); + denominator = shift_right(denominator, 1); } return quotient; } -public: template constexpr static auto operator_slash(const integer & lhs, const T & rhs) { if constexpr (should_keep_size()) { - integer numerator = make_positive(lhs); - integer quotient = divide(numerator, make_positive(integer(rhs))); + integer numerator = make_positive(lhs); + integer denominator = make_positive(integer(rhs)); + integer quotient = integer::_impl::divide(numerator, std::move(denominator)); if (std::is_same_v && is_negative(rhs) != is_negative(lhs)) quotient = operator_unary_minus(quotient); @@ -834,8 +866,9 @@ public: { if constexpr (should_keep_size()) { - integer remainder = make_positive(lhs); - divide(remainder, make_positive(integer(rhs))); + integer remainder = make_positive(lhs); + integer denominator = make_positive(integer(rhs)); + integer::_impl::divide(remainder, std::move(denominator)); if (std::is_same_v && is_negative(lhs)) remainder = operator_unary_minus(remainder); @@ -901,7 +934,7 @@ public: ++c; } else - throwError("invalid char from"); + throwError("Invalid char from"); } } else @@ -909,7 +942,7 @@ public: while (*c) { if (*c < '0' || *c > '9') - throwError("invalid char from"); + throwError("Invalid char from"); res = multiply(res, 10U); res = plus(res, *c - '0'); @@ -926,11 +959,6 @@ public: // Members -template -constexpr integer::integer() noexcept - : items{} -{} - template 
template constexpr integer::integer(T rhs) noexcept @@ -939,7 +967,7 @@ constexpr integer::integer(T rhs) noexcept if constexpr (IsWideInteger::value) _impl::wide_integer_from_wide_integer(*this, rhs); else - _impl::wide_integer_from_bultin(*this, rhs); + _impl::wide_integer_from_builtin(*this, rhs); } template @@ -952,10 +980,19 @@ constexpr integer::integer(std::initializer_list il) noexcept if constexpr (IsWideInteger::value) _impl::wide_integer_from_wide_integer(*this, *il.begin()); else - _impl::wide_integer_from_bultin(*this, *il.begin()); + _impl::wide_integer_from_builtin(*this, *il.begin()); + } + else if (il.size() == 0) + { + _impl::wide_integer_from_builtin(*this, 0); } else - _impl::wide_integer_from_bultin(*this, 0); + { + auto it = il.begin(); + for (size_t i = 0; i < _impl::item_count; ++i) + if (it < il.end()) + items[i] = *it; + } } template @@ -970,7 +1007,7 @@ template template constexpr integer & integer::operator=(T rhs) noexcept { - _impl::wide_integer_from_bultin(*this, rhs); + _impl::wide_integer_from_builtin(*this, rhs); return *this; } @@ -1053,7 +1090,7 @@ constexpr integer & integer::operator>>=(int n) noex { if (static_cast(n) >= Bits) { - if (is_negative(*this)) + if (_impl::is_negative(*this)) *this = -1; else *this = 0; @@ -1103,16 +1140,17 @@ template template constexpr integer::operator T() const noexcept { - if constexpr (std::is_same_v) - { - static_assert(Bits >= 128); - return (__int128(items[1]) << 64) | items[0]; - } - else - { - static_assert(std::numeric_limits::is_integer); - return items[0]; - } + static_assert(std::numeric_limits::is_integer); + + /// NOTE: memcpy will suffice, but unfortunately, this function is constexpr. + + using UnsignedT = std::make_unsigned_t; + + UnsignedT res{}; + for (unsigned i = 0; i < _impl::item_count && i < (sizeof(T) + sizeof(base_type) - 1) / sizeof(base_type); ++i) + res += UnsignedT(items[i]) << (sizeof(base_type) * 8 * i); + + return res; } template @@ -1276,7 +1314,7 @@ template constexpr integer operator<<(const integer & lhs, int n) noexcept { if (static_cast(n) >= Bits) - return 0; + return integer(0); if (n <= 0) return lhs; return integer::_impl::shift_left(lhs, n); @@ -1285,7 +1323,7 @@ template constexpr integer operator>>(const integer & lhs, int n) noexcept { if (static_cast(n) >= Bits) - return 0; + return integer(0); if (n <= 0) return lhs; return integer::_impl::shift_right(lhs, n); @@ -1305,7 +1343,7 @@ constexpr bool operator<(const Arithmetic & lhs, const Arithmetic2 & rhs) template constexpr bool operator>(const integer & lhs, const integer & rhs) { - return std::common_type_t, integer>::_impl::operator_more(lhs, rhs); + return std::common_type_t, integer>::_impl::operator_greater(lhs, rhs); } template constexpr bool operator>(const Arithmetic & lhs, const Arithmetic2 & rhs) @@ -1328,7 +1366,7 @@ constexpr bool operator<=(const Arithmetic & lhs, const Arithmetic2 & rhs) template constexpr bool operator>=(const integer & lhs, const integer & rhs) { - return std::common_type_t, integer>::_impl::operator_more(lhs, rhs) + return std::common_type_t, integer>::_impl::operator_greater(lhs, rhs) || std::common_type_t, integer>::_impl::operator_eq(lhs, rhs); } template diff --git a/base/common/wide_integer_to_string.h b/base/common/wide_integer_to_string.h index 9908ef4be7a..8b794fe9bcb 100644 --- a/base/common/wide_integer_to_string.h +++ b/base/common/wide_integer_to_string.h @@ -1,9 +1,12 @@ #pragma once #include +#include +#include #include "wide_integer.h" + namespace wide { @@ -33,3 +36,34 @@ 
inline std::string to_string(const integer & n) } } + + +template +std::ostream & operator<<(std::ostream & out, const wide::integer & value) +{ + return out << to_string(value); +} + + +/// See https://fmt.dev/latest/api.html#formatting-user-defined-types +template +struct fmt::formatter> +{ + constexpr auto parse(format_parse_context & ctx) + { + auto it = ctx.begin(); + auto end = ctx.end(); + + /// Only support {}. + if (it != end && *it != '}') + throw format_error("invalid format"); + + return it; + } + + template + auto format(const wide::integer & value, FormatContext & ctx) + { + return format_to(ctx.out(), "{}", to_string(value)); + } +}; diff --git a/base/common/ya.make.in b/base/common/ya.make.in index b5c2bbc1717..3deb36a2c71 100644 --- a/base/common/ya.make.in +++ b/base/common/ya.make.in @@ -35,7 +35,7 @@ PEERDIR( CFLAGS(-g0) SRCS( - + ) END() diff --git a/base/daemon/SentryWriter.cpp b/base/daemon/SentryWriter.cpp index 29430b65983..1028dc7d2dc 100644 --- a/base/daemon/SentryWriter.cpp +++ b/base/daemon/SentryWriter.cpp @@ -9,6 +9,7 @@ #include #include +#include #include #include #include @@ -101,7 +102,7 @@ void SentryWriter::initialize(Poco::Util::LayeredConfiguration & config) auto * logger = &Poco::Logger::get("SentryWriter"); if (config.getBool("send_crash_reports.enabled", false)) { - if (debug || (strlen(VERSION_OFFICIAL) > 0)) + if (debug || (strlen(VERSION_OFFICIAL) > 0)) //-V560 { enabled = true; } diff --git a/base/ext/scope_guard_safe.h b/base/ext/scope_guard_safe.h new file mode 100644 index 00000000000..55140213572 --- /dev/null +++ b/base/ext/scope_guard_safe.h @@ -0,0 +1,68 @@ +#pragma once + +#include +#include +#include + +/// Same as SCOPE_EXIT() but block the MEMORY_LIMIT_EXCEEDED errors. +/// +/// Typical example of SCOPE_EXIT_MEMORY() usage is when code under it may do +/// some tiny allocations, that may fail under high memory pressure or/and low +/// max_memory_usage (and related limits). +/// +/// NOTE: it should be used with caution. +#define SCOPE_EXIT_MEMORY(...) SCOPE_EXIT( \ + MemoryTracker::LockExceptionInThread \ + lock_memory_tracker(VariableContext::Global); \ + __VA_ARGS__; \ +) + +/// Same as SCOPE_EXIT() but try/catch/tryLogCurrentException any exceptions. +/// +/// SCOPE_EXIT_SAFE() should be used in case the exception during the code +/// under SCOPE_EXIT() is not "that fatal" and error message in log is enough. +/// +/// Good example is calling CurrentThread::detachQueryIfNotDetached(). +/// +/// Anti-pattern is calling WriteBuffer::finalize() under SCOPE_EXIT_SAFE() +/// (since finalize() can do final write and it is better to fail abnormally +/// instead of ignoring write error). +/// +/// NOTE: it should be used with double caution. +#define SCOPE_EXIT_SAFE(...) SCOPE_EXIT( \ + try \ + { \ + __VA_ARGS__; \ + } \ + catch (...) \ + { \ + tryLogCurrentException(__PRETTY_FUNCTION__); \ + } \ +) + +/// Same as SCOPE_EXIT() but: +/// - block the MEMORY_LIMIT_EXCEEDED errors, +/// - try/catch/tryLogCurrentException any exceptions. +/// +/// SCOPE_EXIT_MEMORY_SAFE() can be used when the error can be ignored, and in +/// addition to SCOPE_EXIT_SAFE() it will also lock MEMORY_LIMIT_EXCEEDED to +/// avoid such exceptions. +/// +/// It does exists as a separate helper, since you do not need to lock +/// MEMORY_LIMIT_EXCEEDED always (there are cases when code under SCOPE_EXIT does +/// not do any allocations, while LockExceptionInThread increment atomic +/// variable). +/// +/// NOTE: it should be used with triple caution. 
+#define SCOPE_EXIT_MEMORY_SAFE(...) SCOPE_EXIT( \ + try \ + { \ + MemoryTracker::LockExceptionInThread \ + lock_memory_tracker(VariableContext::Global); \ + __VA_ARGS__; \ + } \ + catch (...) \ + { \ + tryLogCurrentException(__PRETTY_FUNCTION__); \ + } \ +) diff --git a/base/glibc-compatibility/CMakeLists.txt b/base/glibc-compatibility/CMakeLists.txt index e785e2ab2ce..8cba91de33f 100644 --- a/base/glibc-compatibility/CMakeLists.txt +++ b/base/glibc-compatibility/CMakeLists.txt @@ -15,7 +15,7 @@ if (GLIBC_COMPATIBILITY) add_headers_and_sources(glibc_compatibility .) add_headers_and_sources(glibc_compatibility musl) - if (ARCH_ARM) + if (ARCH_AARCH64) list (APPEND glibc_compatibility_sources musl/aarch64/syscall.s musl/aarch64/longjmp.s) set (musl_arch_include_dir musl/aarch64) elseif (ARCH_AMD64) diff --git a/base/glibc-compatibility/musl/lgamma.c b/base/glibc-compatibility/musl/lgamma.c index fb9d105d0fa..5e959504e29 100644 --- a/base/glibc-compatibility/musl/lgamma.c +++ b/base/glibc-compatibility/musl/lgamma.c @@ -78,6 +78,9 @@ * */ +// Disable warnings by PVS-Studio +//-V::GA + static const double pi = 3.14159265358979311600e+00, /* 0x400921FB, 0x54442D18 */ a0 = 7.72156649015328655494e-02, /* 0x3FB3C467, 0xE37DB0C8 */ diff --git a/base/glibc-compatibility/musl/lgammal.c b/base/glibc-compatibility/musl/lgammal.c index b158748ce1f..775559f13b6 100644 --- a/base/glibc-compatibility/musl/lgammal.c +++ b/base/glibc-compatibility/musl/lgammal.c @@ -85,6 +85,9 @@ * */ +// Disable warnings by PVS-Studio +//-V::GA + #include #include #include "libm.h" diff --git a/base/glibc-compatibility/musl/libm.h b/base/glibc-compatibility/musl/libm.h index 55520c2fb03..e5029318693 100644 --- a/base/glibc-compatibility/musl/libm.h +++ b/base/glibc-compatibility/musl/libm.h @@ -155,7 +155,7 @@ static inline long double fp_barrierl(long double x) static inline void fp_force_evalf(float x) { volatile float y; - y = x; + y = x; //-V1001 } #endif @@ -164,7 +164,7 @@ static inline void fp_force_evalf(float x) static inline void fp_force_eval(double x) { volatile double y; - y = x; + y = x; //-V1001 } #endif @@ -173,7 +173,7 @@ static inline void fp_force_eval(double x) static inline void fp_force_evall(long double x) { volatile long double y; - y = x; + y = x; //-V1001 } #endif diff --git a/base/glibc-compatibility/musl/powf.c b/base/glibc-compatibility/musl/powf.c index de8fab54554..35dc3611b94 100644 --- a/base/glibc-compatibility/musl/powf.c +++ b/base/glibc-compatibility/musl/powf.c @@ -3,6 +3,9 @@ * SPDX-License-Identifier: MIT */ +// Disable warnings by PVS-Studio +//-V::GA + #include #include #include "libm.h" diff --git a/base/loggers/CMakeLists.txt b/base/loggers/CMakeLists.txt index 48868cf1e0d..22be002e069 100644 --- a/base/loggers/CMakeLists.txt +++ b/base/loggers/CMakeLists.txt @@ -1,4 +1,4 @@ -include(${ClickHouse_SOURCE_DIR}/cmake/dbms_glob_sources.cmake) +include("${ClickHouse_SOURCE_DIR}/cmake/dbms_glob_sources.cmake") add_headers_and_sources(loggers .) 
add_library(loggers ${loggers_sources} ${loggers_headers}) target_link_libraries(loggers PRIVATE dbms clickhouse_common_io) diff --git a/base/loggers/Loggers.cpp b/base/loggers/Loggers.cpp index ed806741895..913deaf1eb8 100644 --- a/base/loggers/Loggers.cpp +++ b/base/loggers/Loggers.cpp @@ -40,7 +40,7 @@ void Loggers::buildLoggers(Poco::Util::AbstractConfiguration & config, Poco::Log split->addTextLog(log, text_log_max_priority); auto current_logger = config.getString("logger", ""); - if (config_logger == current_logger) + if (config_logger == current_logger) //-V1051 return; config_logger = current_logger; @@ -51,12 +51,22 @@ void Loggers::buildLoggers(Poco::Util::AbstractConfiguration & config, Poco::Log /// Use extended interface of Channel for more comprehensive logging. split = new DB::OwnSplitChannel(); - auto log_level = config.getString("logger.level", "trace"); + auto log_level_string = config.getString("logger.level", "trace"); + + /// different channels (log, console, syslog) may have different loglevels configured + /// The maximum (the most verbose) of those will be used as default for Poco loggers + int max_log_level = 0; + const auto log_path = config.getString("logger.log", ""); if (!log_path.empty()) { createDirectory(log_path); - std::cerr << "Logging " << log_level << " to " << log_path << std::endl; + std::cerr << "Logging " << log_level_string << " to " << log_path << std::endl; + auto log_level = Poco::Logger::parseLevel(log_level_string); + if (log_level > max_log_level) + { + max_log_level = log_level; + } // Set up two channel chains. log_file = new Poco::FileChannel; @@ -69,9 +79,10 @@ void Loggers::buildLoggers(Poco::Util::AbstractConfiguration & config, Poco::Log log_file->setProperty(Poco::FileChannel::PROP_ROTATEONOPEN, config.getRawString("logger.rotateOnOpen", "false")); log_file->open(); - Poco::AutoPtr pf = new OwnPatternFormatter(this); + Poco::AutoPtr pf = new OwnPatternFormatter; Poco::AutoPtr log = new DB::OwnFormattingChannel(pf, log_file); + log->setLevel(log_level); split->addChannel(log); } @@ -79,6 +90,15 @@ void Loggers::buildLoggers(Poco::Util::AbstractConfiguration & config, Poco::Log if (!errorlog_path.empty()) { createDirectory(errorlog_path); + + // NOTE: we don't use notice & critical in the code, so in practice error log collects fatal & error & warning. + // (!) Warnings are important, they require attention and should never be silenced / ignored. + auto errorlog_level = Poco::Logger::parseLevel(config.getString("logger.errorlog_level", "notice")); + if (errorlog_level > max_log_level) + { + max_log_level = errorlog_level; + } + std::cerr << "Logging errors to " << errorlog_path << std::endl; error_log_file = new Poco::FileChannel; @@ -90,20 +110,22 @@ void Loggers::buildLoggers(Poco::Util::AbstractConfiguration & config, Poco::Log error_log_file->setProperty(Poco::FileChannel::PROP_FLUSH, config.getRawString("logger.flush", "true")); error_log_file->setProperty(Poco::FileChannel::PROP_ROTATEONOPEN, config.getRawString("logger.rotateOnOpen", "false")); - Poco::AutoPtr pf = new OwnPatternFormatter(this); + Poco::AutoPtr pf = new OwnPatternFormatter; Poco::AutoPtr errorlog = new DB::OwnFormattingChannel(pf, error_log_file); - errorlog->setLevel(Poco::Message::PRIO_NOTICE); + errorlog->setLevel(errorlog_level); errorlog->open(); split->addChannel(errorlog); } - /// "dynamic_layer_selection" is needed only for Yandex.Metrika, that share part of ClickHouse code. - /// We don't need this configuration parameter. 
- - if (config.getBool("logger.use_syslog", false) || config.getBool("dynamic_layer_selection", false)) + if (config.getBool("logger.use_syslog", false)) { //const std::string & cmd_name = commandName(); + auto syslog_level = Poco::Logger::parseLevel(config.getString("logger.syslog_level", log_level_string)); + if (syslog_level > max_log_level) + { + max_log_level = syslog_level; + } if (config.has("logger.syslog.address")) { @@ -127,9 +149,11 @@ void Loggers::buildLoggers(Poco::Util::AbstractConfiguration & config, Poco::Log } syslog_channel->open(); - Poco::AutoPtr pf = new OwnPatternFormatter(this, OwnPatternFormatter::ADD_LAYER_TAG); + Poco::AutoPtr pf = new OwnPatternFormatter; Poco::AutoPtr log = new DB::OwnFormattingChannel(pf, syslog_channel); + log->setLevel(syslog_level); + split->addChannel(log); } @@ -141,9 +165,17 @@ void Loggers::buildLoggers(Poco::Util::AbstractConfiguration & config, Poco::Log { bool color_enabled = config.getBool("logger.color_terminal", color_logs_by_default); - Poco::AutoPtr pf = new OwnPatternFormatter(this, OwnPatternFormatter::ADD_NOTHING, color_enabled); + auto console_log_level_string = config.getString("logger.console_log_level", log_level_string); + auto console_log_level = Poco::Logger::parseLevel(console_log_level_string); + if (console_log_level > max_log_level) + { + max_log_level = console_log_level; + } + + Poco::AutoPtr pf = new OwnPatternFormatter(color_enabled); Poco::AutoPtr log = new DB::OwnFormattingChannel(pf, new Poco::ConsoleChannel); - logger.warning("Logging " + log_level + " to console"); + logger.warning("Logging " + console_log_level_string + " to console"); + log->setLevel(console_log_level); split->addChannel(log); } @@ -152,17 +184,17 @@ void Loggers::buildLoggers(Poco::Util::AbstractConfiguration & config, Poco::Log logger.setChannel(split); // Global logging level (it can be overridden for specific loggers). - logger.setLevel(log_level); + logger.setLevel(max_log_level); // Set level to all already created loggers std::vector names; //logger_root = Logger::root(); logger.root().names(names); for (const auto & name : names) - logger.root().get(name).setLevel(log_level); + logger.root().get(name).setLevel(max_log_level); // Attach to the root logger. - logger.root().setLevel(log_level); + logger.root().setLevel(max_log_level); logger.root().setChannel(logger.getChannel()); // Explicitly specified log levels for specific loggers. diff --git a/base/loggers/Loggers.h b/base/loggers/Loggers.h index 9ed75046468..151c1d3566f 100644 --- a/base/loggers/Loggers.h +++ b/base/loggers/Loggers.h @@ -8,6 +8,7 @@ #include #include "OwnSplitChannel.h" + namespace Poco::Util { class AbstractConfiguration; @@ -21,16 +22,8 @@ public: /// Close log files. On next log write files will be reopened. void closeLogs(Poco::Logger & logger); - std::optional getLayer() const - { - return layer; /// layer set in inheritor class BaseDaemonApplication. 
- } - void setTextLog(std::shared_ptr log, int max_priority); -protected: - std::optional layer; - private: Poco::AutoPtr log_file; Poco::AutoPtr error_log_file; diff --git a/base/loggers/OwnFormattingChannel.h b/base/loggers/OwnFormattingChannel.h index cd2e66279d7..2336dacad04 100644 --- a/base/loggers/OwnFormattingChannel.h +++ b/base/loggers/OwnFormattingChannel.h @@ -22,6 +22,9 @@ public: void setLevel(Poco::Message::Priority priority_) { priority = priority_; } + // Poco::Logger::parseLevel returns ints + void setLevel(int level) { priority = static_cast(level); } + void open() override { if (pChannel) diff --git a/base/loggers/OwnPatternFormatter.cpp b/base/loggers/OwnPatternFormatter.cpp index 029d06ff949..e62039f4a27 100644 --- a/base/loggers/OwnPatternFormatter.cpp +++ b/base/loggers/OwnPatternFormatter.cpp @@ -13,31 +13,18 @@ #include "Loggers.h" -OwnPatternFormatter::OwnPatternFormatter(const Loggers * loggers_, OwnPatternFormatter::Options options_, bool color_) - : Poco::PatternFormatter(""), loggers(loggers_), options(options_), color(color_) +OwnPatternFormatter::OwnPatternFormatter(bool color_) + : Poco::PatternFormatter(""), color(color_) { } -void OwnPatternFormatter::formatExtended(const DB::ExtendedLogMessage & msg_ext, std::string & text) +void OwnPatternFormatter::formatExtended(const DB::ExtendedLogMessage & msg_ext, std::string & text) const { DB::WriteBufferFromString wb(text); const Poco::Message & msg = msg_ext.base; - /// For syslog: tag must be before message and first whitespace. - /// This code is only used in Yandex.Metrika and unneeded in ClickHouse. - if ((options & ADD_LAYER_TAG) && loggers) - { - auto layer = loggers->getLayer(); - if (layer) - { - writeCString("layer[", wb); - DB::writeIntText(*layer, wb); - writeCString("]: ", wb); - } - } - /// Change delimiters in date for compatibility with old logs. DB::writeDateTimeText<'.', ':'>(msg_ext.time_seconds, wb); diff --git a/base/loggers/OwnPatternFormatter.h b/base/loggers/OwnPatternFormatter.h index 4aedcc04637..fba4f0964cb 100644 --- a/base/loggers/OwnPatternFormatter.h +++ b/base/loggers/OwnPatternFormatter.h @@ -24,20 +24,11 @@ class Loggers; class OwnPatternFormatter : public Poco::PatternFormatter { public: - /// ADD_LAYER_TAG is needed only for Yandex.Metrika, that share part of ClickHouse code. - enum Options - { - ADD_NOTHING = 0, - ADD_LAYER_TAG = 1 << 0 - }; - - OwnPatternFormatter(const Loggers * loggers_, Options options_ = ADD_NOTHING, bool color_ = false); + OwnPatternFormatter(bool color_ = false); void format(const Poco::Message & msg, std::string & text) override; - void formatExtended(const DB::ExtendedLogMessage & msg_ext, std::string & text); + void formatExtended(const DB::ExtendedLogMessage & msg_ext, std::string & text) const; private: - const Loggers * loggers; - Options options; bool color; }; diff --git a/base/mysqlxx/CMakeLists.txt b/base/mysqlxx/CMakeLists.txt index 849c58a8527..c5230c2b49f 100644 --- a/base/mysqlxx/CMakeLists.txt +++ b/base/mysqlxx/CMakeLists.txt @@ -14,8 +14,8 @@ add_library (mysqlxx target_include_directories (mysqlxx PUBLIC ..) 
if (USE_INTERNAL_MYSQL_LIBRARY) - target_include_directories (mysqlxx PUBLIC ${ClickHouse_SOURCE_DIR}/contrib/mariadb-connector-c/include) - target_include_directories (mysqlxx PUBLIC ${ClickHouse_BINARY_DIR}/contrib/mariadb-connector-c/include) + target_include_directories (mysqlxx PUBLIC "${ClickHouse_SOURCE_DIR}/contrib/mariadb-connector-c/include") + target_include_directories (mysqlxx PUBLIC "${ClickHouse_BINARY_DIR}/contrib/mariadb-connector-c/include") else () set(PLATFORM_LIBRARIES ${CMAKE_DL_LIBS}) diff --git a/base/mysqlxx/Pool.h b/base/mysqlxx/Pool.h index b6189663f55..530e2c78cf2 100644 --- a/base/mysqlxx/Pool.h +++ b/base/mysqlxx/Pool.h @@ -159,9 +159,9 @@ public: */ Pool(const std::string & db_, const std::string & server_, - const std::string & user_ = "", - const std::string & password_ = "", - unsigned port_ = 0, + const std::string & user_, + const std::string & password_, + unsigned port_, const std::string & socket_ = "", unsigned connect_timeout_ = MYSQLXX_DEFAULT_TIMEOUT, unsigned rw_timeout_ = MYSQLXX_DEFAULT_RW_TIMEOUT, diff --git a/base/pcg-random/pcg_extras.hpp b/base/pcg-random/pcg_extras.hpp index b71e859a25f..39c91c4ecfa 100644 --- a/base/pcg-random/pcg_extras.hpp +++ b/base/pcg-random/pcg_extras.hpp @@ -447,69 +447,6 @@ inline SrcIter uneven_copy(SrcIter src_first, std::integral_constant{}); } -/* generate_to, fill in a fixed-size array of integral type using a SeedSeq - * (actually works for any random-access iterator) - */ - -template -inline void generate_to_impl(SeedSeq&& generator, DestIter dest, - std::true_type) -{ - generator.generate(dest, dest+size); -} - -template -void generate_to_impl(SeedSeq&& generator, DestIter dest, - std::false_type) -{ - typedef typename std::iterator_traits::value_type dest_t; - constexpr auto DEST_SIZE = sizeof(dest_t); - constexpr auto GEN_SIZE = sizeof(uint32_t); - - constexpr bool GEN_IS_SMALLER = GEN_SIZE < DEST_SIZE; - constexpr size_t FROM_ELEMS = - GEN_IS_SMALLER - ? 
size * ((DEST_SIZE+GEN_SIZE-1) / GEN_SIZE) - : (size + (GEN_SIZE / DEST_SIZE) - 1) - / ((GEN_SIZE / DEST_SIZE) + GEN_IS_SMALLER); - // this odd code ^^^^^^^^^^^^^^^^^ is work-around for - // a bug: http://llvm.org/bugs/show_bug.cgi?id=21287 - - if (FROM_ELEMS <= 1024) { - uint32_t buffer[FROM_ELEMS]; - generator.generate(buffer, buffer+FROM_ELEMS); - uneven_copy(buffer, dest, dest+size); - } else { - uint32_t* buffer = static_cast(malloc(GEN_SIZE * FROM_ELEMS)); - generator.generate(buffer, buffer+FROM_ELEMS); - uneven_copy(buffer, dest, dest+size); - free(static_cast(buffer)); - } -} - -template -inline void generate_to(SeedSeq&& generator, DestIter dest) -{ - typedef typename std::iterator_traits::value_type dest_t; - constexpr bool IS_32BIT = sizeof(dest_t) == sizeof(uint32_t); - - generate_to_impl(std::forward(generator), dest, - std::integral_constant{}); -} - -/* generate_one, produce a value of integral type using a SeedSeq - * (optionally, we can have it produce more than one and pick which one - * we want) - */ - -template -inline UInt generate_one(SeedSeq&& generator) -{ - UInt result[N]; - generate_to(std::forward(generator), result); - return result[i]; -} - template auto bounded_rand(RngType& rng, typename RngType::result_type upper_bound) -> typename RngType::result_type @@ -517,7 +454,7 @@ auto bounded_rand(RngType& rng, typename RngType::result_type upper_bound) typedef typename RngType::result_type rtype; rtype threshold = (RngType::max() - RngType::min() + rtype(1) - upper_bound) % upper_bound; - for (;;) { + for (;;) { //-V1044 rtype r = rng() - RngType::min(); if (r >= threshold) return r % upper_bound; diff --git a/base/pcg-random/pcg_random.hpp b/base/pcg-random/pcg_random.hpp index abf83a60ee1..d9d3519a4cf 100644 --- a/base/pcg-random/pcg_random.hpp +++ b/base/pcg-random/pcg_random.hpp @@ -928,7 +928,7 @@ struct rxs_m_xs_mixin { constexpr bitcount_t shift = bits - xtypebits; constexpr bitcount_t mask = (1 << opbits) - 1; bitcount_t rshift = - opbits ? bitcount_t(internal >> (bits - opbits)) & mask : 0; + opbits ? bitcount_t(internal >> (bits - opbits)) & mask : 0; //-V547 internal ^= internal >> (opbits + rshift); internal *= mcg_multiplier::multiplier(); xtype result = internal >> shift; @@ -950,7 +950,7 @@ struct rxs_m_xs_mixin { internal *= mcg_unmultiplier::unmultiplier(); - bitcount_t rshift = opbits ? (internal >> (bits - opbits)) & mask : 0; + bitcount_t rshift = opbits ? (internal >> (bits - opbits)) & mask : 0; //-V547 internal = unxorshift(internal, bits, opbits + rshift); return internal; @@ -975,7 +975,7 @@ struct rxs_m_mixin { : 2; constexpr bitcount_t shift = bits - xtypebits; constexpr bitcount_t mask = (1 << opbits) - 1; - bitcount_t rshift = opbits ? (internal >> (bits - opbits)) & mask : 0; + bitcount_t rshift = opbits ? 
(internal >> (bits - opbits)) & mask : 0; //-V547 internal ^= internal >> (opbits + rshift); internal *= mcg_multiplier::multiplier(); xtype result = internal >> shift; @@ -1366,7 +1366,7 @@ void extended::selfinit() // - any strange correlations would only be apparent if we // were to backstep the generator so that the base generator // was generating the same values again - result_type xdiff = baseclass::operator()() - baseclass::operator()(); + result_type xdiff = baseclass::operator()() - baseclass::operator()(); //-V501 for (size_t i = 0; i < table_size; ++i) { data_[i] = baseclass::operator()() ^ xdiff; } @@ -1643,22 +1643,22 @@ typedef setseq_base template -using ext_std8 = extended; template -using ext_std16 = extended; template -using ext_std32 = extended; template -using ext_std64 = extended; diff --git a/cmake/arch.cmake b/cmake/arch.cmake index 9604ef62b31..60e0346dbbf 100644 --- a/cmake/arch.cmake +++ b/cmake/arch.cmake @@ -1,7 +1,7 @@ if (CMAKE_SYSTEM_PROCESSOR MATCHES "amd64|x86_64") set (ARCH_AMD64 1) endif () -if (CMAKE_SYSTEM_PROCESSOR MATCHES "^(aarch64.*|AARCH64.*)") +if (CMAKE_SYSTEM_PROCESSOR MATCHES "^(aarch64.*|AARCH64.*|arm64.*|ARM64.*)") set (ARCH_AARCH64 1) endif () if (ARCH_AARCH64 OR CMAKE_SYSTEM_PROCESSOR MATCHES "arm") diff --git a/cmake/autogenerated_versions.txt b/cmake/autogenerated_versions.txt index 9d74179902d..51f4b974161 100644 --- a/cmake/autogenerated_versions.txt +++ b/cmake/autogenerated_versions.txt @@ -1,9 +1,9 @@ # This strings autochanged from release_lib.sh: -SET(VERSION_REVISION 54450) +SET(VERSION_REVISION 54451) SET(VERSION_MAJOR 21) -SET(VERSION_MINOR 5) +SET(VERSION_MINOR 6) SET(VERSION_PATCH 1) -SET(VERSION_GITHASH 3827789b3d8fd2021952e57e5110343d26daa1a1) -SET(VERSION_DESCRIBE v21.5.1.1-prestable) -SET(VERSION_STRING 21.5.1.1) +SET(VERSION_GITHASH 96fced4c3cf432fb0b401d2ab01f0c56e5f74a96) +SET(VERSION_DESCRIBE v21.6.1.1-prestable) +SET(VERSION_STRING 21.6.1.1) # end of autochange diff --git a/cmake/check_flags.cmake b/cmake/check_flags.cmake new file mode 100644 index 00000000000..5a4ff472868 --- /dev/null +++ b/cmake/check_flags.cmake @@ -0,0 +1,6 @@ +include (CheckCXXCompilerFlag) +include (CheckCCompilerFlag) + +check_cxx_compiler_flag("-Wsuggest-destructor-override" HAS_SUGGEST_DESTRUCTOR_OVERRIDE) +check_cxx_compiler_flag("-Wshadow" HAS_SHADOW) +check_cxx_compiler_flag("-Wsuggest-override" HAS_SUGGEST_OVERRIDE) diff --git a/cmake/darwin/default_libs.cmake b/cmake/darwin/default_libs.cmake index 79ac675f234..a6ee800d59b 100644 --- a/cmake/darwin/default_libs.cmake +++ b/cmake/darwin/default_libs.cmake @@ -1,11 +1,14 @@ set (DEFAULT_LIBS "-nodefaultlibs") -if (NOT COMPILER_CLANG) - message (FATAL_ERROR "Darwin build is supported only for Clang") -endif () - set (DEFAULT_LIBS "${DEFAULT_LIBS} ${COVERAGE_OPTION} -lc -lm -lpthread -ldl") +if (COMPILER_GCC) + set (DEFAULT_LIBS "${DEFAULT_LIBS} -lgcc_eh") + if (ARCH_AARCH64) + set (DEFAULT_LIBS "${DEFAULT_LIBS} -lgcc") + endif () +endif () + message(STATUS "Default libraries: ${DEFAULT_LIBS}") set(CMAKE_CXX_STANDARD_LIBRARIES ${DEFAULT_LIBS}) diff --git a/cmake/darwin/toolchain-aarch64.cmake b/cmake/darwin/toolchain-aarch64.cmake new file mode 100644 index 00000000000..81398111495 --- /dev/null +++ b/cmake/darwin/toolchain-aarch64.cmake @@ -0,0 +1,14 @@ +set (CMAKE_SYSTEM_NAME "Darwin") +set (CMAKE_SYSTEM_PROCESSOR "aarch64") +set (CMAKE_C_COMPILER_TARGET "aarch64-apple-darwin") +set (CMAKE_CXX_COMPILER_TARGET "aarch64-apple-darwin") +set (CMAKE_ASM_COMPILER_TARGET 
"aarch64-apple-darwin") +set (CMAKE_OSX_SYSROOT "${CMAKE_CURRENT_LIST_DIR}/../toolchain/darwin-aarch64") + +set (CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY) # disable linkage check - it doesn't work in CMake + +set (HAS_PRE_1970_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE) +set (HAS_PRE_1970_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE) + +set (HAS_POST_2038_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE) +set (HAS_POST_2038_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE) diff --git a/cmake/find/amqpcpp.cmake b/cmake/find/amqpcpp.cmake index 4191dce26bb..a4a58349508 100644 --- a/cmake/find/amqpcpp.cmake +++ b/cmake/find/amqpcpp.cmake @@ -1,3 +1,8 @@ +if (MISSING_INTERNAL_LIBUV_LIBRARY) + message (WARNING "Can't find internal libuv needed for AMQP-CPP library") + set (ENABLE_AMQPCPP OFF CACHE INTERNAL "") +endif() + option(ENABLE_AMQPCPP "Enalbe AMQP-CPP" ${ENABLE_LIBRARIES}) if (NOT ENABLE_AMQPCPP) @@ -12,11 +17,13 @@ if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/AMQP-CPP/CMakeLists.txt") endif () set (USE_AMQPCPP 1) -set (AMQPCPP_LIBRARY AMQP-CPP) +set (AMQPCPP_LIBRARY amqp-cpp) set (AMQPCPP_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/AMQP-CPP/include") list (APPEND AMQPCPP_INCLUDE_DIR - "${ClickHouse_SOURCE_DIR}/contrib/AMQP-CPP/include" + "${LIBUV_INCLUDE_DIR}" "${ClickHouse_SOURCE_DIR}/contrib/AMQP-CPP") +list (APPEND AMQPCPP_LIBRARY "${LIBUV_LIBRARY}") + message (STATUS "Using AMQP-CPP=${USE_AMQPCPP}: ${AMQPCPP_INCLUDE_DIR} : ${AMQPCPP_LIBRARY}") diff --git a/cmake/find/cassandra.cmake b/cmake/find/cassandra.cmake index 037d6c3f131..b6e97ff5ef8 100644 --- a/cmake/find/cassandra.cmake +++ b/cmake/find/cassandra.cmake @@ -1,3 +1,8 @@ +if (MISSING_INTERNAL_LIBUV_LIBRARY) + message (WARNING "Disabling cassandra due to missing libuv") + set (ENABLE_CASSANDRA OFF CACHE INTERNAL "") +endif() + option(ENABLE_CASSANDRA "Enable Cassandra" ${ENABLE_LIBRARIES}) if (NOT ENABLE_CASSANDRA) @@ -8,27 +13,22 @@ if (APPLE) set(CMAKE_MACOSX_RPATH ON) endif() -if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/libuv") - message (ERROR "submodule contrib/libuv is missing. to fix try run: \n git submodule update --init --recursive") - message (${RECONFIGURE_MESSAGE_LEVEL} "Can't find internal libuv needed for Cassandra") -elseif (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/cassandra") +if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/cassandra") message (ERROR "submodule contrib/cassandra is missing. 
to fix try run: \n git submodule update --init --recursive") message (${RECONFIGURE_MESSAGE_LEVEL} "Can't find internal Cassandra") -else() - set (LIBUV_ROOT_DIR "${ClickHouse_SOURCE_DIR}/contrib/libuv") - set (CASSANDRA_INCLUDE_DIR - "${ClickHouse_SOURCE_DIR}/contrib/cassandra/include/") - if (MAKE_STATIC_LIBRARIES) - set (LIBUV_LIBRARY uv_a) - set (CASSANDRA_LIBRARY cassandra_static) - else() - set (LIBUV_LIBRARY uv) - set (CASSANDRA_LIBRARY cassandra) - endif() - - set (USE_CASSANDRA 1) - set (CASS_ROOT_DIR "${ClickHouse_SOURCE_DIR}/contrib/cassandra") + set (USE_CASSANDRA 0) + return() endif() +set (USE_CASSANDRA 1) +set (CASSANDRA_INCLUDE_DIR + "${ClickHouse_SOURCE_DIR}/contrib/cassandra/include/") +if (MAKE_STATIC_LIBRARIES) + set (CASSANDRA_LIBRARY cassandra_static) +else() + set (CASSANDRA_LIBRARY cassandra) +endif() + +set (CASS_ROOT_DIR "${ClickHouse_SOURCE_DIR}/contrib/cassandra") + message (STATUS "Using cassandra=${USE_CASSANDRA}: ${CASSANDRA_INCLUDE_DIR} : ${CASSANDRA_LIBRARY}") -message (STATUS "Using libuv: ${LIBUV_ROOT_DIR} : ${LIBUV_LIBRARY}") diff --git a/cmake/find/ccache.cmake b/cmake/find/ccache.cmake index fea1f8b4c97..986c9cb5fe2 100644 --- a/cmake/find/ccache.cmake +++ b/cmake/find/ccache.cmake @@ -32,7 +32,9 @@ if (CCACHE_FOUND AND NOT COMPILER_MATCHES_CCACHE) if (CCACHE_VERSION VERSION_GREATER "3.2.0" OR NOT CMAKE_CXX_COMPILER_ID STREQUAL "Clang") message(STATUS "Using ${CCACHE_FOUND} ${CCACHE_VERSION}") - set_property (GLOBAL PROPERTY RULE_LAUNCH_COMPILE ${CCACHE_FOUND}) + set (CMAKE_CXX_COMPILER_LAUNCHER ${CCACHE_FOUND} ${CMAKE_CXX_COMPILER_LAUNCHER}) + set (CMAKE_C_COMPILER_LAUNCHER ${CCACHE_FOUND} ${CMAKE_C_COMPILER_LAUNCHER}) + set_property (GLOBAL PROPERTY RULE_LAUNCH_LINK ${CCACHE_FOUND}) # debian (debhelpers) set SOURCE_DATE_EPOCH environment variable, that is diff --git a/cmake/find/ldap.cmake b/cmake/find/ldap.cmake index 0dffa334e73..d8baea89429 100644 --- a/cmake/find/ldap.cmake +++ b/cmake/find/ldap.cmake @@ -64,7 +64,8 @@ if (NOT OPENLDAP_FOUND AND NOT MISSING_INTERNAL_LDAP_LIBRARY) ( "${_system_name}" STREQUAL "linux" AND "${_system_processor}" STREQUAL "aarch64" ) OR ( "${_system_name}" STREQUAL "linux" AND "${_system_processor}" STREQUAL "ppc64le" ) OR ( "${_system_name}" STREQUAL "freebsd" AND "${_system_processor}" STREQUAL "x86_64" ) OR - ( "${_system_name}" STREQUAL "darwin" AND "${_system_processor}" STREQUAL "x86_64" ) + ( "${_system_name}" STREQUAL "darwin" AND "${_system_processor}" STREQUAL "x86_64" ) OR + ( "${_system_name}" STREQUAL "darwin" AND "${_system_processor}" STREQUAL "aarch64" ) ) set (_ldap_supported_platform TRUE) endif () diff --git a/cmake/find/libuv.cmake b/cmake/find/libuv.cmake new file mode 100644 index 00000000000..f0023209309 --- /dev/null +++ b/cmake/find/libuv.cmake @@ -0,0 +1,22 @@ +if (OS_DARWIN AND COMPILER_GCC) + message (WARNING "libuv cannot be built with GCC in macOS due to a bug: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=93082") + SET(MISSING_INTERNAL_LIBUV_LIBRARY 1) + return() +endif() + +if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/libuv") + message (WARNING "submodule contrib/libuv is missing. 
to fix try run: \n git submodule update --init --recursive") + SET(MISSING_INTERNAL_LIBUV_LIBRARY 1) + return() +endif() + +if (MAKE_STATIC_LIBRARIES) + set (LIBUV_LIBRARY uv_a) +else() + set (LIBUV_LIBRARY uv) +endif() + +set (LIBUV_ROOT_DIR "${ClickHouse_SOURCE_DIR}/contrib/libuv") +set (LIBUV_INCLUDE_DIR "${LIBUV_ROOT_DIR}/include") + +message (STATUS "Using libuv: ${LIBUV_ROOT_DIR} : ${LIBUV_LIBRARY}") diff --git a/cmake/find/llvm.cmake b/cmake/find/llvm.cmake index e0ba1d9b039..e08f45b9932 100644 --- a/cmake/find/llvm.cmake +++ b/cmake/find/llvm.cmake @@ -2,7 +2,7 @@ if (APPLE OR SPLIT_SHARED_LIBRARIES OR NOT ARCH_AMD64) set (ENABLE_EMBEDDED_COMPILER OFF CACHE INTERNAL "") endif() -option (ENABLE_EMBEDDED_COMPILER "Set to TRUE to enable support for 'compile_expressions' option for query execution" ${ENABLE_LIBRARIES}) +option (ENABLE_EMBEDDED_COMPILER "Enable support for 'compile_expressions' option for query execution" ON) # Broken in macos. TODO: update clang, re-test, enable on Apple if (ENABLE_EMBEDDED_COMPILER AND NOT SPLIT_SHARED_LIBRARIES AND ARCH_AMD64 AND NOT (SANITIZE STREQUAL "undefined")) option (USE_INTERNAL_LLVM_LIBRARY "Use bundled or system LLVM library." ${NOT_UNBUNDLED}) @@ -24,9 +24,9 @@ if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/llvm/llvm/CMakeLists.txt") endif () if (NOT USE_INTERNAL_LLVM_LIBRARY) - set (LLVM_PATHS "/usr/local/lib/llvm") + set (LLVM_PATHS "/usr/local/lib/llvm" "/usr/lib/llvm") - foreach(llvm_v 10 9 8) + foreach(llvm_v 11.1 11) if (NOT LLVM_FOUND) find_package (LLVM ${llvm_v} CONFIG PATHS ${LLVM_PATHS}) endif () @@ -102,7 +102,6 @@ LLVMRuntimeDyld LLVMX86CodeGen LLVMX86Desc LLVMX86Info -LLVMX86Utils LLVMAsmPrinter LLVMDebugInfoDWARF LLVMGlobalISel diff --git a/cmake/find/nanodbc.cmake b/cmake/find/nanodbc.cmake new file mode 100644 index 00000000000..894a2a60bad --- /dev/null +++ b/cmake/find/nanodbc.cmake @@ -0,0 +1,16 @@ +if (NOT ENABLE_ODBC) + return () +endif () + +if (NOT USE_INTERNAL_NANODBC_LIBRARY) + message (FATAL_ERROR "Only the bundled nanodbc library can be used") +endif () + +if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/nanodbc/CMakeLists.txt") + message (FATAL_ERROR "submodule contrib/nanodbc is missing. 
to fix try run: \n git submodule update --init --recursive") +endif() + +set (NANODBC_LIBRARY nanodbc) +set (NANODBC_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/nanodbc/nanodbc") + +message (STATUS "Using nanodbc: ${NANODBC_INCLUDE_DIR} : ${NANODBC_LIBRARY}") diff --git a/cmake/find/nuraft.cmake b/cmake/find/nuraft.cmake index 7fa5251946e..4e5258e132f 100644 --- a/cmake/find/nuraft.cmake +++ b/cmake/find/nuraft.cmake @@ -11,7 +11,7 @@ if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/NuRaft/CMakeLists.txt") return() endif () -if (NOT OS_FREEBSD AND NOT OS_DARWIN) +if (NOT OS_FREEBSD) set (USE_NURAFT 1) set (NURAFT_LIBRARY nuraft) diff --git a/cmake/find/odbc.cmake b/cmake/find/odbc.cmake index a23f0c831e9..c475e600c0d 100644 --- a/cmake/find/odbc.cmake +++ b/cmake/find/odbc.cmake @@ -50,4 +50,6 @@ if (NOT EXTERNAL_ODBC_LIBRARY_FOUND) set (USE_INTERNAL_ODBC_LIBRARY 1) endif () +set (USE_INTERNAL_NANODBC_LIBRARY 1) + message (STATUS "Using unixodbc") diff --git a/cmake/find/rocksdb.cmake b/cmake/find/rocksdb.cmake index 968cdb52407..94278a603d7 100644 --- a/cmake/find/rocksdb.cmake +++ b/cmake/find/rocksdb.cmake @@ -1,3 +1,7 @@ +if (OS_DARWIN AND ARCH_AARCH64) + set (ENABLE_ROCKSDB OFF CACHE INTERNAL "") +endif() + option(ENABLE_ROCKSDB "Enable ROCKSDB" ${ENABLE_LIBRARIES}) if (NOT ENABLE_ROCKSDB) diff --git a/cmake/sanitize.cmake b/cmake/sanitize.cmake index 6c23ce8bc91..f60f7431389 100644 --- a/cmake/sanitize.cmake +++ b/cmake/sanitize.cmake @@ -40,7 +40,7 @@ if (SANITIZE) # RelWithDebInfo, and downgrade optimizations to -O1 but not to -Og, to # keep the binary size down. # TODO: try compiling with -Og and with ld.gold. - set (MSAN_FLAGS "-fsanitize=memory -fsanitize-memory-track-origins -fno-optimize-sibling-calls -fsanitize-blacklist=${CMAKE_SOURCE_DIR}/tests/msan_suppressions.txt") + set (MSAN_FLAGS "-fsanitize=memory -fsanitize-memory-use-after-dtor -fsanitize-memory-track-origins -fno-optimize-sibling-calls -fsanitize-blacklist=${CMAKE_SOURCE_DIR}/tests/msan_suppressions.txt") set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SAN_FLAGS} ${MSAN_FLAGS}") set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SAN_FLAGS} ${MSAN_FLAGS}") diff --git a/cmake/target.cmake b/cmake/target.cmake index 7174ca3c2a9..d1a0b8f9cbf 100644 --- a/cmake/target.cmake +++ b/cmake/target.cmake @@ -12,6 +12,9 @@ elseif (CMAKE_SYSTEM_NAME MATCHES "FreeBSD") elseif (CMAKE_SYSTEM_NAME MATCHES "Darwin") set (OS_DARWIN 1) add_definitions(-D OS_DARWIN) +elseif (CMAKE_SYSTEM_NAME MATCHES "SunOS") + set (OS_SUNOS 1) + add_definitions(-D OS_SUNOS) endif () if (CMAKE_CROSSCOMPILING) diff --git a/cmake/tools.cmake b/cmake/tools.cmake index 44fc3b3e530..8ff94ab867b 100644 --- a/cmake/tools.cmake +++ b/cmake/tools.cmake @@ -8,10 +8,13 @@ endif () if (COMPILER_GCC) # Require minimum version of gcc - set (GCC_MINIMUM_VERSION 9) + set (GCC_MINIMUM_VERSION 10) if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS ${GCC_MINIMUM_VERSION} AND NOT CMAKE_VERSION VERSION_LESS 2.8.9) message (FATAL_ERROR "GCC version must be at least ${GCC_MINIMUM_VERSION}. For example, if GCC ${GCC_MINIMUM_VERSION} is available under gcc-${GCC_MINIMUM_VERSION}, g++-${GCC_MINIMUM_VERSION} names, do the following: export CC=gcc-${GCC_MINIMUM_VERSION} CXX=g++-${GCC_MINIMUM_VERSION}; rm -rf CMakeCache.txt CMakeFiles; and re run cmake or ./release.") endif () + + message (WARNING "GCC compiler is not officially supported for ClickHouse. 
You should migrate to clang.") + elseif (COMPILER_CLANG) # Require minimum version of clang/apple-clang if (CMAKE_CXX_COMPILER_ID MATCHES "AppleClang") diff --git a/cmake/warnings.cmake b/cmake/warnings.cmake index a398c59e981..a85fe8963c7 100644 --- a/cmake/warnings.cmake +++ b/cmake/warnings.cmake @@ -171,6 +171,7 @@ elseif (COMPILER_GCC) add_cxx_compile_options(-Wtrampolines) # Obvious add_cxx_compile_options(-Wunused) + add_cxx_compile_options(-Wundef) # Warn if vector operation is not implemented via SIMD capabilities of the architecture add_cxx_compile_options(-Wvector-operation-performance) # XXX: libstdc++ has some of these for 3way compare diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index 4aeb67a5085..9eafec23f51 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -1,4 +1,3 @@ -# Third-party libraries may have substandard code. # Put all targets defined here and in added subfolders under "contrib/" folder in GUI-based IDEs by default. # Some of third-party projects may override CMAKE_FOLDER or FOLDER property of their targets, so they will @@ -11,8 +10,10 @@ else () endif () unset (_current_dir_name) -set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -w") -set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -w") +# Third-party libraries may have substandard code. +# Also remove a possible source of nondeterminism. +set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -w -D__DATE__= -D__TIME__= -D__TIMESTAMP__=") +set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -w -D__DATE__= -D__TIME__= -D__TIMESTAMP__=") if (WITH_COVERAGE) set (WITHOUT_COVERAGE_LIST ${WITHOUT_COVERAGE}) @@ -47,7 +48,11 @@ add_subdirectory (lz4-cmake) add_subdirectory (murmurhash) add_subdirectory (replxx-cmake) add_subdirectory (unixodbc-cmake) -add_subdirectory (xz) +add_subdirectory (nanodbc-cmake) + +if (USE_INTERNAL_XZ_LIBRARY) + add_subdirectory (xz) +endif() add_subdirectory (poco-cmake) add_subdirectory (croaring-cmake) @@ -93,14 +98,8 @@ if (USE_INTERNAL_ZLIB_LIBRARY) add_subdirectory (${INTERNAL_ZLIB_NAME}) # We should use same defines when including zlib.h as used when zlib compiled target_compile_definitions (zlib PUBLIC ZLIB_COMPAT WITH_GZFILEOP) - if (TARGET zlibstatic) - target_compile_definitions (zlibstatic PUBLIC ZLIB_COMPAT WITH_GZFILEOP) - endif () if (ARCH_AMD64 OR ARCH_AARCH64) target_compile_definitions (zlib PUBLIC X86_64 UNALIGNED_OK) - if (TARGET zlibstatic) - target_compile_definitions (zlibstatic PUBLIC X86_64 UNALIGNED_OK) - endif () endif () endif () diff --git a/contrib/NuRaft b/contrib/NuRaft index 241fd3754a1..95d6bbba579 160000 --- a/contrib/NuRaft +++ b/contrib/NuRaft @@ -1 +1 @@ -Subproject commit 241fd3754a1eb4d82ab68a9a875dc99391ec9f02 +Subproject commit 95d6bbba579b3a4e4c2dede954f541ff6f3dba51 diff --git a/contrib/amqpcpp-cmake/CMakeLists.txt b/contrib/amqpcpp-cmake/CMakeLists.txt index 4853983680e..4e8342af125 100644 --- a/contrib/amqpcpp-cmake/CMakeLists.txt +++ b/contrib/amqpcpp-cmake/CMakeLists.txt @@ -1,25 +1,25 @@ -set (LIBRARY_DIR ${ClickHouse_SOURCE_DIR}/contrib/AMQP-CPP) +set (LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/AMQP-CPP") set (SRCS - ${LIBRARY_DIR}/src/array.cpp - ${LIBRARY_DIR}/src/channel.cpp - ${LIBRARY_DIR}/src/channelimpl.cpp - ${LIBRARY_DIR}/src/connectionimpl.cpp - ${LIBRARY_DIR}/src/deferredcancel.cpp - ${LIBRARY_DIR}/src/deferredconfirm.cpp - ${LIBRARY_DIR}/src/deferredconsumer.cpp - ${LIBRARY_DIR}/src/deferredextreceiver.cpp - ${LIBRARY_DIR}/src/deferredget.cpp - ${LIBRARY_DIR}/src/deferredpublisher.cpp - ${LIBRARY_DIR}/src/deferredreceiver.cpp - 
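# A minimal sketch of the launcher-based ccache hookup from the ccache.cmake
# hunk above, assuming only that a `ccache` binary is on PATH. Unlike the old
# global RULE_LAUNCH_COMPILE property, the CMAKE_<LANG>_COMPILER_LAUNCHER
# variables are honoured per target language by the Makefile and Ninja
# generators and compose with any launcher the user has already set:
find_program (CCACHE_PROGRAM ccache)
if (CCACHE_PROGRAM)
    set (CMAKE_C_COMPILER_LAUNCHER   "${CCACHE_PROGRAM}" ${CMAKE_C_COMPILER_LAUNCHER})
    set (CMAKE_CXX_COMPILER_LAUNCHER "${CCACHE_PROGRAM}" ${CMAKE_CXX_COMPILER_LAUNCHER})
endif ()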
${LIBRARY_DIR}/src/field.cpp - ${LIBRARY_DIR}/src/flags.cpp - ${LIBRARY_DIR}/src/linux_tcp/openssl.cpp - ${LIBRARY_DIR}/src/linux_tcp/tcpconnection.cpp - ${LIBRARY_DIR}/src/inbuffer.cpp - ${LIBRARY_DIR}/src/receivedframe.cpp - ${LIBRARY_DIR}/src/table.cpp - ${LIBRARY_DIR}/src/watchable.cpp + "${LIBRARY_DIR}/src/array.cpp" + "${LIBRARY_DIR}/src/channel.cpp" + "${LIBRARY_DIR}/src/channelimpl.cpp" + "${LIBRARY_DIR}/src/connectionimpl.cpp" + "${LIBRARY_DIR}/src/deferredcancel.cpp" + "${LIBRARY_DIR}/src/deferredconfirm.cpp" + "${LIBRARY_DIR}/src/deferredconsumer.cpp" + "${LIBRARY_DIR}/src/deferredextreceiver.cpp" + "${LIBRARY_DIR}/src/deferredget.cpp" + "${LIBRARY_DIR}/src/deferredpublisher.cpp" + "${LIBRARY_DIR}/src/deferredreceiver.cpp" + "${LIBRARY_DIR}/src/field.cpp" + "${LIBRARY_DIR}/src/flags.cpp" + "${LIBRARY_DIR}/src/linux_tcp/openssl.cpp" + "${LIBRARY_DIR}/src/linux_tcp/tcpconnection.cpp" + "${LIBRARY_DIR}/src/inbuffer.cpp" + "${LIBRARY_DIR}/src/receivedframe.cpp" + "${LIBRARY_DIR}/src/table.cpp" + "${LIBRARY_DIR}/src/watchable.cpp" ) add_library(amqp-cpp ${SRCS}) @@ -39,7 +39,7 @@ target_compile_options (amqp-cpp -w ) -target_include_directories (amqp-cpp SYSTEM PUBLIC ${LIBRARY_DIR}/include) +target_include_directories (amqp-cpp SYSTEM PUBLIC "${LIBRARY_DIR}/include") target_link_libraries (amqp-cpp PUBLIC ssl) diff --git a/contrib/antlr4-runtime b/contrib/antlr4-runtime index a2fa7b76e2e..672643e9a42 160000 --- a/contrib/antlr4-runtime +++ b/contrib/antlr4-runtime @@ -1 +1 @@ -Subproject commit a2fa7b76e2ee16d2ad955e9214a90bbf79da66fc +Subproject commit 672643e9a427ef803abf13bc8cb4989606553d64 diff --git a/contrib/antlr4-runtime-cmake/CMakeLists.txt b/contrib/antlr4-runtime-cmake/CMakeLists.txt index 5baefdb1e29..4f639a33ebf 100644 --- a/contrib/antlr4-runtime-cmake/CMakeLists.txt +++ b/contrib/antlr4-runtime-cmake/CMakeLists.txt @@ -1,154 +1,154 @@ -set (LIBRARY_DIR ${ClickHouse_SOURCE_DIR}/contrib/antlr4-runtime) +set (LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/antlr4-runtime") set (SRCS - ${LIBRARY_DIR}/ANTLRErrorListener.cpp - ${LIBRARY_DIR}/ANTLRErrorStrategy.cpp - ${LIBRARY_DIR}/ANTLRFileStream.cpp - ${LIBRARY_DIR}/ANTLRInputStream.cpp - ${LIBRARY_DIR}/atn/AbstractPredicateTransition.cpp - ${LIBRARY_DIR}/atn/ActionTransition.cpp - ${LIBRARY_DIR}/atn/AmbiguityInfo.cpp - ${LIBRARY_DIR}/atn/ArrayPredictionContext.cpp - ${LIBRARY_DIR}/atn/ATN.cpp - ${LIBRARY_DIR}/atn/ATNConfig.cpp - ${LIBRARY_DIR}/atn/ATNConfigSet.cpp - ${LIBRARY_DIR}/atn/ATNDeserializationOptions.cpp - ${LIBRARY_DIR}/atn/ATNDeserializer.cpp - ${LIBRARY_DIR}/atn/ATNSerializer.cpp - ${LIBRARY_DIR}/atn/ATNSimulator.cpp - ${LIBRARY_DIR}/atn/ATNState.cpp - ${LIBRARY_DIR}/atn/AtomTransition.cpp - ${LIBRARY_DIR}/atn/BasicBlockStartState.cpp - ${LIBRARY_DIR}/atn/BasicState.cpp - ${LIBRARY_DIR}/atn/BlockEndState.cpp - ${LIBRARY_DIR}/atn/BlockStartState.cpp - ${LIBRARY_DIR}/atn/ContextSensitivityInfo.cpp - ${LIBRARY_DIR}/atn/DecisionEventInfo.cpp - ${LIBRARY_DIR}/atn/DecisionInfo.cpp - ${LIBRARY_DIR}/atn/DecisionState.cpp - ${LIBRARY_DIR}/atn/EmptyPredictionContext.cpp - ${LIBRARY_DIR}/atn/EpsilonTransition.cpp - ${LIBRARY_DIR}/atn/ErrorInfo.cpp - ${LIBRARY_DIR}/atn/LexerAction.cpp - ${LIBRARY_DIR}/atn/LexerActionExecutor.cpp - ${LIBRARY_DIR}/atn/LexerATNConfig.cpp - ${LIBRARY_DIR}/atn/LexerATNSimulator.cpp - ${LIBRARY_DIR}/atn/LexerChannelAction.cpp - ${LIBRARY_DIR}/atn/LexerCustomAction.cpp - ${LIBRARY_DIR}/atn/LexerIndexedCustomAction.cpp - ${LIBRARY_DIR}/atn/LexerModeAction.cpp - 
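# A sketch of the third-party hygiene pattern applied throughout contrib/ in
# this PR: warnings are silenced wholesale with -w, the
# __DATE__/__TIME__/__TIMESTAMP__ macros are blanked so bundled objects do not
# embed the build time (a source of nondeterminism), and bundled headers are
# added as SYSTEM so they cannot leak warnings into -Werror first-party code.
# The target and source names below are hypothetical.
set (CMAKE_C_FLAGS   "${CMAKE_C_FLAGS} -w -D__DATE__= -D__TIME__= -D__TIMESTAMP__=")
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -w -D__DATE__= -D__TIME__= -D__TIMESTAMP__=")
add_library (bundled_lib "${LIBRARY_DIR}/src/bundled.cpp")        # hypothetical
target_include_directories (bundled_lib SYSTEM PUBLIC "${LIBRARY_DIR}/include")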
${LIBRARY_DIR}/atn/LexerMoreAction.cpp - ${LIBRARY_DIR}/atn/LexerPopModeAction.cpp - ${LIBRARY_DIR}/atn/LexerPushModeAction.cpp - ${LIBRARY_DIR}/atn/LexerSkipAction.cpp - ${LIBRARY_DIR}/atn/LexerTypeAction.cpp - ${LIBRARY_DIR}/atn/LL1Analyzer.cpp - ${LIBRARY_DIR}/atn/LookaheadEventInfo.cpp - ${LIBRARY_DIR}/atn/LoopEndState.cpp - ${LIBRARY_DIR}/atn/NotSetTransition.cpp - ${LIBRARY_DIR}/atn/OrderedATNConfigSet.cpp - ${LIBRARY_DIR}/atn/ParseInfo.cpp - ${LIBRARY_DIR}/atn/ParserATNSimulator.cpp - ${LIBRARY_DIR}/atn/PlusBlockStartState.cpp - ${LIBRARY_DIR}/atn/PlusLoopbackState.cpp - ${LIBRARY_DIR}/atn/PrecedencePredicateTransition.cpp - ${LIBRARY_DIR}/atn/PredicateEvalInfo.cpp - ${LIBRARY_DIR}/atn/PredicateTransition.cpp - ${LIBRARY_DIR}/atn/PredictionContext.cpp - ${LIBRARY_DIR}/atn/PredictionMode.cpp - ${LIBRARY_DIR}/atn/ProfilingATNSimulator.cpp - ${LIBRARY_DIR}/atn/RangeTransition.cpp - ${LIBRARY_DIR}/atn/RuleStartState.cpp - ${LIBRARY_DIR}/atn/RuleStopState.cpp - ${LIBRARY_DIR}/atn/RuleTransition.cpp - ${LIBRARY_DIR}/atn/SemanticContext.cpp - ${LIBRARY_DIR}/atn/SetTransition.cpp - ${LIBRARY_DIR}/atn/SingletonPredictionContext.cpp - ${LIBRARY_DIR}/atn/StarBlockStartState.cpp - ${LIBRARY_DIR}/atn/StarLoopbackState.cpp - ${LIBRARY_DIR}/atn/StarLoopEntryState.cpp - ${LIBRARY_DIR}/atn/TokensStartState.cpp - ${LIBRARY_DIR}/atn/Transition.cpp - ${LIBRARY_DIR}/atn/WildcardTransition.cpp - ${LIBRARY_DIR}/BailErrorStrategy.cpp - ${LIBRARY_DIR}/BaseErrorListener.cpp - ${LIBRARY_DIR}/BufferedTokenStream.cpp - ${LIBRARY_DIR}/CharStream.cpp - ${LIBRARY_DIR}/CommonToken.cpp - ${LIBRARY_DIR}/CommonTokenFactory.cpp - ${LIBRARY_DIR}/CommonTokenStream.cpp - ${LIBRARY_DIR}/ConsoleErrorListener.cpp - ${LIBRARY_DIR}/DefaultErrorStrategy.cpp - ${LIBRARY_DIR}/dfa/DFA.cpp - ${LIBRARY_DIR}/dfa/DFASerializer.cpp - ${LIBRARY_DIR}/dfa/DFAState.cpp - ${LIBRARY_DIR}/dfa/LexerDFASerializer.cpp - ${LIBRARY_DIR}/DiagnosticErrorListener.cpp - ${LIBRARY_DIR}/Exceptions.cpp - ${LIBRARY_DIR}/FailedPredicateException.cpp - ${LIBRARY_DIR}/InputMismatchException.cpp - ${LIBRARY_DIR}/InterpreterRuleContext.cpp - ${LIBRARY_DIR}/IntStream.cpp - ${LIBRARY_DIR}/Lexer.cpp - ${LIBRARY_DIR}/LexerInterpreter.cpp - ${LIBRARY_DIR}/LexerNoViableAltException.cpp - ${LIBRARY_DIR}/ListTokenSource.cpp - ${LIBRARY_DIR}/misc/InterpreterDataReader.cpp - ${LIBRARY_DIR}/misc/Interval.cpp - ${LIBRARY_DIR}/misc/IntervalSet.cpp - ${LIBRARY_DIR}/misc/MurmurHash.cpp - ${LIBRARY_DIR}/misc/Predicate.cpp - ${LIBRARY_DIR}/NoViableAltException.cpp - ${LIBRARY_DIR}/Parser.cpp - ${LIBRARY_DIR}/ParserInterpreter.cpp - ${LIBRARY_DIR}/ParserRuleContext.cpp - ${LIBRARY_DIR}/ProxyErrorListener.cpp - ${LIBRARY_DIR}/RecognitionException.cpp - ${LIBRARY_DIR}/Recognizer.cpp - ${LIBRARY_DIR}/RuleContext.cpp - ${LIBRARY_DIR}/RuleContextWithAltNum.cpp - ${LIBRARY_DIR}/RuntimeMetaData.cpp - ${LIBRARY_DIR}/support/Any.cpp - ${LIBRARY_DIR}/support/Arrays.cpp - ${LIBRARY_DIR}/support/CPPUtils.cpp - ${LIBRARY_DIR}/support/guid.cpp - ${LIBRARY_DIR}/support/StringUtils.cpp - ${LIBRARY_DIR}/Token.cpp - ${LIBRARY_DIR}/TokenSource.cpp - ${LIBRARY_DIR}/TokenStream.cpp - ${LIBRARY_DIR}/TokenStreamRewriter.cpp - ${LIBRARY_DIR}/tree/ErrorNode.cpp - ${LIBRARY_DIR}/tree/ErrorNodeImpl.cpp - ${LIBRARY_DIR}/tree/IterativeParseTreeWalker.cpp - ${LIBRARY_DIR}/tree/ParseTree.cpp - ${LIBRARY_DIR}/tree/ParseTreeListener.cpp - ${LIBRARY_DIR}/tree/ParseTreeVisitor.cpp - ${LIBRARY_DIR}/tree/ParseTreeWalker.cpp - ${LIBRARY_DIR}/tree/pattern/Chunk.cpp - ${LIBRARY_DIR}/tree/pattern/ParseTreeMatch.cpp - 
${LIBRARY_DIR}/tree/pattern/ParseTreePattern.cpp - ${LIBRARY_DIR}/tree/pattern/ParseTreePatternMatcher.cpp - ${LIBRARY_DIR}/tree/pattern/RuleTagToken.cpp - ${LIBRARY_DIR}/tree/pattern/TagChunk.cpp - ${LIBRARY_DIR}/tree/pattern/TextChunk.cpp - ${LIBRARY_DIR}/tree/pattern/TokenTagToken.cpp - ${LIBRARY_DIR}/tree/TerminalNode.cpp - ${LIBRARY_DIR}/tree/TerminalNodeImpl.cpp - ${LIBRARY_DIR}/tree/Trees.cpp - ${LIBRARY_DIR}/tree/xpath/XPath.cpp - ${LIBRARY_DIR}/tree/xpath/XPathElement.cpp - ${LIBRARY_DIR}/tree/xpath/XPathLexer.cpp - ${LIBRARY_DIR}/tree/xpath/XPathLexerErrorListener.cpp - ${LIBRARY_DIR}/tree/xpath/XPathRuleAnywhereElement.cpp - ${LIBRARY_DIR}/tree/xpath/XPathRuleElement.cpp - ${LIBRARY_DIR}/tree/xpath/XPathTokenAnywhereElement.cpp - ${LIBRARY_DIR}/tree/xpath/XPathTokenElement.cpp - ${LIBRARY_DIR}/tree/xpath/XPathWildcardAnywhereElement.cpp - ${LIBRARY_DIR}/tree/xpath/XPathWildcardElement.cpp - ${LIBRARY_DIR}/UnbufferedCharStream.cpp - ${LIBRARY_DIR}/UnbufferedTokenStream.cpp - ${LIBRARY_DIR}/Vocabulary.cpp - ${LIBRARY_DIR}/WritableToken.cpp + "${LIBRARY_DIR}/ANTLRErrorListener.cpp" + "${LIBRARY_DIR}/ANTLRErrorStrategy.cpp" + "${LIBRARY_DIR}/ANTLRFileStream.cpp" + "${LIBRARY_DIR}/ANTLRInputStream.cpp" + "${LIBRARY_DIR}/atn/AbstractPredicateTransition.cpp" + "${LIBRARY_DIR}/atn/ActionTransition.cpp" + "${LIBRARY_DIR}/atn/AmbiguityInfo.cpp" + "${LIBRARY_DIR}/atn/ArrayPredictionContext.cpp" + "${LIBRARY_DIR}/atn/ATN.cpp" + "${LIBRARY_DIR}/atn/ATNConfig.cpp" + "${LIBRARY_DIR}/atn/ATNConfigSet.cpp" + "${LIBRARY_DIR}/atn/ATNDeserializationOptions.cpp" + "${LIBRARY_DIR}/atn/ATNDeserializer.cpp" + "${LIBRARY_DIR}/atn/ATNSerializer.cpp" + "${LIBRARY_DIR}/atn/ATNSimulator.cpp" + "${LIBRARY_DIR}/atn/ATNState.cpp" + "${LIBRARY_DIR}/atn/AtomTransition.cpp" + "${LIBRARY_DIR}/atn/BasicBlockStartState.cpp" + "${LIBRARY_DIR}/atn/BasicState.cpp" + "${LIBRARY_DIR}/atn/BlockEndState.cpp" + "${LIBRARY_DIR}/atn/BlockStartState.cpp" + "${LIBRARY_DIR}/atn/ContextSensitivityInfo.cpp" + "${LIBRARY_DIR}/atn/DecisionEventInfo.cpp" + "${LIBRARY_DIR}/atn/DecisionInfo.cpp" + "${LIBRARY_DIR}/atn/DecisionState.cpp" + "${LIBRARY_DIR}/atn/EmptyPredictionContext.cpp" + "${LIBRARY_DIR}/atn/EpsilonTransition.cpp" + "${LIBRARY_DIR}/atn/ErrorInfo.cpp" + "${LIBRARY_DIR}/atn/LexerAction.cpp" + "${LIBRARY_DIR}/atn/LexerActionExecutor.cpp" + "${LIBRARY_DIR}/atn/LexerATNConfig.cpp" + "${LIBRARY_DIR}/atn/LexerATNSimulator.cpp" + "${LIBRARY_DIR}/atn/LexerChannelAction.cpp" + "${LIBRARY_DIR}/atn/LexerCustomAction.cpp" + "${LIBRARY_DIR}/atn/LexerIndexedCustomAction.cpp" + "${LIBRARY_DIR}/atn/LexerModeAction.cpp" + "${LIBRARY_DIR}/atn/LexerMoreAction.cpp" + "${LIBRARY_DIR}/atn/LexerPopModeAction.cpp" + "${LIBRARY_DIR}/atn/LexerPushModeAction.cpp" + "${LIBRARY_DIR}/atn/LexerSkipAction.cpp" + "${LIBRARY_DIR}/atn/LexerTypeAction.cpp" + "${LIBRARY_DIR}/atn/LL1Analyzer.cpp" + "${LIBRARY_DIR}/atn/LookaheadEventInfo.cpp" + "${LIBRARY_DIR}/atn/LoopEndState.cpp" + "${LIBRARY_DIR}/atn/NotSetTransition.cpp" + "${LIBRARY_DIR}/atn/OrderedATNConfigSet.cpp" + "${LIBRARY_DIR}/atn/ParseInfo.cpp" + "${LIBRARY_DIR}/atn/ParserATNSimulator.cpp" + "${LIBRARY_DIR}/atn/PlusBlockStartState.cpp" + "${LIBRARY_DIR}/atn/PlusLoopbackState.cpp" + "${LIBRARY_DIR}/atn/PrecedencePredicateTransition.cpp" + "${LIBRARY_DIR}/atn/PredicateEvalInfo.cpp" + "${LIBRARY_DIR}/atn/PredicateTransition.cpp" + "${LIBRARY_DIR}/atn/PredictionContext.cpp" + "${LIBRARY_DIR}/atn/PredictionMode.cpp" + "${LIBRARY_DIR}/atn/ProfilingATNSimulator.cpp" + 
"${LIBRARY_DIR}/atn/RangeTransition.cpp" + "${LIBRARY_DIR}/atn/RuleStartState.cpp" + "${LIBRARY_DIR}/atn/RuleStopState.cpp" + "${LIBRARY_DIR}/atn/RuleTransition.cpp" + "${LIBRARY_DIR}/atn/SemanticContext.cpp" + "${LIBRARY_DIR}/atn/SetTransition.cpp" + "${LIBRARY_DIR}/atn/SingletonPredictionContext.cpp" + "${LIBRARY_DIR}/atn/StarBlockStartState.cpp" + "${LIBRARY_DIR}/atn/StarLoopbackState.cpp" + "${LIBRARY_DIR}/atn/StarLoopEntryState.cpp" + "${LIBRARY_DIR}/atn/TokensStartState.cpp" + "${LIBRARY_DIR}/atn/Transition.cpp" + "${LIBRARY_DIR}/atn/WildcardTransition.cpp" + "${LIBRARY_DIR}/BailErrorStrategy.cpp" + "${LIBRARY_DIR}/BaseErrorListener.cpp" + "${LIBRARY_DIR}/BufferedTokenStream.cpp" + "${LIBRARY_DIR}/CharStream.cpp" + "${LIBRARY_DIR}/CommonToken.cpp" + "${LIBRARY_DIR}/CommonTokenFactory.cpp" + "${LIBRARY_DIR}/CommonTokenStream.cpp" + "${LIBRARY_DIR}/ConsoleErrorListener.cpp" + "${LIBRARY_DIR}/DefaultErrorStrategy.cpp" + "${LIBRARY_DIR}/dfa/DFA.cpp" + "${LIBRARY_DIR}/dfa/DFASerializer.cpp" + "${LIBRARY_DIR}/dfa/DFAState.cpp" + "${LIBRARY_DIR}/dfa/LexerDFASerializer.cpp" + "${LIBRARY_DIR}/DiagnosticErrorListener.cpp" + "${LIBRARY_DIR}/Exceptions.cpp" + "${LIBRARY_DIR}/FailedPredicateException.cpp" + "${LIBRARY_DIR}/InputMismatchException.cpp" + "${LIBRARY_DIR}/InterpreterRuleContext.cpp" + "${LIBRARY_DIR}/IntStream.cpp" + "${LIBRARY_DIR}/Lexer.cpp" + "${LIBRARY_DIR}/LexerInterpreter.cpp" + "${LIBRARY_DIR}/LexerNoViableAltException.cpp" + "${LIBRARY_DIR}/ListTokenSource.cpp" + "${LIBRARY_DIR}/misc/InterpreterDataReader.cpp" + "${LIBRARY_DIR}/misc/Interval.cpp" + "${LIBRARY_DIR}/misc/IntervalSet.cpp" + "${LIBRARY_DIR}/misc/MurmurHash.cpp" + "${LIBRARY_DIR}/misc/Predicate.cpp" + "${LIBRARY_DIR}/NoViableAltException.cpp" + "${LIBRARY_DIR}/Parser.cpp" + "${LIBRARY_DIR}/ParserInterpreter.cpp" + "${LIBRARY_DIR}/ParserRuleContext.cpp" + "${LIBRARY_DIR}/ProxyErrorListener.cpp" + "${LIBRARY_DIR}/RecognitionException.cpp" + "${LIBRARY_DIR}/Recognizer.cpp" + "${LIBRARY_DIR}/RuleContext.cpp" + "${LIBRARY_DIR}/RuleContextWithAltNum.cpp" + "${LIBRARY_DIR}/RuntimeMetaData.cpp" + "${LIBRARY_DIR}/support/Any.cpp" + "${LIBRARY_DIR}/support/Arrays.cpp" + "${LIBRARY_DIR}/support/CPPUtils.cpp" + "${LIBRARY_DIR}/support/guid.cpp" + "${LIBRARY_DIR}/support/StringUtils.cpp" + "${LIBRARY_DIR}/Token.cpp" + "${LIBRARY_DIR}/TokenSource.cpp" + "${LIBRARY_DIR}/TokenStream.cpp" + "${LIBRARY_DIR}/TokenStreamRewriter.cpp" + "${LIBRARY_DIR}/tree/ErrorNode.cpp" + "${LIBRARY_DIR}/tree/ErrorNodeImpl.cpp" + "${LIBRARY_DIR}/tree/IterativeParseTreeWalker.cpp" + "${LIBRARY_DIR}/tree/ParseTree.cpp" + "${LIBRARY_DIR}/tree/ParseTreeListener.cpp" + "${LIBRARY_DIR}/tree/ParseTreeVisitor.cpp" + "${LIBRARY_DIR}/tree/ParseTreeWalker.cpp" + "${LIBRARY_DIR}/tree/pattern/Chunk.cpp" + "${LIBRARY_DIR}/tree/pattern/ParseTreeMatch.cpp" + "${LIBRARY_DIR}/tree/pattern/ParseTreePattern.cpp" + "${LIBRARY_DIR}/tree/pattern/ParseTreePatternMatcher.cpp" + "${LIBRARY_DIR}/tree/pattern/RuleTagToken.cpp" + "${LIBRARY_DIR}/tree/pattern/TagChunk.cpp" + "${LIBRARY_DIR}/tree/pattern/TextChunk.cpp" + "${LIBRARY_DIR}/tree/pattern/TokenTagToken.cpp" + "${LIBRARY_DIR}/tree/TerminalNode.cpp" + "${LIBRARY_DIR}/tree/TerminalNodeImpl.cpp" + "${LIBRARY_DIR}/tree/Trees.cpp" + "${LIBRARY_DIR}/tree/xpath/XPath.cpp" + "${LIBRARY_DIR}/tree/xpath/XPathElement.cpp" + "${LIBRARY_DIR}/tree/xpath/XPathLexer.cpp" + "${LIBRARY_DIR}/tree/xpath/XPathLexerErrorListener.cpp" + "${LIBRARY_DIR}/tree/xpath/XPathRuleAnywhereElement.cpp" + "${LIBRARY_DIR}/tree/xpath/XPathRuleElement.cpp" 
+ "${LIBRARY_DIR}/tree/xpath/XPathTokenAnywhereElement.cpp" + "${LIBRARY_DIR}/tree/xpath/XPathTokenElement.cpp" + "${LIBRARY_DIR}/tree/xpath/XPathWildcardAnywhereElement.cpp" + "${LIBRARY_DIR}/tree/xpath/XPathWildcardElement.cpp" + "${LIBRARY_DIR}/UnbufferedCharStream.cpp" + "${LIBRARY_DIR}/UnbufferedTokenStream.cpp" + "${LIBRARY_DIR}/Vocabulary.cpp" + "${LIBRARY_DIR}/WritableToken.cpp" ) add_library (antlr4-runtime ${SRCS}) diff --git a/contrib/arrow-cmake/CMakeLists.txt b/contrib/arrow-cmake/CMakeLists.txt index 4b402a9db79..deefb244beb 100644 --- a/contrib/arrow-cmake/CMakeLists.txt +++ b/contrib/arrow-cmake/CMakeLists.txt @@ -2,69 +2,69 @@ set (CMAKE_CXX_STANDARD 17) # === thrift -set(LIBRARY_DIR ${ClickHouse_SOURCE_DIR}/contrib/thrift/lib/cpp) +set(LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/thrift/lib/cpp") # contrib/thrift/lib/cpp/CMakeLists.txt set(thriftcpp_SOURCES - ${LIBRARY_DIR}/src/thrift/TApplicationException.cpp - ${LIBRARY_DIR}/src/thrift/TOutput.cpp - ${LIBRARY_DIR}/src/thrift/async/TAsyncChannel.cpp - ${LIBRARY_DIR}/src/thrift/async/TAsyncProtocolProcessor.cpp - ${LIBRARY_DIR}/src/thrift/async/TConcurrentClientSyncInfo.h - ${LIBRARY_DIR}/src/thrift/async/TConcurrentClientSyncInfo.cpp - ${LIBRARY_DIR}/src/thrift/concurrency/ThreadManager.cpp - ${LIBRARY_DIR}/src/thrift/concurrency/TimerManager.cpp - ${LIBRARY_DIR}/src/thrift/concurrency/Util.cpp - ${LIBRARY_DIR}/src/thrift/processor/PeekProcessor.cpp - ${LIBRARY_DIR}/src/thrift/protocol/TBase64Utils.cpp - ${LIBRARY_DIR}/src/thrift/protocol/TDebugProtocol.cpp - ${LIBRARY_DIR}/src/thrift/protocol/TJSONProtocol.cpp - ${LIBRARY_DIR}/src/thrift/protocol/TMultiplexedProtocol.cpp - ${LIBRARY_DIR}/src/thrift/protocol/TProtocol.cpp - ${LIBRARY_DIR}/src/thrift/transport/TTransportException.cpp - ${LIBRARY_DIR}/src/thrift/transport/TFDTransport.cpp - ${LIBRARY_DIR}/src/thrift/transport/TSimpleFileTransport.cpp - ${LIBRARY_DIR}/src/thrift/transport/THttpTransport.cpp - ${LIBRARY_DIR}/src/thrift/transport/THttpClient.cpp - ${LIBRARY_DIR}/src/thrift/transport/THttpServer.cpp - ${LIBRARY_DIR}/src/thrift/transport/TSocket.cpp - ${LIBRARY_DIR}/src/thrift/transport/TSocketPool.cpp - ${LIBRARY_DIR}/src/thrift/transport/TServerSocket.cpp - ${LIBRARY_DIR}/src/thrift/transport/TTransportUtils.cpp - ${LIBRARY_DIR}/src/thrift/transport/TBufferTransports.cpp - ${LIBRARY_DIR}/src/thrift/server/TConnectedClient.cpp - ${LIBRARY_DIR}/src/thrift/server/TServerFramework.cpp - ${LIBRARY_DIR}/src/thrift/server/TSimpleServer.cpp - ${LIBRARY_DIR}/src/thrift/server/TThreadPoolServer.cpp - ${LIBRARY_DIR}/src/thrift/server/TThreadedServer.cpp + "${LIBRARY_DIR}/src/thrift/TApplicationException.cpp" + "${LIBRARY_DIR}/src/thrift/TOutput.cpp" + "${LIBRARY_DIR}/src/thrift/async/TAsyncChannel.cpp" + "${LIBRARY_DIR}/src/thrift/async/TAsyncProtocolProcessor.cpp" + "${LIBRARY_DIR}/src/thrift/async/TConcurrentClientSyncInfo.h" + "${LIBRARY_DIR}/src/thrift/async/TConcurrentClientSyncInfo.cpp" + "${LIBRARY_DIR}/src/thrift/concurrency/ThreadManager.cpp" + "${LIBRARY_DIR}/src/thrift/concurrency/TimerManager.cpp" + "${LIBRARY_DIR}/src/thrift/concurrency/Util.cpp" + "${LIBRARY_DIR}/src/thrift/processor/PeekProcessor.cpp" + "${LIBRARY_DIR}/src/thrift/protocol/TBase64Utils.cpp" + "${LIBRARY_DIR}/src/thrift/protocol/TDebugProtocol.cpp" + "${LIBRARY_DIR}/src/thrift/protocol/TJSONProtocol.cpp" + "${LIBRARY_DIR}/src/thrift/protocol/TMultiplexedProtocol.cpp" + "${LIBRARY_DIR}/src/thrift/protocol/TProtocol.cpp" + "${LIBRARY_DIR}/src/thrift/transport/TTransportException.cpp" + 
"${LIBRARY_DIR}/src/thrift/transport/TFDTransport.cpp" + "${LIBRARY_DIR}/src/thrift/transport/TSimpleFileTransport.cpp" + "${LIBRARY_DIR}/src/thrift/transport/THttpTransport.cpp" + "${LIBRARY_DIR}/src/thrift/transport/THttpClient.cpp" + "${LIBRARY_DIR}/src/thrift/transport/THttpServer.cpp" + "${LIBRARY_DIR}/src/thrift/transport/TSocket.cpp" + "${LIBRARY_DIR}/src/thrift/transport/TSocketPool.cpp" + "${LIBRARY_DIR}/src/thrift/transport/TServerSocket.cpp" + "${LIBRARY_DIR}/src/thrift/transport/TTransportUtils.cpp" + "${LIBRARY_DIR}/src/thrift/transport/TBufferTransports.cpp" + "${LIBRARY_DIR}/src/thrift/server/TConnectedClient.cpp" + "${LIBRARY_DIR}/src/thrift/server/TServerFramework.cpp" + "${LIBRARY_DIR}/src/thrift/server/TSimpleServer.cpp" + "${LIBRARY_DIR}/src/thrift/server/TThreadPoolServer.cpp" + "${LIBRARY_DIR}/src/thrift/server/TThreadedServer.cpp" ) set(thriftcpp_threads_SOURCES - ${LIBRARY_DIR}/src/thrift/concurrency/ThreadFactory.cpp - ${LIBRARY_DIR}/src/thrift/concurrency/Thread.cpp - ${LIBRARY_DIR}/src/thrift/concurrency/Monitor.cpp - ${LIBRARY_DIR}/src/thrift/concurrency/Mutex.cpp + "${LIBRARY_DIR}/src/thrift/concurrency/ThreadFactory.cpp" + "${LIBRARY_DIR}/src/thrift/concurrency/Thread.cpp" + "${LIBRARY_DIR}/src/thrift/concurrency/Monitor.cpp" + "${LIBRARY_DIR}/src/thrift/concurrency/Mutex.cpp" ) add_library(${THRIFT_LIBRARY} ${thriftcpp_SOURCES} ${thriftcpp_threads_SOURCES}) set_target_properties(${THRIFT_LIBRARY} PROPERTIES CXX_STANDARD 14) # REMOVE after https://github.com/apache/thrift/pull/1641 -target_include_directories(${THRIFT_LIBRARY} SYSTEM PUBLIC ${ClickHouse_SOURCE_DIR}/contrib/thrift/lib/cpp/src) +target_include_directories(${THRIFT_LIBRARY} SYSTEM PUBLIC "${ClickHouse_SOURCE_DIR}/contrib/thrift/lib/cpp/src") target_link_libraries (${THRIFT_LIBRARY} PRIVATE boost::headers_only) # === orc -set(ORC_SOURCE_DIR ${ClickHouse_SOURCE_DIR}/contrib/orc/c++) -set(ORC_INCLUDE_DIR ${ORC_SOURCE_DIR}/include) -set(ORC_SOURCE_SRC_DIR ${ORC_SOURCE_DIR}/src) -set(ORC_SOURCE_WRAP_DIR ${ORC_SOURCE_DIR}/wrap) +set(ORC_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/orc/c++") +set(ORC_INCLUDE_DIR "${ORC_SOURCE_DIR}/include") +set(ORC_SOURCE_SRC_DIR "${ORC_SOURCE_DIR}/src") +set(ORC_SOURCE_WRAP_DIR "${ORC_SOURCE_DIR}/wrap") -set(ORC_BUILD_SRC_DIR ${CMAKE_CURRENT_BINARY_DIR}/../orc/c++/src) -set(ORC_BUILD_INCLUDE_DIR ${CMAKE_CURRENT_BINARY_DIR}/../orc/c++/include) +set(ORC_BUILD_SRC_DIR "${CMAKE_CURRENT_BINARY_DIR}/../orc/c++/src") +set(ORC_BUILD_INCLUDE_DIR "${CMAKE_CURRENT_BINARY_DIR}/../orc/c++/include") -set(GOOGLE_PROTOBUF_DIR ${Protobuf_INCLUDE_DIR}/) +set(GOOGLE_PROTOBUF_DIR "${Protobuf_INCLUDE_DIR}/") set(ORC_ADDITION_SOURCE_DIR ${CMAKE_CURRENT_BINARY_DIR}) -set(ARROW_SRC_DIR ${ClickHouse_SOURCE_DIR}/contrib/arrow/cpp/src) +set(ARROW_SRC_DIR "${ClickHouse_SOURCE_DIR}/contrib/arrow/cpp/src") set(PROTOBUF_EXECUTABLE ${Protobuf_PROTOC_EXECUTABLE}) -set(PROTO_DIR ${ORC_SOURCE_DIR}/../proto) +set(PROTO_DIR "${ORC_SOURCE_DIR}/../proto") add_custom_command(OUTPUT orc_proto.pb.h orc_proto.pb.cc @@ -75,9 +75,9 @@ add_custom_command(OUTPUT orc_proto.pb.h orc_proto.pb.cc # === flatbuffers -set(FLATBUFFERS_SRC_DIR ${ClickHouse_SOURCE_DIR}/contrib/flatbuffers) -set(FLATBUFFERS_BINARY_DIR ${ClickHouse_BINARY_DIR}/contrib/flatbuffers) -set(FLATBUFFERS_INCLUDE_DIR ${FLATBUFFERS_SRC_DIR}/include) +set(FLATBUFFERS_SRC_DIR "${ClickHouse_SOURCE_DIR}/contrib/flatbuffers") +set(FLATBUFFERS_BINARY_DIR "${ClickHouse_BINARY_DIR}/contrib/flatbuffers") +set(FLATBUFFERS_INCLUDE_DIR 
"${FLATBUFFERS_SRC_DIR}/include") # set flatbuffers CMake options if (MAKE_STATIC_LIBRARIES) @@ -101,187 +101,187 @@ if (CMAKE_CXX_COMPILER_ID STREQUAL "AppleClang") set(CXX11_FLAGS "-std=c++0x") endif () -include(${ClickHouse_SOURCE_DIR}/contrib/orc/cmake_modules/CheckSourceCompiles.cmake) +include("${ClickHouse_SOURCE_DIR}/contrib/orc/cmake_modules/CheckSourceCompiles.cmake") include(orc_check.cmake) configure_file("${ORC_INCLUDE_DIR}/orc/orc-config.hh.in" "${ORC_BUILD_INCLUDE_DIR}/orc/orc-config.hh") configure_file("${ORC_SOURCE_SRC_DIR}/Adaptor.hh.in" "${ORC_BUILD_INCLUDE_DIR}/Adaptor.hh") set(ORC_SRCS - ${ARROW_SRC_DIR}/arrow/adapters/orc/adapter.cc - ${ARROW_SRC_DIR}/arrow/adapters/orc/adapter_util.cc - ${ORC_SOURCE_SRC_DIR}/Exceptions.cc - ${ORC_SOURCE_SRC_DIR}/OrcFile.cc - ${ORC_SOURCE_SRC_DIR}/Reader.cc - ${ORC_SOURCE_SRC_DIR}/ByteRLE.cc - ${ORC_SOURCE_SRC_DIR}/ColumnPrinter.cc - ${ORC_SOURCE_SRC_DIR}/ColumnReader.cc - ${ORC_SOURCE_SRC_DIR}/ColumnWriter.cc - ${ORC_SOURCE_SRC_DIR}/Common.cc - ${ORC_SOURCE_SRC_DIR}/Compression.cc - ${ORC_SOURCE_SRC_DIR}/Exceptions.cc - ${ORC_SOURCE_SRC_DIR}/Int128.cc - ${ORC_SOURCE_SRC_DIR}/LzoDecompressor.cc - ${ORC_SOURCE_SRC_DIR}/MemoryPool.cc - ${ORC_SOURCE_SRC_DIR}/OrcFile.cc - ${ORC_SOURCE_SRC_DIR}/Reader.cc - ${ORC_SOURCE_SRC_DIR}/RLE.cc - ${ORC_SOURCE_SRC_DIR}/RLEv1.cc - ${ORC_SOURCE_SRC_DIR}/RLEv2.cc - ${ORC_SOURCE_SRC_DIR}/Statistics.cc - ${ORC_SOURCE_SRC_DIR}/StripeStream.cc - ${ORC_SOURCE_SRC_DIR}/Timezone.cc - ${ORC_SOURCE_SRC_DIR}/TypeImpl.cc - ${ORC_SOURCE_SRC_DIR}/Vector.cc - ${ORC_SOURCE_SRC_DIR}/Writer.cc - ${ORC_SOURCE_SRC_DIR}/io/InputStream.cc - ${ORC_SOURCE_SRC_DIR}/io/OutputStream.cc - ${ORC_ADDITION_SOURCE_DIR}/orc_proto.pb.cc + "${ARROW_SRC_DIR}/arrow/adapters/orc/adapter.cc" + "${ARROW_SRC_DIR}/arrow/adapters/orc/adapter_util.cc" + "${ORC_SOURCE_SRC_DIR}/Exceptions.cc" + "${ORC_SOURCE_SRC_DIR}/OrcFile.cc" + "${ORC_SOURCE_SRC_DIR}/Reader.cc" + "${ORC_SOURCE_SRC_DIR}/ByteRLE.cc" + "${ORC_SOURCE_SRC_DIR}/ColumnPrinter.cc" + "${ORC_SOURCE_SRC_DIR}/ColumnReader.cc" + "${ORC_SOURCE_SRC_DIR}/ColumnWriter.cc" + "${ORC_SOURCE_SRC_DIR}/Common.cc" + "${ORC_SOURCE_SRC_DIR}/Compression.cc" + "${ORC_SOURCE_SRC_DIR}/Exceptions.cc" + "${ORC_SOURCE_SRC_DIR}/Int128.cc" + "${ORC_SOURCE_SRC_DIR}/LzoDecompressor.cc" + "${ORC_SOURCE_SRC_DIR}/MemoryPool.cc" + "${ORC_SOURCE_SRC_DIR}/OrcFile.cc" + "${ORC_SOURCE_SRC_DIR}/Reader.cc" + "${ORC_SOURCE_SRC_DIR}/RLE.cc" + "${ORC_SOURCE_SRC_DIR}/RLEv1.cc" + "${ORC_SOURCE_SRC_DIR}/RLEv2.cc" + "${ORC_SOURCE_SRC_DIR}/Statistics.cc" + "${ORC_SOURCE_SRC_DIR}/StripeStream.cc" + "${ORC_SOURCE_SRC_DIR}/Timezone.cc" + "${ORC_SOURCE_SRC_DIR}/TypeImpl.cc" + "${ORC_SOURCE_SRC_DIR}/Vector.cc" + "${ORC_SOURCE_SRC_DIR}/Writer.cc" + "${ORC_SOURCE_SRC_DIR}/io/InputStream.cc" + "${ORC_SOURCE_SRC_DIR}/io/OutputStream.cc" + "${ORC_ADDITION_SOURCE_DIR}/orc_proto.pb.cc" ) # === arrow -set(LIBRARY_DIR ${ClickHouse_SOURCE_DIR}/contrib/arrow/cpp/src/arrow) +set(LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/arrow/cpp/src/arrow") configure_file("${LIBRARY_DIR}/util/config.h.cmake" "${CMAKE_CURRENT_BINARY_DIR}/cpp/src/arrow/util/config.h") # arrow/cpp/src/arrow/CMakeLists.txt set(ARROW_SRCS - ${LIBRARY_DIR}/buffer.cc - ${LIBRARY_DIR}/builder.cc - ${LIBRARY_DIR}/chunked_array.cc - ${LIBRARY_DIR}/compare.cc - ${LIBRARY_DIR}/datum.cc - ${LIBRARY_DIR}/device.cc - ${LIBRARY_DIR}/extension_type.cc - ${LIBRARY_DIR}/memory_pool.cc - ${LIBRARY_DIR}/pretty_print.cc - ${LIBRARY_DIR}/record_batch.cc - ${LIBRARY_DIR}/result.cc - 
${LIBRARY_DIR}/scalar.cc - ${LIBRARY_DIR}/sparse_tensor.cc - ${LIBRARY_DIR}/status.cc - ${LIBRARY_DIR}/table_builder.cc - ${LIBRARY_DIR}/table.cc - ${LIBRARY_DIR}/tensor.cc - ${LIBRARY_DIR}/type.cc - ${LIBRARY_DIR}/visitor.cc + "${LIBRARY_DIR}/buffer.cc" + "${LIBRARY_DIR}/builder.cc" + "${LIBRARY_DIR}/chunked_array.cc" + "${LIBRARY_DIR}/compare.cc" + "${LIBRARY_DIR}/datum.cc" + "${LIBRARY_DIR}/device.cc" + "${LIBRARY_DIR}/extension_type.cc" + "${LIBRARY_DIR}/memory_pool.cc" + "${LIBRARY_DIR}/pretty_print.cc" + "${LIBRARY_DIR}/record_batch.cc" + "${LIBRARY_DIR}/result.cc" + "${LIBRARY_DIR}/scalar.cc" + "${LIBRARY_DIR}/sparse_tensor.cc" + "${LIBRARY_DIR}/status.cc" + "${LIBRARY_DIR}/table_builder.cc" + "${LIBRARY_DIR}/table.cc" + "${LIBRARY_DIR}/tensor.cc" + "${LIBRARY_DIR}/type.cc" + "${LIBRARY_DIR}/visitor.cc" - ${LIBRARY_DIR}/array/array_base.cc - ${LIBRARY_DIR}/array/array_binary.cc - ${LIBRARY_DIR}/array/array_decimal.cc - ${LIBRARY_DIR}/array/array_dict.cc - ${LIBRARY_DIR}/array/array_nested.cc - ${LIBRARY_DIR}/array/array_primitive.cc - ${LIBRARY_DIR}/array/builder_adaptive.cc - ${LIBRARY_DIR}/array/builder_base.cc - ${LIBRARY_DIR}/array/builder_binary.cc - ${LIBRARY_DIR}/array/builder_decimal.cc - ${LIBRARY_DIR}/array/builder_dict.cc - ${LIBRARY_DIR}/array/builder_nested.cc - ${LIBRARY_DIR}/array/builder_primitive.cc - ${LIBRARY_DIR}/array/builder_union.cc - ${LIBRARY_DIR}/array/concatenate.cc - ${LIBRARY_DIR}/array/data.cc - ${LIBRARY_DIR}/array/diff.cc - ${LIBRARY_DIR}/array/util.cc - ${LIBRARY_DIR}/array/validate.cc + "${LIBRARY_DIR}/array/array_base.cc" + "${LIBRARY_DIR}/array/array_binary.cc" + "${LIBRARY_DIR}/array/array_decimal.cc" + "${LIBRARY_DIR}/array/array_dict.cc" + "${LIBRARY_DIR}/array/array_nested.cc" + "${LIBRARY_DIR}/array/array_primitive.cc" + "${LIBRARY_DIR}/array/builder_adaptive.cc" + "${LIBRARY_DIR}/array/builder_base.cc" + "${LIBRARY_DIR}/array/builder_binary.cc" + "${LIBRARY_DIR}/array/builder_decimal.cc" + "${LIBRARY_DIR}/array/builder_dict.cc" + "${LIBRARY_DIR}/array/builder_nested.cc" + "${LIBRARY_DIR}/array/builder_primitive.cc" + "${LIBRARY_DIR}/array/builder_union.cc" + "${LIBRARY_DIR}/array/concatenate.cc" + "${LIBRARY_DIR}/array/data.cc" + "${LIBRARY_DIR}/array/diff.cc" + "${LIBRARY_DIR}/array/util.cc" + "${LIBRARY_DIR}/array/validate.cc" - ${LIBRARY_DIR}/compute/api_scalar.cc - ${LIBRARY_DIR}/compute/api_vector.cc - ${LIBRARY_DIR}/compute/cast.cc - ${LIBRARY_DIR}/compute/exec.cc - ${LIBRARY_DIR}/compute/function.cc - ${LIBRARY_DIR}/compute/kernel.cc - ${LIBRARY_DIR}/compute/registry.cc + "${LIBRARY_DIR}/compute/api_scalar.cc" + "${LIBRARY_DIR}/compute/api_vector.cc" + "${LIBRARY_DIR}/compute/cast.cc" + "${LIBRARY_DIR}/compute/exec.cc" + "${LIBRARY_DIR}/compute/function.cc" + "${LIBRARY_DIR}/compute/kernel.cc" + "${LIBRARY_DIR}/compute/registry.cc" - ${LIBRARY_DIR}/compute/kernels/aggregate_basic.cc - ${LIBRARY_DIR}/compute/kernels/aggregate_mode.cc - ${LIBRARY_DIR}/compute/kernels/aggregate_var_std.cc - ${LIBRARY_DIR}/compute/kernels/codegen_internal.cc - ${LIBRARY_DIR}/compute/kernels/scalar_arithmetic.cc - ${LIBRARY_DIR}/compute/kernels/scalar_boolean.cc - ${LIBRARY_DIR}/compute/kernels/scalar_cast_boolean.cc - ${LIBRARY_DIR}/compute/kernels/scalar_cast_internal.cc - ${LIBRARY_DIR}/compute/kernels/scalar_cast_nested.cc - ${LIBRARY_DIR}/compute/kernels/scalar_cast_numeric.cc - ${LIBRARY_DIR}/compute/kernels/scalar_cast_string.cc - ${LIBRARY_DIR}/compute/kernels/scalar_cast_temporal.cc - ${LIBRARY_DIR}/compute/kernels/scalar_compare.cc - 
${LIBRARY_DIR}/compute/kernels/scalar_fill_null.cc - ${LIBRARY_DIR}/compute/kernels/scalar_nested.cc - ${LIBRARY_DIR}/compute/kernels/scalar_set_lookup.cc - ${LIBRARY_DIR}/compute/kernels/scalar_string.cc - ${LIBRARY_DIR}/compute/kernels/scalar_validity.cc - ${LIBRARY_DIR}/compute/kernels/vector_hash.cc - ${LIBRARY_DIR}/compute/kernels/vector_nested.cc - ${LIBRARY_DIR}/compute/kernels/vector_selection.cc - ${LIBRARY_DIR}/compute/kernels/vector_sort.cc - ${LIBRARY_DIR}/compute/kernels/util_internal.cc + "${LIBRARY_DIR}/compute/kernels/aggregate_basic.cc" + "${LIBRARY_DIR}/compute/kernels/aggregate_mode.cc" + "${LIBRARY_DIR}/compute/kernels/aggregate_var_std.cc" + "${LIBRARY_DIR}/compute/kernels/codegen_internal.cc" + "${LIBRARY_DIR}/compute/kernels/scalar_arithmetic.cc" + "${LIBRARY_DIR}/compute/kernels/scalar_boolean.cc" + "${LIBRARY_DIR}/compute/kernels/scalar_cast_boolean.cc" + "${LIBRARY_DIR}/compute/kernels/scalar_cast_internal.cc" + "${LIBRARY_DIR}/compute/kernels/scalar_cast_nested.cc" + "${LIBRARY_DIR}/compute/kernels/scalar_cast_numeric.cc" + "${LIBRARY_DIR}/compute/kernels/scalar_cast_string.cc" + "${LIBRARY_DIR}/compute/kernels/scalar_cast_temporal.cc" + "${LIBRARY_DIR}/compute/kernels/scalar_compare.cc" + "${LIBRARY_DIR}/compute/kernels/scalar_fill_null.cc" + "${LIBRARY_DIR}/compute/kernels/scalar_nested.cc" + "${LIBRARY_DIR}/compute/kernels/scalar_set_lookup.cc" + "${LIBRARY_DIR}/compute/kernels/scalar_string.cc" + "${LIBRARY_DIR}/compute/kernels/scalar_validity.cc" + "${LIBRARY_DIR}/compute/kernels/vector_hash.cc" + "${LIBRARY_DIR}/compute/kernels/vector_nested.cc" + "${LIBRARY_DIR}/compute/kernels/vector_selection.cc" + "${LIBRARY_DIR}/compute/kernels/vector_sort.cc" + "${LIBRARY_DIR}/compute/kernels/util_internal.cc" - ${LIBRARY_DIR}/csv/chunker.cc - ${LIBRARY_DIR}/csv/column_builder.cc - ${LIBRARY_DIR}/csv/column_decoder.cc - ${LIBRARY_DIR}/csv/converter.cc - ${LIBRARY_DIR}/csv/options.cc - ${LIBRARY_DIR}/csv/parser.cc - ${LIBRARY_DIR}/csv/reader.cc + "${LIBRARY_DIR}/csv/chunker.cc" + "${LIBRARY_DIR}/csv/column_builder.cc" + "${LIBRARY_DIR}/csv/column_decoder.cc" + "${LIBRARY_DIR}/csv/converter.cc" + "${LIBRARY_DIR}/csv/options.cc" + "${LIBRARY_DIR}/csv/parser.cc" + "${LIBRARY_DIR}/csv/reader.cc" - ${LIBRARY_DIR}/ipc/dictionary.cc - ${LIBRARY_DIR}/ipc/feather.cc - ${LIBRARY_DIR}/ipc/message.cc - ${LIBRARY_DIR}/ipc/metadata_internal.cc - ${LIBRARY_DIR}/ipc/options.cc - ${LIBRARY_DIR}/ipc/reader.cc - ${LIBRARY_DIR}/ipc/writer.cc + "${LIBRARY_DIR}/ipc/dictionary.cc" + "${LIBRARY_DIR}/ipc/feather.cc" + "${LIBRARY_DIR}/ipc/message.cc" + "${LIBRARY_DIR}/ipc/metadata_internal.cc" + "${LIBRARY_DIR}/ipc/options.cc" + "${LIBRARY_DIR}/ipc/reader.cc" + "${LIBRARY_DIR}/ipc/writer.cc" - ${LIBRARY_DIR}/io/buffered.cc - ${LIBRARY_DIR}/io/caching.cc - ${LIBRARY_DIR}/io/compressed.cc - ${LIBRARY_DIR}/io/file.cc - ${LIBRARY_DIR}/io/interfaces.cc - ${LIBRARY_DIR}/io/memory.cc - ${LIBRARY_DIR}/io/slow.cc + "${LIBRARY_DIR}/io/buffered.cc" + "${LIBRARY_DIR}/io/caching.cc" + "${LIBRARY_DIR}/io/compressed.cc" + "${LIBRARY_DIR}/io/file.cc" + "${LIBRARY_DIR}/io/interfaces.cc" + "${LIBRARY_DIR}/io/memory.cc" + "${LIBRARY_DIR}/io/slow.cc" - ${LIBRARY_DIR}/tensor/coo_converter.cc - ${LIBRARY_DIR}/tensor/csf_converter.cc - ${LIBRARY_DIR}/tensor/csx_converter.cc + "${LIBRARY_DIR}/tensor/coo_converter.cc" + "${LIBRARY_DIR}/tensor/csf_converter.cc" + "${LIBRARY_DIR}/tensor/csx_converter.cc" - ${LIBRARY_DIR}/util/basic_decimal.cc - ${LIBRARY_DIR}/util/bit_block_counter.cc - 
${LIBRARY_DIR}/util/bit_run_reader.cc - ${LIBRARY_DIR}/util/bit_util.cc - ${LIBRARY_DIR}/util/bitmap.cc - ${LIBRARY_DIR}/util/bitmap_builders.cc - ${LIBRARY_DIR}/util/bitmap_ops.cc - ${LIBRARY_DIR}/util/bpacking.cc - ${LIBRARY_DIR}/util/compression.cc - ${LIBRARY_DIR}/util/compression_lz4.cc - ${LIBRARY_DIR}/util/compression_snappy.cc - ${LIBRARY_DIR}/util/compression_zlib.cc - ${LIBRARY_DIR}/util/compression_zstd.cc - ${LIBRARY_DIR}/util/cpu_info.cc - ${LIBRARY_DIR}/util/decimal.cc - ${LIBRARY_DIR}/util/delimiting.cc - ${LIBRARY_DIR}/util/formatting.cc - ${LIBRARY_DIR}/util/future.cc - ${LIBRARY_DIR}/util/int_util.cc - ${LIBRARY_DIR}/util/io_util.cc - ${LIBRARY_DIR}/util/iterator.cc - ${LIBRARY_DIR}/util/key_value_metadata.cc - ${LIBRARY_DIR}/util/logging.cc - ${LIBRARY_DIR}/util/memory.cc - ${LIBRARY_DIR}/util/string_builder.cc - ${LIBRARY_DIR}/util/string.cc - ${LIBRARY_DIR}/util/task_group.cc - ${LIBRARY_DIR}/util/thread_pool.cc - ${LIBRARY_DIR}/util/time.cc - ${LIBRARY_DIR}/util/trie.cc - ${LIBRARY_DIR}/util/utf8.cc - ${LIBRARY_DIR}/util/value_parsing.cc + "${LIBRARY_DIR}/util/basic_decimal.cc" + "${LIBRARY_DIR}/util/bit_block_counter.cc" + "${LIBRARY_DIR}/util/bit_run_reader.cc" + "${LIBRARY_DIR}/util/bit_util.cc" + "${LIBRARY_DIR}/util/bitmap.cc" + "${LIBRARY_DIR}/util/bitmap_builders.cc" + "${LIBRARY_DIR}/util/bitmap_ops.cc" + "${LIBRARY_DIR}/util/bpacking.cc" + "${LIBRARY_DIR}/util/compression.cc" + "${LIBRARY_DIR}/util/compression_lz4.cc" + "${LIBRARY_DIR}/util/compression_snappy.cc" + "${LIBRARY_DIR}/util/compression_zlib.cc" + "${LIBRARY_DIR}/util/compression_zstd.cc" + "${LIBRARY_DIR}/util/cpu_info.cc" + "${LIBRARY_DIR}/util/decimal.cc" + "${LIBRARY_DIR}/util/delimiting.cc" + "${LIBRARY_DIR}/util/formatting.cc" + "${LIBRARY_DIR}/util/future.cc" + "${LIBRARY_DIR}/util/int_util.cc" + "${LIBRARY_DIR}/util/io_util.cc" + "${LIBRARY_DIR}/util/iterator.cc" + "${LIBRARY_DIR}/util/key_value_metadata.cc" + "${LIBRARY_DIR}/util/logging.cc" + "${LIBRARY_DIR}/util/memory.cc" + "${LIBRARY_DIR}/util/string_builder.cc" + "${LIBRARY_DIR}/util/string.cc" + "${LIBRARY_DIR}/util/task_group.cc" + "${LIBRARY_DIR}/util/thread_pool.cc" + "${LIBRARY_DIR}/util/time.cc" + "${LIBRARY_DIR}/util/trie.cc" + "${LIBRARY_DIR}/util/utf8.cc" + "${LIBRARY_DIR}/util/value_parsing.cc" - ${LIBRARY_DIR}/vendored/base64.cpp + "${LIBRARY_DIR}/vendored/base64.cpp" ${ORC_SRCS} ) @@ -298,21 +298,21 @@ if (ZSTD_INCLUDE_DIR AND ZSTD_LIBRARY) endif () add_definitions(-DARROW_WITH_LZ4) -SET(ARROW_SRCS ${LIBRARY_DIR}/util/compression_lz4.cc ${ARROW_SRCS}) +SET(ARROW_SRCS "${LIBRARY_DIR}/util/compression_lz4.cc" ${ARROW_SRCS}) if (ARROW_WITH_SNAPPY) add_definitions(-DARROW_WITH_SNAPPY) - SET(ARROW_SRCS ${LIBRARY_DIR}/util/compression_snappy.cc ${ARROW_SRCS}) + SET(ARROW_SRCS "${LIBRARY_DIR}/util/compression_snappy.cc" ${ARROW_SRCS}) endif () if (ARROW_WITH_ZLIB) add_definitions(-DARROW_WITH_ZLIB) - SET(ARROW_SRCS ${LIBRARY_DIR}/util/compression_zlib.cc ${ARROW_SRCS}) + SET(ARROW_SRCS "${LIBRARY_DIR}/util/compression_zlib.cc" ${ARROW_SRCS}) endif () if (ARROW_WITH_ZSTD) add_definitions(-DARROW_WITH_ZSTD) - SET(ARROW_SRCS ${LIBRARY_DIR}/util/compression_zstd.cc ${ARROW_SRCS}) + SET(ARROW_SRCS "${LIBRARY_DIR}/util/compression_zstd.cc" ${ARROW_SRCS}) endif () @@ -327,8 +327,8 @@ if (USE_INTERNAL_PROTOBUF_LIBRARY) add_dependencies(${ARROW_LIBRARY} protoc) endif () -target_include_directories(${ARROW_LIBRARY} SYSTEM PUBLIC ${ClickHouse_SOURCE_DIR}/contrib/arrow/cpp/src) -target_include_directories(${ARROW_LIBRARY} SYSTEM PUBLIC 
${CMAKE_CURRENT_BINARY_DIR}/cpp/src) +target_include_directories(${ARROW_LIBRARY} SYSTEM PUBLIC "${ClickHouse_SOURCE_DIR}/contrib/arrow/cpp/src") +target_include_directories(${ARROW_LIBRARY} SYSTEM PUBLIC "${CMAKE_CURRENT_BINARY_DIR}/cpp/src") target_link_libraries(${ARROW_LIBRARY} PRIVATE ${DOUBLE_CONVERSION_LIBRARIES} ${Protobuf_LIBRARY}) target_link_libraries(${ARROW_LIBRARY} PRIVATE lz4) if (ARROW_WITH_SNAPPY) @@ -354,46 +354,46 @@ target_include_directories(${ARROW_LIBRARY} PRIVATE SYSTEM ${FLATBUFFERS_INCLUDE # === parquet -set(LIBRARY_DIR ${ClickHouse_SOURCE_DIR}/contrib/arrow/cpp/src/parquet) -set(GEN_LIBRARY_DIR ${ClickHouse_SOURCE_DIR}/contrib/arrow/cpp/src/generated) +set(LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/arrow/cpp/src/parquet") +set(GEN_LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/arrow/cpp/src/generated") # arrow/cpp/src/parquet/CMakeLists.txt set(PARQUET_SRCS - ${LIBRARY_DIR}/arrow/path_internal.cc - ${LIBRARY_DIR}/arrow/reader.cc - ${LIBRARY_DIR}/arrow/reader_internal.cc - ${LIBRARY_DIR}/arrow/schema.cc - ${LIBRARY_DIR}/arrow/schema_internal.cc - ${LIBRARY_DIR}/arrow/writer.cc - ${LIBRARY_DIR}/bloom_filter.cc - ${LIBRARY_DIR}/column_reader.cc - ${LIBRARY_DIR}/column_scanner.cc - ${LIBRARY_DIR}/column_writer.cc - ${LIBRARY_DIR}/deprecated_io.cc - ${LIBRARY_DIR}/encoding.cc - ${LIBRARY_DIR}/encryption.cc - ${LIBRARY_DIR}/encryption_internal.cc - ${LIBRARY_DIR}/file_reader.cc - ${LIBRARY_DIR}/file_writer.cc - ${LIBRARY_DIR}/internal_file_decryptor.cc - ${LIBRARY_DIR}/internal_file_encryptor.cc - ${LIBRARY_DIR}/level_conversion.cc - ${LIBRARY_DIR}/level_comparison.cc - ${LIBRARY_DIR}/metadata.cc - ${LIBRARY_DIR}/murmur3.cc - ${LIBRARY_DIR}/platform.cc - ${LIBRARY_DIR}/printer.cc - ${LIBRARY_DIR}/properties.cc - ${LIBRARY_DIR}/schema.cc - ${LIBRARY_DIR}/statistics.cc - ${LIBRARY_DIR}/types.cc + "${LIBRARY_DIR}/arrow/path_internal.cc" + "${LIBRARY_DIR}/arrow/reader.cc" + "${LIBRARY_DIR}/arrow/reader_internal.cc" + "${LIBRARY_DIR}/arrow/schema.cc" + "${LIBRARY_DIR}/arrow/schema_internal.cc" + "${LIBRARY_DIR}/arrow/writer.cc" + "${LIBRARY_DIR}/bloom_filter.cc" + "${LIBRARY_DIR}/column_reader.cc" + "${LIBRARY_DIR}/column_scanner.cc" + "${LIBRARY_DIR}/column_writer.cc" + "${LIBRARY_DIR}/deprecated_io.cc" + "${LIBRARY_DIR}/encoding.cc" + "${LIBRARY_DIR}/encryption.cc" + "${LIBRARY_DIR}/encryption_internal.cc" + "${LIBRARY_DIR}/file_reader.cc" + "${LIBRARY_DIR}/file_writer.cc" + "${LIBRARY_DIR}/internal_file_decryptor.cc" + "${LIBRARY_DIR}/internal_file_encryptor.cc" + "${LIBRARY_DIR}/level_conversion.cc" + "${LIBRARY_DIR}/level_comparison.cc" + "${LIBRARY_DIR}/metadata.cc" + "${LIBRARY_DIR}/murmur3.cc" + "${LIBRARY_DIR}/platform.cc" + "${LIBRARY_DIR}/printer.cc" + "${LIBRARY_DIR}/properties.cc" + "${LIBRARY_DIR}/schema.cc" + "${LIBRARY_DIR}/statistics.cc" + "${LIBRARY_DIR}/types.cc" - ${GEN_LIBRARY_DIR}/parquet_constants.cpp - ${GEN_LIBRARY_DIR}/parquet_types.cpp + "${GEN_LIBRARY_DIR}/parquet_constants.cpp" + "${GEN_LIBRARY_DIR}/parquet_types.cpp" ) -#list(TRANSFORM PARQUET_SRCS PREPEND ${LIBRARY_DIR}/) # cmake 3.12 +#list(TRANSFORM PARQUET_SRCS PREPEND "${LIBRARY_DIR}/") # cmake 3.12 add_library(${PARQUET_LIBRARY} ${PARQUET_SRCS}) -target_include_directories(${PARQUET_LIBRARY} SYSTEM PUBLIC ${ClickHouse_SOURCE_DIR}/contrib/arrow/cpp/src ${CMAKE_CURRENT_SOURCE_DIR}/cpp/src PRIVATE ${OPENSSL_INCLUDE_DIR}) -include(${ClickHouse_SOURCE_DIR}/contrib/thrift/build/cmake/ConfigureChecks.cmake) # makes config.h +target_include_directories(${PARQUET_LIBRARY} SYSTEM PUBLIC 
"${ClickHouse_SOURCE_DIR}/contrib/arrow/cpp/src" "${CMAKE_CURRENT_SOURCE_DIR}/cpp/src" PRIVATE ${OPENSSL_INCLUDE_DIR}) +include("${ClickHouse_SOURCE_DIR}/contrib/thrift/build/cmake/ConfigureChecks.cmake") # makes config.h target_link_libraries(${PARQUET_LIBRARY} PUBLIC ${ARROW_LIBRARY} PRIVATE ${THRIFT_LIBRARY} boost::headers_only boost::regex ${OPENSSL_LIBRARIES}) if (SANITIZE STREQUAL "undefined") @@ -403,9 +403,9 @@ endif () # === tools -set(TOOLS_DIR ${ClickHouse_SOURCE_DIR}/contrib/arrow/cpp/tools/parquet) +set(TOOLS_DIR "${ClickHouse_SOURCE_DIR}/contrib/arrow/cpp/tools/parquet") set(PARQUET_TOOLS parquet_dump_schema parquet_reader parquet_scan) foreach (TOOL ${PARQUET_TOOLS}) - add_executable(${TOOL} ${TOOLS_DIR}/${TOOL}.cc) + add_executable(${TOOL} "${TOOLS_DIR}/${TOOL}.cc") target_link_libraries(${TOOL} PRIVATE ${PARQUET_LIBRARY}) endforeach () diff --git a/contrib/avro-cmake/CMakeLists.txt b/contrib/avro-cmake/CMakeLists.txt index 052a19ee804..b56afd1598c 100644 --- a/contrib/avro-cmake/CMakeLists.txt +++ b/contrib/avro-cmake/CMakeLists.txt @@ -1,10 +1,10 @@ -set(AVROCPP_ROOT_DIR ${CMAKE_SOURCE_DIR}/contrib/avro/lang/c++) -set(AVROCPP_INCLUDE_DIR ${AVROCPP_ROOT_DIR}/api) -set(AVROCPP_SOURCE_DIR ${AVROCPP_ROOT_DIR}/impl) +set(AVROCPP_ROOT_DIR "${CMAKE_SOURCE_DIR}/contrib/avro/lang/c++") +set(AVROCPP_INCLUDE_DIR "${AVROCPP_ROOT_DIR}/api") +set(AVROCPP_SOURCE_DIR "${AVROCPP_ROOT_DIR}/impl") set (CMAKE_CXX_STANDARD 17) -if (EXISTS ${AVROCPP_ROOT_DIR}/../../share/VERSION.txt) +if (EXISTS "${AVROCPP_ROOT_DIR}/../../share/VERSION.txt") file(READ "${AVROCPP_ROOT_DIR}/../../share/VERSION.txt" AVRO_VERSION) endif() @@ -14,30 +14,30 @@ set (AVRO_VERSION_MAJOR ${AVRO_VERSION}) set (AVRO_VERSION_MINOR "0") set (AVROCPP_SOURCE_FILES - ${AVROCPP_SOURCE_DIR}/Compiler.cc - ${AVROCPP_SOURCE_DIR}/Node.cc - ${AVROCPP_SOURCE_DIR}/LogicalType.cc - ${AVROCPP_SOURCE_DIR}/NodeImpl.cc - ${AVROCPP_SOURCE_DIR}/ResolverSchema.cc - ${AVROCPP_SOURCE_DIR}/Schema.cc - ${AVROCPP_SOURCE_DIR}/Types.cc - ${AVROCPP_SOURCE_DIR}/ValidSchema.cc - ${AVROCPP_SOURCE_DIR}/Zigzag.cc - ${AVROCPP_SOURCE_DIR}/BinaryEncoder.cc - ${AVROCPP_SOURCE_DIR}/BinaryDecoder.cc - ${AVROCPP_SOURCE_DIR}/Stream.cc - ${AVROCPP_SOURCE_DIR}/FileStream.cc - ${AVROCPP_SOURCE_DIR}/Generic.cc - ${AVROCPP_SOURCE_DIR}/GenericDatum.cc - ${AVROCPP_SOURCE_DIR}/DataFile.cc - ${AVROCPP_SOURCE_DIR}/parsing/Symbol.cc - ${AVROCPP_SOURCE_DIR}/parsing/ValidatingCodec.cc - ${AVROCPP_SOURCE_DIR}/parsing/JsonCodec.cc - ${AVROCPP_SOURCE_DIR}/parsing/ResolvingDecoder.cc - ${AVROCPP_SOURCE_DIR}/json/JsonIO.cc - ${AVROCPP_SOURCE_DIR}/json/JsonDom.cc - ${AVROCPP_SOURCE_DIR}/Resolver.cc - ${AVROCPP_SOURCE_DIR}/Validator.cc + "${AVROCPP_SOURCE_DIR}/Compiler.cc" + "${AVROCPP_SOURCE_DIR}/Node.cc" + "${AVROCPP_SOURCE_DIR}/LogicalType.cc" + "${AVROCPP_SOURCE_DIR}/NodeImpl.cc" + "${AVROCPP_SOURCE_DIR}/ResolverSchema.cc" + "${AVROCPP_SOURCE_DIR}/Schema.cc" + "${AVROCPP_SOURCE_DIR}/Types.cc" + "${AVROCPP_SOURCE_DIR}/ValidSchema.cc" + "${AVROCPP_SOURCE_DIR}/Zigzag.cc" + "${AVROCPP_SOURCE_DIR}/BinaryEncoder.cc" + "${AVROCPP_SOURCE_DIR}/BinaryDecoder.cc" + "${AVROCPP_SOURCE_DIR}/Stream.cc" + "${AVROCPP_SOURCE_DIR}/FileStream.cc" + "${AVROCPP_SOURCE_DIR}/Generic.cc" + "${AVROCPP_SOURCE_DIR}/GenericDatum.cc" + "${AVROCPP_SOURCE_DIR}/DataFile.cc" + "${AVROCPP_SOURCE_DIR}/parsing/Symbol.cc" + "${AVROCPP_SOURCE_DIR}/parsing/ValidatingCodec.cc" + "${AVROCPP_SOURCE_DIR}/parsing/JsonCodec.cc" + "${AVROCPP_SOURCE_DIR}/parsing/ResolvingDecoder.cc" + "${AVROCPP_SOURCE_DIR}/json/JsonIO.cc" + 
"${AVROCPP_SOURCE_DIR}/json/JsonDom.cc" + "${AVROCPP_SOURCE_DIR}/Resolver.cc" + "${AVROCPP_SOURCE_DIR}/Validator.cc" ) add_library (avrocpp ${AVROCPP_SOURCE_FILES}) @@ -63,7 +63,7 @@ target_compile_options(avrocpp PRIVATE ${SUPPRESS_WARNINGS}) # create a symlink to include headers with ADD_CUSTOM_TARGET(avro_symlink_headers ALL - COMMAND ${CMAKE_COMMAND} -E make_directory ${AVROCPP_ROOT_DIR}/include - COMMAND ${CMAKE_COMMAND} -E create_symlink ${AVROCPP_ROOT_DIR}/api ${AVROCPP_ROOT_DIR}/include/avro + COMMAND ${CMAKE_COMMAND} -E make_directory "${AVROCPP_ROOT_DIR}/include" + COMMAND ${CMAKE_COMMAND} -E create_symlink "${AVROCPP_ROOT_DIR}/api" "${AVROCPP_ROOT_DIR}/include/avro" ) add_dependencies(avrocpp avro_symlink_headers) diff --git a/contrib/aws-s3-cmake/CMakeLists.txt b/contrib/aws-s3-cmake/CMakeLists.txt index 02dee91c70c..723ceac3991 100644 --- a/contrib/aws-s3-cmake/CMakeLists.txt +++ b/contrib/aws-s3-cmake/CMakeLists.txt @@ -1,8 +1,8 @@ -SET(AWS_S3_LIBRARY_DIR ${ClickHouse_SOURCE_DIR}/contrib/aws/aws-cpp-sdk-s3) -SET(AWS_CORE_LIBRARY_DIR ${ClickHouse_SOURCE_DIR}/contrib/aws/aws-cpp-sdk-core) -SET(AWS_CHECKSUMS_LIBRARY_DIR ${ClickHouse_SOURCE_DIR}/contrib/aws-checksums) -SET(AWS_COMMON_LIBRARY_DIR ${ClickHouse_SOURCE_DIR}/contrib/aws-c-common) -SET(AWS_EVENT_STREAM_LIBRARY_DIR ${ClickHouse_SOURCE_DIR}/contrib/aws-c-event-stream) +SET(AWS_S3_LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws/aws-cpp-sdk-s3") +SET(AWS_CORE_LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws/aws-cpp-sdk-core") +SET(AWS_CHECKSUMS_LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-checksums") +SET(AWS_COMMON_LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-c-common") +SET(AWS_EVENT_STREAM_LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-c-event-stream") OPTION(USE_AWS_MEMORY_MANAGEMENT "Aws memory management" OFF) configure_file("${AWS_CORE_LIBRARY_DIR}/include/aws/core/SDKConfig.h.in" diff --git a/contrib/base64-cmake/CMakeLists.txt b/contrib/base64-cmake/CMakeLists.txt index a295ee45b84..4ebb4e68728 100644 --- a/contrib/base64-cmake/CMakeLists.txt +++ b/contrib/base64-cmake/CMakeLists.txt @@ -1,11 +1,11 @@ -SET(LIBRARY_DIR ${ClickHouse_SOURCE_DIR}/contrib/base64) +SET(LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/base64") -add_library(base64_scalar OBJECT ${LIBRARY_DIR}/turbob64c.c ${LIBRARY_DIR}/turbob64d.c) -add_library(base64_ssse3 OBJECT ${LIBRARY_DIR}/turbob64sse.c) # This file also contains code for ARM NEON +add_library(base64_scalar OBJECT "${LIBRARY_DIR}/turbob64c.c" "${LIBRARY_DIR}/turbob64d.c") +add_library(base64_ssse3 OBJECT "${LIBRARY_DIR}/turbob64sse.c") # This file also contains code for ARM NEON if (ARCH_AMD64) - add_library(base64_avx OBJECT ${LIBRARY_DIR}/turbob64sse.c) # This is not a mistake. One file is compiled twice. - add_library(base64_avx2 OBJECT ${LIBRARY_DIR}/turbob64avx2.c) + add_library(base64_avx OBJECT "${LIBRARY_DIR}/turbob64sse.c") # This is not a mistake. One file is compiled twice. 
+ add_library(base64_avx2 OBJECT "${LIBRARY_DIR}/turbob64avx2.c") endif () target_compile_options(base64_scalar PRIVATE -falign-loops) diff --git a/contrib/boost b/contrib/boost index ee24fa55bc4..1ccbb5a522a 160000 --- a/contrib/boost +++ b/contrib/boost @@ -1 +1 @@ -Subproject commit ee24fa55bc46e4d2ce7d0d052cc5a0d9b1be8c36 +Subproject commit 1ccbb5a522a571ce83b606dbc2e1011c42ecccfb diff --git a/contrib/boost-cmake/CMakeLists.txt b/contrib/boost-cmake/CMakeLists.txt index 0759935a7db..9f6c5b1255d 100644 --- a/contrib/boost-cmake/CMakeLists.txt +++ b/contrib/boost-cmake/CMakeLists.txt @@ -56,19 +56,19 @@ endif() if (NOT EXTERNAL_BOOST_FOUND) set (USE_INTERNAL_BOOST_LIBRARY 1) - set (LIBRARY_DIR ${ClickHouse_SOURCE_DIR}/contrib/boost) + set (LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/boost") # filesystem set (SRCS_FILESYSTEM - ${LIBRARY_DIR}/libs/filesystem/src/codecvt_error_category.cpp - ${LIBRARY_DIR}/libs/filesystem/src/operations.cpp - ${LIBRARY_DIR}/libs/filesystem/src/path_traits.cpp - ${LIBRARY_DIR}/libs/filesystem/src/path.cpp - ${LIBRARY_DIR}/libs/filesystem/src/portability.cpp - ${LIBRARY_DIR}/libs/filesystem/src/unique_path.cpp - ${LIBRARY_DIR}/libs/filesystem/src/utf8_codecvt_facet.cpp - ${LIBRARY_DIR}/libs/filesystem/src/windows_file_codecvt.cpp + "${LIBRARY_DIR}/libs/filesystem/src/codecvt_error_category.cpp" + "${LIBRARY_DIR}/libs/filesystem/src/operations.cpp" + "${LIBRARY_DIR}/libs/filesystem/src/path_traits.cpp" + "${LIBRARY_DIR}/libs/filesystem/src/path.cpp" + "${LIBRARY_DIR}/libs/filesystem/src/portability.cpp" + "${LIBRARY_DIR}/libs/filesystem/src/unique_path.cpp" + "${LIBRARY_DIR}/libs/filesystem/src/utf8_codecvt_facet.cpp" + "${LIBRARY_DIR}/libs/filesystem/src/windows_file_codecvt.cpp" ) add_library (_boost_filesystem ${SRCS_FILESYSTEM}) @@ -88,10 +88,10 @@ if (NOT EXTERNAL_BOOST_FOUND) # iostreams set (SRCS_IOSTREAMS - ${LIBRARY_DIR}/libs/iostreams/src/file_descriptor.cpp - ${LIBRARY_DIR}/libs/iostreams/src/gzip.cpp - ${LIBRARY_DIR}/libs/iostreams/src/mapped_file.cpp - ${LIBRARY_DIR}/libs/iostreams/src/zlib.cpp + "${LIBRARY_DIR}/libs/iostreams/src/file_descriptor.cpp" + "${LIBRARY_DIR}/libs/iostreams/src/gzip.cpp" + "${LIBRARY_DIR}/libs/iostreams/src/mapped_file.cpp" + "${LIBRARY_DIR}/libs/iostreams/src/zlib.cpp" ) add_library (_boost_iostreams ${SRCS_IOSTREAMS}) @@ -102,17 +102,17 @@ if (NOT EXTERNAL_BOOST_FOUND) # program_options set (SRCS_PROGRAM_OPTIONS - ${LIBRARY_DIR}/libs/program_options/src/cmdline.cpp - ${LIBRARY_DIR}/libs/program_options/src/config_file.cpp - ${LIBRARY_DIR}/libs/program_options/src/convert.cpp - ${LIBRARY_DIR}/libs/program_options/src/options_description.cpp - ${LIBRARY_DIR}/libs/program_options/src/parsers.cpp - ${LIBRARY_DIR}/libs/program_options/src/positional_options.cpp - ${LIBRARY_DIR}/libs/program_options/src/split.cpp - ${LIBRARY_DIR}/libs/program_options/src/utf8_codecvt_facet.cpp - ${LIBRARY_DIR}/libs/program_options/src/value_semantic.cpp - ${LIBRARY_DIR}/libs/program_options/src/variables_map.cpp - ${LIBRARY_DIR}/libs/program_options/src/winmain.cpp + "${LIBRARY_DIR}/libs/program_options/src/cmdline.cpp" + "${LIBRARY_DIR}/libs/program_options/src/config_file.cpp" + "${LIBRARY_DIR}/libs/program_options/src/convert.cpp" + "${LIBRARY_DIR}/libs/program_options/src/options_description.cpp" + "${LIBRARY_DIR}/libs/program_options/src/parsers.cpp" + "${LIBRARY_DIR}/libs/program_options/src/positional_options.cpp" + "${LIBRARY_DIR}/libs/program_options/src/split.cpp" + "${LIBRARY_DIR}/libs/program_options/src/utf8_codecvt_facet.cpp" 
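# boost-cmake builds each Boost sublibrary from its own source list as an
# internal target (_boost_filesystem, _boost_program_options, ...) and then
# publishes it under a namespaced alias such as boost::coroutine below,
# mirroring the names find_package(Boost) would provide. The generic idiom,
# with hypothetical names:
add_library (_impl "${LIBRARY_DIR}/src/impl.cpp")   # internal target
add_library (ns::impl ALIAS _impl)
# Consumers link the read-only alias, which cannot be accidentally redefined:
# target_link_libraries (consumer PRIVATE ns::impl)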
+ "${LIBRARY_DIR}/libs/program_options/src/value_semantic.cpp" + "${LIBRARY_DIR}/libs/program_options/src/variables_map.cpp" + "${LIBRARY_DIR}/libs/program_options/src/winmain.cpp" ) add_library (_boost_program_options ${SRCS_PROGRAM_OPTIONS}) @@ -122,24 +122,24 @@ if (NOT EXTERNAL_BOOST_FOUND) # regex set (SRCS_REGEX - ${LIBRARY_DIR}/libs/regex/src/c_regex_traits.cpp - ${LIBRARY_DIR}/libs/regex/src/cpp_regex_traits.cpp - ${LIBRARY_DIR}/libs/regex/src/cregex.cpp - ${LIBRARY_DIR}/libs/regex/src/fileiter.cpp - ${LIBRARY_DIR}/libs/regex/src/icu.cpp - ${LIBRARY_DIR}/libs/regex/src/instances.cpp - ${LIBRARY_DIR}/libs/regex/src/internals.hpp - ${LIBRARY_DIR}/libs/regex/src/posix_api.cpp - ${LIBRARY_DIR}/libs/regex/src/regex_debug.cpp - ${LIBRARY_DIR}/libs/regex/src/regex_raw_buffer.cpp - ${LIBRARY_DIR}/libs/regex/src/regex_traits_defaults.cpp - ${LIBRARY_DIR}/libs/regex/src/regex.cpp - ${LIBRARY_DIR}/libs/regex/src/static_mutex.cpp - ${LIBRARY_DIR}/libs/regex/src/usinstances.cpp - ${LIBRARY_DIR}/libs/regex/src/w32_regex_traits.cpp - ${LIBRARY_DIR}/libs/regex/src/wc_regex_traits.cpp - ${LIBRARY_DIR}/libs/regex/src/wide_posix_api.cpp - ${LIBRARY_DIR}/libs/regex/src/winstances.cpp + "${LIBRARY_DIR}/libs/regex/src/c_regex_traits.cpp" + "${LIBRARY_DIR}/libs/regex/src/cpp_regex_traits.cpp" + "${LIBRARY_DIR}/libs/regex/src/cregex.cpp" + "${LIBRARY_DIR}/libs/regex/src/fileiter.cpp" + "${LIBRARY_DIR}/libs/regex/src/icu.cpp" + "${LIBRARY_DIR}/libs/regex/src/instances.cpp" + "${LIBRARY_DIR}/libs/regex/src/internals.hpp" + "${LIBRARY_DIR}/libs/regex/src/posix_api.cpp" + "${LIBRARY_DIR}/libs/regex/src/regex_debug.cpp" + "${LIBRARY_DIR}/libs/regex/src/regex_raw_buffer.cpp" + "${LIBRARY_DIR}/libs/regex/src/regex_traits_defaults.cpp" + "${LIBRARY_DIR}/libs/regex/src/regex.cpp" + "${LIBRARY_DIR}/libs/regex/src/static_mutex.cpp" + "${LIBRARY_DIR}/libs/regex/src/usinstances.cpp" + "${LIBRARY_DIR}/libs/regex/src/w32_regex_traits.cpp" + "${LIBRARY_DIR}/libs/regex/src/wc_regex_traits.cpp" + "${LIBRARY_DIR}/libs/regex/src/wide_posix_api.cpp" + "${LIBRARY_DIR}/libs/regex/src/winstances.cpp" ) add_library (_boost_regex ${SRCS_REGEX}) @@ -149,7 +149,7 @@ if (NOT EXTERNAL_BOOST_FOUND) # system set (SRCS_SYSTEM - ${LIBRARY_DIR}/libs/system/src/error_code.cpp + "${LIBRARY_DIR}/libs/system/src/error_code.cpp" ) add_library (_boost_system ${SRCS_SYSTEM}) @@ -161,9 +161,9 @@ if (NOT EXTERNAL_BOOST_FOUND) SET(ASM_OPTIONS "-x assembler-with-cpp") set (SRCS_CONTEXT - ${LIBRARY_DIR}/libs/context/src/dummy.cpp - ${LIBRARY_DIR}/libs/context/src/execution_context.cpp - ${LIBRARY_DIR}/libs/context/src/posix/stack_traits.cpp + "${LIBRARY_DIR}/libs/context/src/dummy.cpp" + "${LIBRARY_DIR}/libs/context/src/execution_context.cpp" + "${LIBRARY_DIR}/libs/context/src/posix/stack_traits.cpp" ) if (SANITIZE AND (SANITIZE STREQUAL "address" OR SANITIZE STREQUAL "thread")) @@ -176,33 +176,33 @@ if (NOT EXTERNAL_BOOST_FOUND) endif() set (SRCS_CONTEXT ${SRCS_CONTEXT} - ${LIBRARY_DIR}/libs/context/src/fiber.cpp - ${LIBRARY_DIR}/libs/context/src/continuation.cpp + "${LIBRARY_DIR}/libs/context/src/fiber.cpp" + "${LIBRARY_DIR}/libs/context/src/continuation.cpp" ) endif() if (ARCH_ARM) set (SRCS_CONTEXT ${SRCS_CONTEXT} - ${LIBRARY_DIR}/libs/context/src/asm/jump_arm64_aapcs_elf_gas.S - ${LIBRARY_DIR}/libs/context/src/asm/make_arm64_aapcs_elf_gas.S - ${LIBRARY_DIR}/libs/context/src/asm/ontop_arm64_aapcs_elf_gas.S + "${LIBRARY_DIR}/libs/context/src/asm/jump_arm64_aapcs_elf_gas.S" + "${LIBRARY_DIR}/libs/context/src/asm/make_arm64_aapcs_elf_gas.S" + 
"${LIBRARY_DIR}/libs/context/src/asm/ontop_arm64_aapcs_elf_gas.S" ) elseif (ARCH_PPC64LE) set (SRCS_CONTEXT ${SRCS_CONTEXT} - ${LIBRARY_DIR}/libs/context/src/asm/jump_ppc64_sysv_elf_gas.S - ${LIBRARY_DIR}/libs/context/src/asm/make_ppc64_sysv_elf_gas.S - ${LIBRARY_DIR}/libs/context/src/asm/ontop_ppc64_sysv_elf_gas.S + "${LIBRARY_DIR}/libs/context/src/asm/jump_ppc64_sysv_elf_gas.S" + "${LIBRARY_DIR}/libs/context/src/asm/make_ppc64_sysv_elf_gas.S" + "${LIBRARY_DIR}/libs/context/src/asm/ontop_ppc64_sysv_elf_gas.S" ) elseif(OS_DARWIN) set (SRCS_CONTEXT ${SRCS_CONTEXT} - ${LIBRARY_DIR}/libs/context/src/asm/jump_x86_64_sysv_macho_gas.S - ${LIBRARY_DIR}/libs/context/src/asm/make_x86_64_sysv_macho_gas.S - ${LIBRARY_DIR}/libs/context/src/asm/ontop_x86_64_sysv_macho_gas.S + "${LIBRARY_DIR}/libs/context/src/asm/jump_x86_64_sysv_macho_gas.S" + "${LIBRARY_DIR}/libs/context/src/asm/make_x86_64_sysv_macho_gas.S" + "${LIBRARY_DIR}/libs/context/src/asm/ontop_x86_64_sysv_macho_gas.S" ) else() set (SRCS_CONTEXT ${SRCS_CONTEXT} - ${LIBRARY_DIR}/libs/context/src/asm/jump_x86_64_sysv_elf_gas.S - ${LIBRARY_DIR}/libs/context/src/asm/make_x86_64_sysv_elf_gas.S - ${LIBRARY_DIR}/libs/context/src/asm/ontop_x86_64_sysv_elf_gas.S + "${LIBRARY_DIR}/libs/context/src/asm/jump_x86_64_sysv_elf_gas.S" + "${LIBRARY_DIR}/libs/context/src/asm/make_x86_64_sysv_elf_gas.S" + "${LIBRARY_DIR}/libs/context/src/asm/ontop_x86_64_sysv_elf_gas.S" ) endif() @@ -213,9 +213,9 @@ if (NOT EXTERNAL_BOOST_FOUND) # coroutine set (SRCS_COROUTINE - ${LIBRARY_DIR}/libs/coroutine/detail/coroutine_context.cpp - ${LIBRARY_DIR}/libs/coroutine/exceptions.cpp - ${LIBRARY_DIR}/libs/coroutine/posix/stack_traits.cpp + "${LIBRARY_DIR}/libs/coroutine/detail/coroutine_context.cpp" + "${LIBRARY_DIR}/libs/coroutine/exceptions.cpp" + "${LIBRARY_DIR}/libs/coroutine/posix/stack_traits.cpp" ) add_library (_boost_coroutine ${SRCS_COROUTINE}) add_library (boost::coroutine ALIAS _boost_coroutine) diff --git a/contrib/boringssl b/contrib/boringssl index fd9ce1a0406..a6a2e2ab3e4 160000 --- a/contrib/boringssl +++ b/contrib/boringssl @@ -1 +1 @@ -Subproject commit fd9ce1a0406f571507068b9555d0b545b8a18332 +Subproject commit a6a2e2ab3e44d97ce98e51c558e989f211de7eb3 diff --git a/contrib/boringssl-cmake/CMakeLists.txt b/contrib/boringssl-cmake/CMakeLists.txt index 017a8a64c0e..9d8c6ca6083 100644 --- a/contrib/boringssl-cmake/CMakeLists.txt +++ b/contrib/boringssl-cmake/CMakeLists.txt @@ -8,7 +8,7 @@ cmake_minimum_required(VERSION 3.0) project(BoringSSL LANGUAGES C CXX) -set(BORINGSSL_SOURCE_DIR ${ClickHouse_SOURCE_DIR}/contrib/boringssl) +set(BORINGSSL_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/boringssl") if(CMAKE_CXX_COMPILER_ID MATCHES "Clang") set(CLANG 1) @@ -16,7 +16,7 @@ endif() if(CMAKE_COMPILER_IS_GNUCXX OR CLANG) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -fvisibility=hidden -fno-common -fno-exceptions -fno-rtti") - if(APPLE) + if(APPLE AND CLANG) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libc++") endif() @@ -130,7 +130,7 @@ if(BUILD_SHARED_LIBS) set(CMAKE_POSITION_INDEPENDENT_CODE TRUE) endif() -include_directories(${BORINGSSL_SOURCE_DIR}/include) +include_directories("${BORINGSSL_SOURCE_DIR}/include") set( CRYPTO_ios_aarch64_SOURCES @@ -192,8 +192,8 @@ set( linux-arm/crypto/fipsmodule/sha512-armv4.S linux-arm/crypto/fipsmodule/vpaes-armv7.S linux-arm/crypto/test/trampoline-armv4.S - ${BORINGSSL_SOURCE_DIR}/crypto/curve25519/asm/x25519-asm-arm.S - ${BORINGSSL_SOURCE_DIR}/crypto/poly1305/poly1305_arm_asm.S + 
"${BORINGSSL_SOURCE_DIR}/crypto/curve25519/asm/x25519-asm-arm.S" + "${BORINGSSL_SOURCE_DIR}/crypto/poly1305/poly1305_arm_asm.S" ) set( @@ -244,7 +244,7 @@ set( linux-x86_64/crypto/fipsmodule/x86_64-mont.S linux-x86_64/crypto/fipsmodule/x86_64-mont5.S linux-x86_64/crypto/test/trampoline-x86_64.S - ${BORINGSSL_SOURCE_DIR}/crypto/hrss/asm/poly_rq_mul.S + "${BORINGSSL_SOURCE_DIR}/crypto/hrss/asm/poly_rq_mul.S" ) set( @@ -348,300 +348,300 @@ add_library( ${CRYPTO_ARCH_SOURCES} err_data.c - ${BORINGSSL_SOURCE_DIR}/crypto/asn1/a_bitstr.c - ${BORINGSSL_SOURCE_DIR}/crypto/asn1/a_bool.c - ${BORINGSSL_SOURCE_DIR}/crypto/asn1/a_d2i_fp.c - ${BORINGSSL_SOURCE_DIR}/crypto/asn1/a_dup.c - ${BORINGSSL_SOURCE_DIR}/crypto/asn1/a_enum.c - ${BORINGSSL_SOURCE_DIR}/crypto/asn1/a_gentm.c - ${BORINGSSL_SOURCE_DIR}/crypto/asn1/a_i2d_fp.c - ${BORINGSSL_SOURCE_DIR}/crypto/asn1/a_int.c - ${BORINGSSL_SOURCE_DIR}/crypto/asn1/a_mbstr.c - ${BORINGSSL_SOURCE_DIR}/crypto/asn1/a_object.c - ${BORINGSSL_SOURCE_DIR}/crypto/asn1/a_octet.c - ${BORINGSSL_SOURCE_DIR}/crypto/asn1/a_print.c - ${BORINGSSL_SOURCE_DIR}/crypto/asn1/a_strnid.c - ${BORINGSSL_SOURCE_DIR}/crypto/asn1/a_time.c - ${BORINGSSL_SOURCE_DIR}/crypto/asn1/a_type.c - ${BORINGSSL_SOURCE_DIR}/crypto/asn1/a_utctm.c - ${BORINGSSL_SOURCE_DIR}/crypto/asn1/a_utf8.c - ${BORINGSSL_SOURCE_DIR}/crypto/asn1/asn1_lib.c - ${BORINGSSL_SOURCE_DIR}/crypto/asn1/asn1_par.c - ${BORINGSSL_SOURCE_DIR}/crypto/asn1/asn_pack.c - ${BORINGSSL_SOURCE_DIR}/crypto/asn1/f_enum.c - ${BORINGSSL_SOURCE_DIR}/crypto/asn1/f_int.c - ${BORINGSSL_SOURCE_DIR}/crypto/asn1/f_string.c - ${BORINGSSL_SOURCE_DIR}/crypto/asn1/tasn_dec.c - ${BORINGSSL_SOURCE_DIR}/crypto/asn1/tasn_enc.c - ${BORINGSSL_SOURCE_DIR}/crypto/asn1/tasn_fre.c - ${BORINGSSL_SOURCE_DIR}/crypto/asn1/tasn_new.c - ${BORINGSSL_SOURCE_DIR}/crypto/asn1/tasn_typ.c - ${BORINGSSL_SOURCE_DIR}/crypto/asn1/tasn_utl.c - ${BORINGSSL_SOURCE_DIR}/crypto/asn1/time_support.c - ${BORINGSSL_SOURCE_DIR}/crypto/base64/base64.c - ${BORINGSSL_SOURCE_DIR}/crypto/bio/bio.c - ${BORINGSSL_SOURCE_DIR}/crypto/bio/bio_mem.c - ${BORINGSSL_SOURCE_DIR}/crypto/bio/connect.c - ${BORINGSSL_SOURCE_DIR}/crypto/bio/fd.c - ${BORINGSSL_SOURCE_DIR}/crypto/bio/file.c - ${BORINGSSL_SOURCE_DIR}/crypto/bio/hexdump.c - ${BORINGSSL_SOURCE_DIR}/crypto/bio/pair.c - ${BORINGSSL_SOURCE_DIR}/crypto/bio/printf.c - ${BORINGSSL_SOURCE_DIR}/crypto/bio/socket.c - ${BORINGSSL_SOURCE_DIR}/crypto/bio/socket_helper.c - ${BORINGSSL_SOURCE_DIR}/crypto/bn_extra/bn_asn1.c - ${BORINGSSL_SOURCE_DIR}/crypto/bn_extra/convert.c - ${BORINGSSL_SOURCE_DIR}/crypto/buf/buf.c - ${BORINGSSL_SOURCE_DIR}/crypto/bytestring/asn1_compat.c - ${BORINGSSL_SOURCE_DIR}/crypto/bytestring/ber.c - ${BORINGSSL_SOURCE_DIR}/crypto/bytestring/cbb.c - ${BORINGSSL_SOURCE_DIR}/crypto/bytestring/cbs.c - ${BORINGSSL_SOURCE_DIR}/crypto/bytestring/unicode.c - ${BORINGSSL_SOURCE_DIR}/crypto/chacha/chacha.c - ${BORINGSSL_SOURCE_DIR}/crypto/cipher_extra/cipher_extra.c - ${BORINGSSL_SOURCE_DIR}/crypto/cipher_extra/derive_key.c - ${BORINGSSL_SOURCE_DIR}/crypto/cipher_extra/e_aesccm.c - ${BORINGSSL_SOURCE_DIR}/crypto/cipher_extra/e_aesctrhmac.c - ${BORINGSSL_SOURCE_DIR}/crypto/cipher_extra/e_aesgcmsiv.c - ${BORINGSSL_SOURCE_DIR}/crypto/cipher_extra/e_chacha20poly1305.c - ${BORINGSSL_SOURCE_DIR}/crypto/cipher_extra/e_null.c - ${BORINGSSL_SOURCE_DIR}/crypto/cipher_extra/e_rc2.c - ${BORINGSSL_SOURCE_DIR}/crypto/cipher_extra/e_rc4.c - ${BORINGSSL_SOURCE_DIR}/crypto/cipher_extra/e_tls.c - ${BORINGSSL_SOURCE_DIR}/crypto/cipher_extra/tls_cbc.c - 
${BORINGSSL_SOURCE_DIR}/crypto/cmac/cmac.c - ${BORINGSSL_SOURCE_DIR}/crypto/conf/conf.c - ${BORINGSSL_SOURCE_DIR}/crypto/cpu-aarch64-fuchsia.c - ${BORINGSSL_SOURCE_DIR}/crypto/cpu-aarch64-linux.c - ${BORINGSSL_SOURCE_DIR}/crypto/cpu-arm-linux.c - ${BORINGSSL_SOURCE_DIR}/crypto/cpu-arm.c - ${BORINGSSL_SOURCE_DIR}/crypto/cpu-intel.c - ${BORINGSSL_SOURCE_DIR}/crypto/cpu-ppc64le.c - ${BORINGSSL_SOURCE_DIR}/crypto/crypto.c - ${BORINGSSL_SOURCE_DIR}/crypto/curve25519/curve25519.c - ${BORINGSSL_SOURCE_DIR}/crypto/curve25519/spake25519.c - ${BORINGSSL_SOURCE_DIR}/crypto/dh_extra/dh_asn1.c - ${BORINGSSL_SOURCE_DIR}/crypto/dh_extra/params.c - ${BORINGSSL_SOURCE_DIR}/crypto/digest_extra/digest_extra.c - ${BORINGSSL_SOURCE_DIR}/crypto/dsa/dsa.c - ${BORINGSSL_SOURCE_DIR}/crypto/dsa/dsa_asn1.c - ${BORINGSSL_SOURCE_DIR}/crypto/ec_extra/ec_asn1.c - ${BORINGSSL_SOURCE_DIR}/crypto/ec_extra/ec_derive.c - ${BORINGSSL_SOURCE_DIR}/crypto/ec_extra/hash_to_curve.c - ${BORINGSSL_SOURCE_DIR}/crypto/ecdh_extra/ecdh_extra.c - ${BORINGSSL_SOURCE_DIR}/crypto/ecdsa_extra/ecdsa_asn1.c - ${BORINGSSL_SOURCE_DIR}/crypto/engine/engine.c - ${BORINGSSL_SOURCE_DIR}/crypto/err/err.c - ${BORINGSSL_SOURCE_DIR}/crypto/evp/digestsign.c - ${BORINGSSL_SOURCE_DIR}/crypto/evp/evp.c - ${BORINGSSL_SOURCE_DIR}/crypto/evp/evp_asn1.c - ${BORINGSSL_SOURCE_DIR}/crypto/evp/evp_ctx.c - ${BORINGSSL_SOURCE_DIR}/crypto/evp/p_dsa_asn1.c - ${BORINGSSL_SOURCE_DIR}/crypto/evp/p_ec.c - ${BORINGSSL_SOURCE_DIR}/crypto/evp/p_ec_asn1.c - ${BORINGSSL_SOURCE_DIR}/crypto/evp/p_ed25519.c - ${BORINGSSL_SOURCE_DIR}/crypto/evp/p_ed25519_asn1.c - ${BORINGSSL_SOURCE_DIR}/crypto/evp/p_rsa.c - ${BORINGSSL_SOURCE_DIR}/crypto/evp/p_rsa_asn1.c - ${BORINGSSL_SOURCE_DIR}/crypto/evp/p_x25519.c - ${BORINGSSL_SOURCE_DIR}/crypto/evp/p_x25519_asn1.c - ${BORINGSSL_SOURCE_DIR}/crypto/evp/pbkdf.c - ${BORINGSSL_SOURCE_DIR}/crypto/evp/print.c - ${BORINGSSL_SOURCE_DIR}/crypto/evp/scrypt.c - ${BORINGSSL_SOURCE_DIR}/crypto/evp/sign.c - ${BORINGSSL_SOURCE_DIR}/crypto/ex_data.c - ${BORINGSSL_SOURCE_DIR}/crypto/fipsmodule/bcm.c - ${BORINGSSL_SOURCE_DIR}/crypto/fipsmodule/fips_shared_support.c - ${BORINGSSL_SOURCE_DIR}/crypto/fipsmodule/is_fips.c - ${BORINGSSL_SOURCE_DIR}/crypto/hkdf/hkdf.c - ${BORINGSSL_SOURCE_DIR}/crypto/hpke/hpke.c - ${BORINGSSL_SOURCE_DIR}/crypto/hrss/hrss.c - ${BORINGSSL_SOURCE_DIR}/crypto/lhash/lhash.c - ${BORINGSSL_SOURCE_DIR}/crypto/mem.c - ${BORINGSSL_SOURCE_DIR}/crypto/obj/obj.c - ${BORINGSSL_SOURCE_DIR}/crypto/obj/obj_xref.c - ${BORINGSSL_SOURCE_DIR}/crypto/pem/pem_all.c - ${BORINGSSL_SOURCE_DIR}/crypto/pem/pem_info.c - ${BORINGSSL_SOURCE_DIR}/crypto/pem/pem_lib.c - ${BORINGSSL_SOURCE_DIR}/crypto/pem/pem_oth.c - ${BORINGSSL_SOURCE_DIR}/crypto/pem/pem_pk8.c - ${BORINGSSL_SOURCE_DIR}/crypto/pem/pem_pkey.c - ${BORINGSSL_SOURCE_DIR}/crypto/pem/pem_x509.c - ${BORINGSSL_SOURCE_DIR}/crypto/pem/pem_xaux.c - ${BORINGSSL_SOURCE_DIR}/crypto/pkcs7/pkcs7.c - ${BORINGSSL_SOURCE_DIR}/crypto/pkcs7/pkcs7_x509.c - ${BORINGSSL_SOURCE_DIR}/crypto/pkcs8/p5_pbev2.c - ${BORINGSSL_SOURCE_DIR}/crypto/pkcs8/pkcs8.c - ${BORINGSSL_SOURCE_DIR}/crypto/pkcs8/pkcs8_x509.c - ${BORINGSSL_SOURCE_DIR}/crypto/poly1305/poly1305.c - ${BORINGSSL_SOURCE_DIR}/crypto/poly1305/poly1305_arm.c - ${BORINGSSL_SOURCE_DIR}/crypto/poly1305/poly1305_vec.c - ${BORINGSSL_SOURCE_DIR}/crypto/pool/pool.c - ${BORINGSSL_SOURCE_DIR}/crypto/rand_extra/deterministic.c - ${BORINGSSL_SOURCE_DIR}/crypto/rand_extra/forkunsafe.c - ${BORINGSSL_SOURCE_DIR}/crypto/rand_extra/fuchsia.c - 
${BORINGSSL_SOURCE_DIR}/crypto/rand_extra/passive.c - ${BORINGSSL_SOURCE_DIR}/crypto/rand_extra/rand_extra.c - ${BORINGSSL_SOURCE_DIR}/crypto/rand_extra/windows.c - ${BORINGSSL_SOURCE_DIR}/crypto/rc4/rc4.c - ${BORINGSSL_SOURCE_DIR}/crypto/refcount_c11.c - ${BORINGSSL_SOURCE_DIR}/crypto/refcount_lock.c - ${BORINGSSL_SOURCE_DIR}/crypto/rsa_extra/rsa_asn1.c - ${BORINGSSL_SOURCE_DIR}/crypto/rsa_extra/rsa_print.c - ${BORINGSSL_SOURCE_DIR}/crypto/siphash/siphash.c - ${BORINGSSL_SOURCE_DIR}/crypto/stack/stack.c - ${BORINGSSL_SOURCE_DIR}/crypto/thread.c - ${BORINGSSL_SOURCE_DIR}/crypto/thread_none.c - ${BORINGSSL_SOURCE_DIR}/crypto/thread_pthread.c - ${BORINGSSL_SOURCE_DIR}/crypto/thread_win.c - ${BORINGSSL_SOURCE_DIR}/crypto/trust_token/pmbtoken.c - ${BORINGSSL_SOURCE_DIR}/crypto/trust_token/trust_token.c - ${BORINGSSL_SOURCE_DIR}/crypto/trust_token/voprf.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/a_digest.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/a_sign.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/a_strex.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/a_verify.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/algorithm.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/asn1_gen.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/by_dir.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/by_file.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/i2d_pr.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/rsa_pss.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/t_crl.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/t_req.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/t_x509.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/t_x509a.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/x509.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/x509_att.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/x509_cmp.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/x509_d2.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/x509_def.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/x509_ext.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/x509_lu.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/x509_obj.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/x509_r2x.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/x509_req.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/x509_set.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/x509_trs.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/x509_txt.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/x509_v3.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/x509_vfy.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/x509_vpm.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/x509cset.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/x509name.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/x509rset.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/x509spki.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/x_algor.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/x_all.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/x_attrib.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/x_crl.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/x_exten.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/x_info.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/x_name.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/x_pkey.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/x_pubkey.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/x_req.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/x_sig.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/x_spki.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/x_val.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/x_x509.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509/x_x509a.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509v3/pcy_cache.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509v3/pcy_data.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509v3/pcy_lib.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509v3/pcy_map.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509v3/pcy_node.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509v3/pcy_tree.c - 
${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_akey.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_akeya.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_alt.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_bcons.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_bitst.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_conf.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_cpols.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_crld.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_enum.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_extku.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_genn.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_ia5.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_info.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_int.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_lib.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_ncons.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_ocsp.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_pci.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_pcia.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_pcons.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_pmaps.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_prn.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_purp.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_skey.c - ${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_utl.c + "${BORINGSSL_SOURCE_DIR}/crypto/asn1/a_bitstr.c" + "${BORINGSSL_SOURCE_DIR}/crypto/asn1/a_bool.c" + "${BORINGSSL_SOURCE_DIR}/crypto/asn1/a_d2i_fp.c" + "${BORINGSSL_SOURCE_DIR}/crypto/asn1/a_dup.c" + "${BORINGSSL_SOURCE_DIR}/crypto/asn1/a_enum.c" + "${BORINGSSL_SOURCE_DIR}/crypto/asn1/a_gentm.c" + "${BORINGSSL_SOURCE_DIR}/crypto/asn1/a_i2d_fp.c" + "${BORINGSSL_SOURCE_DIR}/crypto/asn1/a_int.c" + "${BORINGSSL_SOURCE_DIR}/crypto/asn1/a_mbstr.c" + "${BORINGSSL_SOURCE_DIR}/crypto/asn1/a_object.c" + "${BORINGSSL_SOURCE_DIR}/crypto/asn1/a_octet.c" + "${BORINGSSL_SOURCE_DIR}/crypto/asn1/a_print.c" + "${BORINGSSL_SOURCE_DIR}/crypto/asn1/a_strnid.c" + "${BORINGSSL_SOURCE_DIR}/crypto/asn1/a_time.c" + "${BORINGSSL_SOURCE_DIR}/crypto/asn1/a_type.c" + "${BORINGSSL_SOURCE_DIR}/crypto/asn1/a_utctm.c" + "${BORINGSSL_SOURCE_DIR}/crypto/asn1/a_utf8.c" + "${BORINGSSL_SOURCE_DIR}/crypto/asn1/asn1_lib.c" + "${BORINGSSL_SOURCE_DIR}/crypto/asn1/asn1_par.c" + "${BORINGSSL_SOURCE_DIR}/crypto/asn1/asn_pack.c" + "${BORINGSSL_SOURCE_DIR}/crypto/asn1/f_enum.c" + "${BORINGSSL_SOURCE_DIR}/crypto/asn1/f_int.c" + "${BORINGSSL_SOURCE_DIR}/crypto/asn1/f_string.c" + "${BORINGSSL_SOURCE_DIR}/crypto/asn1/tasn_dec.c" + "${BORINGSSL_SOURCE_DIR}/crypto/asn1/tasn_enc.c" + "${BORINGSSL_SOURCE_DIR}/crypto/asn1/tasn_fre.c" + "${BORINGSSL_SOURCE_DIR}/crypto/asn1/tasn_new.c" + "${BORINGSSL_SOURCE_DIR}/crypto/asn1/tasn_typ.c" + "${BORINGSSL_SOURCE_DIR}/crypto/asn1/tasn_utl.c" + "${BORINGSSL_SOURCE_DIR}/crypto/asn1/time_support.c" + "${BORINGSSL_SOURCE_DIR}/crypto/base64/base64.c" + "${BORINGSSL_SOURCE_DIR}/crypto/bio/bio.c" + "${BORINGSSL_SOURCE_DIR}/crypto/bio/bio_mem.c" + "${BORINGSSL_SOURCE_DIR}/crypto/bio/connect.c" + "${BORINGSSL_SOURCE_DIR}/crypto/bio/fd.c" + "${BORINGSSL_SOURCE_DIR}/crypto/bio/file.c" + "${BORINGSSL_SOURCE_DIR}/crypto/bio/hexdump.c" + "${BORINGSSL_SOURCE_DIR}/crypto/bio/pair.c" + "${BORINGSSL_SOURCE_DIR}/crypto/bio/printf.c" + "${BORINGSSL_SOURCE_DIR}/crypto/bio/socket.c" + "${BORINGSSL_SOURCE_DIR}/crypto/bio/socket_helper.c" + "${BORINGSSL_SOURCE_DIR}/crypto/bn_extra/bn_asn1.c" + "${BORINGSSL_SOURCE_DIR}/crypto/bn_extra/convert.c" + "${BORINGSSL_SOURCE_DIR}/crypto/buf/buf.c" + "${BORINGSSL_SOURCE_DIR}/crypto/bytestring/asn1_compat.c" + "${BORINGSSL_SOURCE_DIR}/crypto/bytestring/ber.c" + 
"${BORINGSSL_SOURCE_DIR}/crypto/bytestring/cbb.c" + "${BORINGSSL_SOURCE_DIR}/crypto/bytestring/cbs.c" + "${BORINGSSL_SOURCE_DIR}/crypto/bytestring/unicode.c" + "${BORINGSSL_SOURCE_DIR}/crypto/chacha/chacha.c" + "${BORINGSSL_SOURCE_DIR}/crypto/cipher_extra/cipher_extra.c" + "${BORINGSSL_SOURCE_DIR}/crypto/cipher_extra/derive_key.c" + "${BORINGSSL_SOURCE_DIR}/crypto/cipher_extra/e_aesccm.c" + "${BORINGSSL_SOURCE_DIR}/crypto/cipher_extra/e_aesctrhmac.c" + "${BORINGSSL_SOURCE_DIR}/crypto/cipher_extra/e_aesgcmsiv.c" + "${BORINGSSL_SOURCE_DIR}/crypto/cipher_extra/e_chacha20poly1305.c" + "${BORINGSSL_SOURCE_DIR}/crypto/cipher_extra/e_null.c" + "${BORINGSSL_SOURCE_DIR}/crypto/cipher_extra/e_rc2.c" + "${BORINGSSL_SOURCE_DIR}/crypto/cipher_extra/e_rc4.c" + "${BORINGSSL_SOURCE_DIR}/crypto/cipher_extra/e_tls.c" + "${BORINGSSL_SOURCE_DIR}/crypto/cipher_extra/tls_cbc.c" + "${BORINGSSL_SOURCE_DIR}/crypto/cmac/cmac.c" + "${BORINGSSL_SOURCE_DIR}/crypto/conf/conf.c" + "${BORINGSSL_SOURCE_DIR}/crypto/cpu-aarch64-fuchsia.c" + "${BORINGSSL_SOURCE_DIR}/crypto/cpu-aarch64-linux.c" + "${BORINGSSL_SOURCE_DIR}/crypto/cpu-arm-linux.c" + "${BORINGSSL_SOURCE_DIR}/crypto/cpu-arm.c" + "${BORINGSSL_SOURCE_DIR}/crypto/cpu-intel.c" + "${BORINGSSL_SOURCE_DIR}/crypto/cpu-ppc64le.c" + "${BORINGSSL_SOURCE_DIR}/crypto/crypto.c" + "${BORINGSSL_SOURCE_DIR}/crypto/curve25519/curve25519.c" + "${BORINGSSL_SOURCE_DIR}/crypto/curve25519/spake25519.c" + "${BORINGSSL_SOURCE_DIR}/crypto/dh_extra/dh_asn1.c" + "${BORINGSSL_SOURCE_DIR}/crypto/dh_extra/params.c" + "${BORINGSSL_SOURCE_DIR}/crypto/digest_extra/digest_extra.c" + "${BORINGSSL_SOURCE_DIR}/crypto/dsa/dsa.c" + "${BORINGSSL_SOURCE_DIR}/crypto/dsa/dsa_asn1.c" + "${BORINGSSL_SOURCE_DIR}/crypto/ec_extra/ec_asn1.c" + "${BORINGSSL_SOURCE_DIR}/crypto/ec_extra/ec_derive.c" + "${BORINGSSL_SOURCE_DIR}/crypto/ec_extra/hash_to_curve.c" + "${BORINGSSL_SOURCE_DIR}/crypto/ecdh_extra/ecdh_extra.c" + "${BORINGSSL_SOURCE_DIR}/crypto/ecdsa_extra/ecdsa_asn1.c" + "${BORINGSSL_SOURCE_DIR}/crypto/engine/engine.c" + "${BORINGSSL_SOURCE_DIR}/crypto/err/err.c" + "${BORINGSSL_SOURCE_DIR}/crypto/evp/digestsign.c" + "${BORINGSSL_SOURCE_DIR}/crypto/evp/evp.c" + "${BORINGSSL_SOURCE_DIR}/crypto/evp/evp_asn1.c" + "${BORINGSSL_SOURCE_DIR}/crypto/evp/evp_ctx.c" + "${BORINGSSL_SOURCE_DIR}/crypto/evp/p_dsa_asn1.c" + "${BORINGSSL_SOURCE_DIR}/crypto/evp/p_ec.c" + "${BORINGSSL_SOURCE_DIR}/crypto/evp/p_ec_asn1.c" + "${BORINGSSL_SOURCE_DIR}/crypto/evp/p_ed25519.c" + "${BORINGSSL_SOURCE_DIR}/crypto/evp/p_ed25519_asn1.c" + "${BORINGSSL_SOURCE_DIR}/crypto/evp/p_rsa.c" + "${BORINGSSL_SOURCE_DIR}/crypto/evp/p_rsa_asn1.c" + "${BORINGSSL_SOURCE_DIR}/crypto/evp/p_x25519.c" + "${BORINGSSL_SOURCE_DIR}/crypto/evp/p_x25519_asn1.c" + "${BORINGSSL_SOURCE_DIR}/crypto/evp/pbkdf.c" + "${BORINGSSL_SOURCE_DIR}/crypto/evp/print.c" + "${BORINGSSL_SOURCE_DIR}/crypto/evp/scrypt.c" + "${BORINGSSL_SOURCE_DIR}/crypto/evp/sign.c" + "${BORINGSSL_SOURCE_DIR}/crypto/ex_data.c" + "${BORINGSSL_SOURCE_DIR}/crypto/fipsmodule/bcm.c" + "${BORINGSSL_SOURCE_DIR}/crypto/fipsmodule/fips_shared_support.c" + "${BORINGSSL_SOURCE_DIR}/crypto/fipsmodule/is_fips.c" + "${BORINGSSL_SOURCE_DIR}/crypto/hkdf/hkdf.c" + "${BORINGSSL_SOURCE_DIR}/crypto/hpke/hpke.c" + "${BORINGSSL_SOURCE_DIR}/crypto/hrss/hrss.c" + "${BORINGSSL_SOURCE_DIR}/crypto/lhash/lhash.c" + "${BORINGSSL_SOURCE_DIR}/crypto/mem.c" + "${BORINGSSL_SOURCE_DIR}/crypto/obj/obj.c" + "${BORINGSSL_SOURCE_DIR}/crypto/obj/obj_xref.c" + "${BORINGSSL_SOURCE_DIR}/crypto/pem/pem_all.c" + 
"${BORINGSSL_SOURCE_DIR}/crypto/pem/pem_info.c" + "${BORINGSSL_SOURCE_DIR}/crypto/pem/pem_lib.c" + "${BORINGSSL_SOURCE_DIR}/crypto/pem/pem_oth.c" + "${BORINGSSL_SOURCE_DIR}/crypto/pem/pem_pk8.c" + "${BORINGSSL_SOURCE_DIR}/crypto/pem/pem_pkey.c" + "${BORINGSSL_SOURCE_DIR}/crypto/pem/pem_x509.c" + "${BORINGSSL_SOURCE_DIR}/crypto/pem/pem_xaux.c" + "${BORINGSSL_SOURCE_DIR}/crypto/pkcs7/pkcs7.c" + "${BORINGSSL_SOURCE_DIR}/crypto/pkcs7/pkcs7_x509.c" + "${BORINGSSL_SOURCE_DIR}/crypto/pkcs8/p5_pbev2.c" + "${BORINGSSL_SOURCE_DIR}/crypto/pkcs8/pkcs8.c" + "${BORINGSSL_SOURCE_DIR}/crypto/pkcs8/pkcs8_x509.c" + "${BORINGSSL_SOURCE_DIR}/crypto/poly1305/poly1305.c" + "${BORINGSSL_SOURCE_DIR}/crypto/poly1305/poly1305_arm.c" + "${BORINGSSL_SOURCE_DIR}/crypto/poly1305/poly1305_vec.c" + "${BORINGSSL_SOURCE_DIR}/crypto/pool/pool.c" + "${BORINGSSL_SOURCE_DIR}/crypto/rand_extra/deterministic.c" + "${BORINGSSL_SOURCE_DIR}/crypto/rand_extra/forkunsafe.c" + "${BORINGSSL_SOURCE_DIR}/crypto/rand_extra/fuchsia.c" + "${BORINGSSL_SOURCE_DIR}/crypto/rand_extra/passive.c" + "${BORINGSSL_SOURCE_DIR}/crypto/rand_extra/rand_extra.c" + "${BORINGSSL_SOURCE_DIR}/crypto/rand_extra/windows.c" + "${BORINGSSL_SOURCE_DIR}/crypto/rc4/rc4.c" + "${BORINGSSL_SOURCE_DIR}/crypto/refcount_c11.c" + "${BORINGSSL_SOURCE_DIR}/crypto/refcount_lock.c" + "${BORINGSSL_SOURCE_DIR}/crypto/rsa_extra/rsa_asn1.c" + "${BORINGSSL_SOURCE_DIR}/crypto/rsa_extra/rsa_print.c" + "${BORINGSSL_SOURCE_DIR}/crypto/siphash/siphash.c" + "${BORINGSSL_SOURCE_DIR}/crypto/stack/stack.c" + "${BORINGSSL_SOURCE_DIR}/crypto/thread.c" + "${BORINGSSL_SOURCE_DIR}/crypto/thread_none.c" + "${BORINGSSL_SOURCE_DIR}/crypto/thread_pthread.c" + "${BORINGSSL_SOURCE_DIR}/crypto/thread_win.c" + "${BORINGSSL_SOURCE_DIR}/crypto/trust_token/pmbtoken.c" + "${BORINGSSL_SOURCE_DIR}/crypto/trust_token/trust_token.c" + "${BORINGSSL_SOURCE_DIR}/crypto/trust_token/voprf.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/a_digest.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/a_sign.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/a_strex.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/a_verify.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/algorithm.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/asn1_gen.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/by_dir.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/by_file.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/i2d_pr.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/rsa_pss.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/t_crl.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/t_req.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/t_x509.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/t_x509a.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/x509.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/x509_att.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/x509_cmp.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/x509_d2.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/x509_def.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/x509_ext.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/x509_lu.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/x509_obj.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/x509_r2x.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/x509_req.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/x509_set.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/x509_trs.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/x509_txt.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/x509_v3.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/x509_vfy.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/x509_vpm.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/x509cset.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/x509name.c" + 
"${BORINGSSL_SOURCE_DIR}/crypto/x509/x509rset.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/x509spki.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/x_algor.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/x_all.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/x_attrib.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/x_crl.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/x_exten.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/x_info.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/x_name.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/x_pkey.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/x_pubkey.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/x_req.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/x_sig.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/x_spki.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/x_val.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/x_x509.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509/x_x509a.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509v3/pcy_cache.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509v3/pcy_data.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509v3/pcy_lib.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509v3/pcy_map.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509v3/pcy_node.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509v3/pcy_tree.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_akey.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_akeya.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_alt.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_bcons.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_bitst.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_conf.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_cpols.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_crld.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_enum.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_extku.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_genn.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_ia5.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_info.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_int.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_lib.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_ncons.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_ocsp.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_pci.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_pcia.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_pcons.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_pmaps.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_prn.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_purp.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_skey.c" + "${BORINGSSL_SOURCE_DIR}/crypto/x509v3/v3_utl.c" ) add_library( ssl - ${BORINGSSL_SOURCE_DIR}/ssl/bio_ssl.cc - ${BORINGSSL_SOURCE_DIR}/ssl/d1_both.cc - ${BORINGSSL_SOURCE_DIR}/ssl/d1_lib.cc - ${BORINGSSL_SOURCE_DIR}/ssl/d1_pkt.cc - ${BORINGSSL_SOURCE_DIR}/ssl/d1_srtp.cc - ${BORINGSSL_SOURCE_DIR}/ssl/dtls_method.cc - ${BORINGSSL_SOURCE_DIR}/ssl/dtls_record.cc - ${BORINGSSL_SOURCE_DIR}/ssl/handoff.cc - ${BORINGSSL_SOURCE_DIR}/ssl/handshake.cc - ${BORINGSSL_SOURCE_DIR}/ssl/handshake_client.cc - ${BORINGSSL_SOURCE_DIR}/ssl/handshake_server.cc - ${BORINGSSL_SOURCE_DIR}/ssl/s3_both.cc - ${BORINGSSL_SOURCE_DIR}/ssl/s3_lib.cc - ${BORINGSSL_SOURCE_DIR}/ssl/s3_pkt.cc - ${BORINGSSL_SOURCE_DIR}/ssl/ssl_aead_ctx.cc - ${BORINGSSL_SOURCE_DIR}/ssl/ssl_asn1.cc - ${BORINGSSL_SOURCE_DIR}/ssl/ssl_buffer.cc - ${BORINGSSL_SOURCE_DIR}/ssl/ssl_cert.cc - ${BORINGSSL_SOURCE_DIR}/ssl/ssl_cipher.cc - ${BORINGSSL_SOURCE_DIR}/ssl/ssl_file.cc - ${BORINGSSL_SOURCE_DIR}/ssl/ssl_key_share.cc - ${BORINGSSL_SOURCE_DIR}/ssl/ssl_lib.cc - ${BORINGSSL_SOURCE_DIR}/ssl/ssl_privkey.cc - ${BORINGSSL_SOURCE_DIR}/ssl/ssl_session.cc - ${BORINGSSL_SOURCE_DIR}/ssl/ssl_stat.cc - 
${BORINGSSL_SOURCE_DIR}/ssl/ssl_transcript.cc - ${BORINGSSL_SOURCE_DIR}/ssl/ssl_versions.cc - ${BORINGSSL_SOURCE_DIR}/ssl/ssl_x509.cc - ${BORINGSSL_SOURCE_DIR}/ssl/t1_enc.cc - ${BORINGSSL_SOURCE_DIR}/ssl/t1_lib.cc - ${BORINGSSL_SOURCE_DIR}/ssl/tls13_both.cc - ${BORINGSSL_SOURCE_DIR}/ssl/tls13_client.cc - ${BORINGSSL_SOURCE_DIR}/ssl/tls13_enc.cc - ${BORINGSSL_SOURCE_DIR}/ssl/tls13_server.cc - ${BORINGSSL_SOURCE_DIR}/ssl/tls_method.cc - ${BORINGSSL_SOURCE_DIR}/ssl/tls_record.cc + "${BORINGSSL_SOURCE_DIR}/ssl/bio_ssl.cc" + "${BORINGSSL_SOURCE_DIR}/ssl/d1_both.cc" + "${BORINGSSL_SOURCE_DIR}/ssl/d1_lib.cc" + "${BORINGSSL_SOURCE_DIR}/ssl/d1_pkt.cc" + "${BORINGSSL_SOURCE_DIR}/ssl/d1_srtp.cc" + "${BORINGSSL_SOURCE_DIR}/ssl/dtls_method.cc" + "${BORINGSSL_SOURCE_DIR}/ssl/dtls_record.cc" + "${BORINGSSL_SOURCE_DIR}/ssl/handoff.cc" + "${BORINGSSL_SOURCE_DIR}/ssl/handshake.cc" + "${BORINGSSL_SOURCE_DIR}/ssl/handshake_client.cc" + "${BORINGSSL_SOURCE_DIR}/ssl/handshake_server.cc" + "${BORINGSSL_SOURCE_DIR}/ssl/s3_both.cc" + "${BORINGSSL_SOURCE_DIR}/ssl/s3_lib.cc" + "${BORINGSSL_SOURCE_DIR}/ssl/s3_pkt.cc" + "${BORINGSSL_SOURCE_DIR}/ssl/ssl_aead_ctx.cc" + "${BORINGSSL_SOURCE_DIR}/ssl/ssl_asn1.cc" + "${BORINGSSL_SOURCE_DIR}/ssl/ssl_buffer.cc" + "${BORINGSSL_SOURCE_DIR}/ssl/ssl_cert.cc" + "${BORINGSSL_SOURCE_DIR}/ssl/ssl_cipher.cc" + "${BORINGSSL_SOURCE_DIR}/ssl/ssl_file.cc" + "${BORINGSSL_SOURCE_DIR}/ssl/ssl_key_share.cc" + "${BORINGSSL_SOURCE_DIR}/ssl/ssl_lib.cc" + "${BORINGSSL_SOURCE_DIR}/ssl/ssl_privkey.cc" + "${BORINGSSL_SOURCE_DIR}/ssl/ssl_session.cc" + "${BORINGSSL_SOURCE_DIR}/ssl/ssl_stat.cc" + "${BORINGSSL_SOURCE_DIR}/ssl/ssl_transcript.cc" + "${BORINGSSL_SOURCE_DIR}/ssl/ssl_versions.cc" + "${BORINGSSL_SOURCE_DIR}/ssl/ssl_x509.cc" + "${BORINGSSL_SOURCE_DIR}/ssl/t1_enc.cc" + "${BORINGSSL_SOURCE_DIR}/ssl/t1_lib.cc" + "${BORINGSSL_SOURCE_DIR}/ssl/tls13_both.cc" + "${BORINGSSL_SOURCE_DIR}/ssl/tls13_client.cc" + "${BORINGSSL_SOURCE_DIR}/ssl/tls13_enc.cc" + "${BORINGSSL_SOURCE_DIR}/ssl/tls13_server.cc" + "${BORINGSSL_SOURCE_DIR}/ssl/tls_method.cc" + "${BORINGSSL_SOURCE_DIR}/ssl/tls_record.cc" - ${BORINGSSL_SOURCE_DIR}/decrepit/ssl/ssl_decrepit.c - ${BORINGSSL_SOURCE_DIR}/decrepit/cfb/cfb.c + "${BORINGSSL_SOURCE_DIR}/decrepit/ssl/ssl_decrepit.c" + "${BORINGSSL_SOURCE_DIR}/decrepit/cfb/cfb.c" ) add_executable( bssl - ${BORINGSSL_SOURCE_DIR}/tool/args.cc - ${BORINGSSL_SOURCE_DIR}/tool/ciphers.cc - ${BORINGSSL_SOURCE_DIR}/tool/client.cc - ${BORINGSSL_SOURCE_DIR}/tool/const.cc - ${BORINGSSL_SOURCE_DIR}/tool/digest.cc - ${BORINGSSL_SOURCE_DIR}/tool/fd.cc - ${BORINGSSL_SOURCE_DIR}/tool/file.cc - ${BORINGSSL_SOURCE_DIR}/tool/generate_ed25519.cc - ${BORINGSSL_SOURCE_DIR}/tool/genrsa.cc - ${BORINGSSL_SOURCE_DIR}/tool/pkcs12.cc - ${BORINGSSL_SOURCE_DIR}/tool/rand.cc - ${BORINGSSL_SOURCE_DIR}/tool/server.cc - ${BORINGSSL_SOURCE_DIR}/tool/sign.cc - ${BORINGSSL_SOURCE_DIR}/tool/speed.cc - ${BORINGSSL_SOURCE_DIR}/tool/tool.cc - ${BORINGSSL_SOURCE_DIR}/tool/transport_common.cc + "${BORINGSSL_SOURCE_DIR}/tool/args.cc" + "${BORINGSSL_SOURCE_DIR}/tool/ciphers.cc" + "${BORINGSSL_SOURCE_DIR}/tool/client.cc" + "${BORINGSSL_SOURCE_DIR}/tool/const.cc" + "${BORINGSSL_SOURCE_DIR}/tool/digest.cc" + "${BORINGSSL_SOURCE_DIR}/tool/fd.cc" + "${BORINGSSL_SOURCE_DIR}/tool/file.cc" + "${BORINGSSL_SOURCE_DIR}/tool/generate_ed25519.cc" + "${BORINGSSL_SOURCE_DIR}/tool/genrsa.cc" + "${BORINGSSL_SOURCE_DIR}/tool/pkcs12.cc" + "${BORINGSSL_SOURCE_DIR}/tool/rand.cc" + "${BORINGSSL_SOURCE_DIR}/tool/server.cc" + 
"${BORINGSSL_SOURCE_DIR}/tool/sign.cc" + "${BORINGSSL_SOURCE_DIR}/tool/speed.cc" + "${BORINGSSL_SOURCE_DIR}/tool/tool.cc" + "${BORINGSSL_SOURCE_DIR}/tool/transport_common.cc" ) target_link_libraries(ssl crypto) @@ -655,7 +655,7 @@ if(WIN32) target_link_libraries(bssl ws2_32) endif() -target_include_directories(crypto SYSTEM PUBLIC ${BORINGSSL_SOURCE_DIR}/include) -target_include_directories(ssl SYSTEM PUBLIC ${BORINGSSL_SOURCE_DIR}/include) +target_include_directories(crypto SYSTEM PUBLIC "${BORINGSSL_SOURCE_DIR}/include") +target_include_directories(ssl SYSTEM PUBLIC "${BORINGSSL_SOURCE_DIR}/include") target_compile_options(crypto PRIVATE -Wno-gnu-anonymous-struct) diff --git a/contrib/brotli-cmake/CMakeLists.txt b/contrib/brotli-cmake/CMakeLists.txt index 4c5f584de9d..7293cae0665 100644 --- a/contrib/brotli-cmake/CMakeLists.txt +++ b/contrib/brotli-cmake/CMakeLists.txt @@ -1,41 +1,41 @@ -set(BROTLI_SOURCE_DIR ${ClickHouse_SOURCE_DIR}/contrib/brotli/c) -set(BROTLI_BINARY_DIR ${ClickHouse_BINARY_DIR}/contrib/brotli/c) +set(BROTLI_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/brotli/c") +set(BROTLI_BINARY_DIR "${ClickHouse_BINARY_DIR}/contrib/brotli/c") set(SRCS - ${BROTLI_SOURCE_DIR}/enc/command.c - ${BROTLI_SOURCE_DIR}/enc/fast_log.c - ${BROTLI_SOURCE_DIR}/dec/bit_reader.c - ${BROTLI_SOURCE_DIR}/dec/state.c - ${BROTLI_SOURCE_DIR}/dec/huffman.c - ${BROTLI_SOURCE_DIR}/dec/decode.c - ${BROTLI_SOURCE_DIR}/enc/encode.c - ${BROTLI_SOURCE_DIR}/enc/dictionary_hash.c - ${BROTLI_SOURCE_DIR}/enc/cluster.c - ${BROTLI_SOURCE_DIR}/enc/entropy_encode.c - ${BROTLI_SOURCE_DIR}/enc/literal_cost.c - ${BROTLI_SOURCE_DIR}/enc/compress_fragment_two_pass.c - ${BROTLI_SOURCE_DIR}/enc/static_dict.c - ${BROTLI_SOURCE_DIR}/enc/compress_fragment.c - ${BROTLI_SOURCE_DIR}/enc/block_splitter.c - ${BROTLI_SOURCE_DIR}/enc/backward_references_hq.c - ${BROTLI_SOURCE_DIR}/enc/histogram.c - ${BROTLI_SOURCE_DIR}/enc/brotli_bit_stream.c - ${BROTLI_SOURCE_DIR}/enc/utf8_util.c - ${BROTLI_SOURCE_DIR}/enc/encoder_dict.c - ${BROTLI_SOURCE_DIR}/enc/backward_references.c - ${BROTLI_SOURCE_DIR}/enc/bit_cost.c - ${BROTLI_SOURCE_DIR}/enc/metablock.c - ${BROTLI_SOURCE_DIR}/enc/memory.c - ${BROTLI_SOURCE_DIR}/common/dictionary.c - ${BROTLI_SOURCE_DIR}/common/transform.c - ${BROTLI_SOURCE_DIR}/common/platform.c - ${BROTLI_SOURCE_DIR}/common/context.c - ${BROTLI_SOURCE_DIR}/common/constants.c + "${BROTLI_SOURCE_DIR}/enc/command.c" + "${BROTLI_SOURCE_DIR}/enc/fast_log.c" + "${BROTLI_SOURCE_DIR}/dec/bit_reader.c" + "${BROTLI_SOURCE_DIR}/dec/state.c" + "${BROTLI_SOURCE_DIR}/dec/huffman.c" + "${BROTLI_SOURCE_DIR}/dec/decode.c" + "${BROTLI_SOURCE_DIR}/enc/encode.c" + "${BROTLI_SOURCE_DIR}/enc/dictionary_hash.c" + "${BROTLI_SOURCE_DIR}/enc/cluster.c" + "${BROTLI_SOURCE_DIR}/enc/entropy_encode.c" + "${BROTLI_SOURCE_DIR}/enc/literal_cost.c" + "${BROTLI_SOURCE_DIR}/enc/compress_fragment_two_pass.c" + "${BROTLI_SOURCE_DIR}/enc/static_dict.c" + "${BROTLI_SOURCE_DIR}/enc/compress_fragment.c" + "${BROTLI_SOURCE_DIR}/enc/block_splitter.c" + "${BROTLI_SOURCE_DIR}/enc/backward_references_hq.c" + "${BROTLI_SOURCE_DIR}/enc/histogram.c" + "${BROTLI_SOURCE_DIR}/enc/brotli_bit_stream.c" + "${BROTLI_SOURCE_DIR}/enc/utf8_util.c" + "${BROTLI_SOURCE_DIR}/enc/encoder_dict.c" + "${BROTLI_SOURCE_DIR}/enc/backward_references.c" + "${BROTLI_SOURCE_DIR}/enc/bit_cost.c" + "${BROTLI_SOURCE_DIR}/enc/metablock.c" + "${BROTLI_SOURCE_DIR}/enc/memory.c" + "${BROTLI_SOURCE_DIR}/common/dictionary.c" + "${BROTLI_SOURCE_DIR}/common/transform.c" + 
"${BROTLI_SOURCE_DIR}/common/platform.c" + "${BROTLI_SOURCE_DIR}/common/context.c" + "${BROTLI_SOURCE_DIR}/common/constants.c" ) add_library(brotli ${SRCS}) -target_include_directories(brotli PUBLIC ${BROTLI_SOURCE_DIR}/include) +target_include_directories(brotli PUBLIC "${BROTLI_SOURCE_DIR}/include") if(M_LIBRARY) target_link_libraries(brotli PRIVATE ${M_LIBRARY}) diff --git a/contrib/capnproto-cmake/CMakeLists.txt b/contrib/capnproto-cmake/CMakeLists.txt index 949481e7ef5..9f6e076cc7d 100644 --- a/contrib/capnproto-cmake/CMakeLists.txt +++ b/contrib/capnproto-cmake/CMakeLists.txt @@ -1,53 +1,53 @@ -set (CAPNPROTO_SOURCE_DIR ${ClickHouse_SOURCE_DIR}/contrib/capnproto/c++/src) +set (CAPNPROTO_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/capnproto/c++/src") set (CMAKE_CXX_STANDARD 17) set (KJ_SRCS - ${CAPNPROTO_SOURCE_DIR}/kj/array.c++ - ${CAPNPROTO_SOURCE_DIR}/kj/common.c++ - ${CAPNPROTO_SOURCE_DIR}/kj/debug.c++ - ${CAPNPROTO_SOURCE_DIR}/kj/exception.c++ - ${CAPNPROTO_SOURCE_DIR}/kj/io.c++ - ${CAPNPROTO_SOURCE_DIR}/kj/memory.c++ - ${CAPNPROTO_SOURCE_DIR}/kj/mutex.c++ - ${CAPNPROTO_SOURCE_DIR}/kj/string.c++ - ${CAPNPROTO_SOURCE_DIR}/kj/hash.c++ - ${CAPNPROTO_SOURCE_DIR}/kj/table.c++ - ${CAPNPROTO_SOURCE_DIR}/kj/thread.c++ - ${CAPNPROTO_SOURCE_DIR}/kj/main.c++ - ${CAPNPROTO_SOURCE_DIR}/kj/arena.c++ - ${CAPNPROTO_SOURCE_DIR}/kj/test-helpers.c++ - ${CAPNPROTO_SOURCE_DIR}/kj/units.c++ - ${CAPNPROTO_SOURCE_DIR}/kj/encoding.c++ + "${CAPNPROTO_SOURCE_DIR}/kj/array.c++" + "${CAPNPROTO_SOURCE_DIR}/kj/common.c++" + "${CAPNPROTO_SOURCE_DIR}/kj/debug.c++" + "${CAPNPROTO_SOURCE_DIR}/kj/exception.c++" + "${CAPNPROTO_SOURCE_DIR}/kj/io.c++" + "${CAPNPROTO_SOURCE_DIR}/kj/memory.c++" + "${CAPNPROTO_SOURCE_DIR}/kj/mutex.c++" + "${CAPNPROTO_SOURCE_DIR}/kj/string.c++" + "${CAPNPROTO_SOURCE_DIR}/kj/hash.c++" + "${CAPNPROTO_SOURCE_DIR}/kj/table.c++" + "${CAPNPROTO_SOURCE_DIR}/kj/thread.c++" + "${CAPNPROTO_SOURCE_DIR}/kj/main.c++" + "${CAPNPROTO_SOURCE_DIR}/kj/arena.c++" + "${CAPNPROTO_SOURCE_DIR}/kj/test-helpers.c++" + "${CAPNPROTO_SOURCE_DIR}/kj/units.c++" + "${CAPNPROTO_SOURCE_DIR}/kj/encoding.c++" - ${CAPNPROTO_SOURCE_DIR}/kj/refcount.c++ - ${CAPNPROTO_SOURCE_DIR}/kj/string-tree.c++ - ${CAPNPROTO_SOURCE_DIR}/kj/time.c++ - ${CAPNPROTO_SOURCE_DIR}/kj/filesystem.c++ - ${CAPNPROTO_SOURCE_DIR}/kj/filesystem-disk-unix.c++ - ${CAPNPROTO_SOURCE_DIR}/kj/filesystem-disk-win32.c++ - ${CAPNPROTO_SOURCE_DIR}/kj/parse/char.c++ + "${CAPNPROTO_SOURCE_DIR}/kj/refcount.c++" + "${CAPNPROTO_SOURCE_DIR}/kj/string-tree.c++" + "${CAPNPROTO_SOURCE_DIR}/kj/time.c++" + "${CAPNPROTO_SOURCE_DIR}/kj/filesystem.c++" + "${CAPNPROTO_SOURCE_DIR}/kj/filesystem-disk-unix.c++" + "${CAPNPROTO_SOURCE_DIR}/kj/filesystem-disk-win32.c++" + "${CAPNPROTO_SOURCE_DIR}/kj/parse/char.c++" ) add_library(kj ${KJ_SRCS}) target_include_directories(kj SYSTEM PUBLIC ${CAPNPROTO_SOURCE_DIR}) set (CAPNP_SRCS - ${CAPNPROTO_SOURCE_DIR}/capnp/c++.capnp.c++ - ${CAPNPROTO_SOURCE_DIR}/capnp/blob.c++ - ${CAPNPROTO_SOURCE_DIR}/capnp/arena.c++ - ${CAPNPROTO_SOURCE_DIR}/capnp/layout.c++ - ${CAPNPROTO_SOURCE_DIR}/capnp/list.c++ - ${CAPNPROTO_SOURCE_DIR}/capnp/any.c++ - ${CAPNPROTO_SOURCE_DIR}/capnp/message.c++ - ${CAPNPROTO_SOURCE_DIR}/capnp/schema.capnp.c++ - ${CAPNPROTO_SOURCE_DIR}/capnp/serialize.c++ - ${CAPNPROTO_SOURCE_DIR}/capnp/serialize-packed.c++ + "${CAPNPROTO_SOURCE_DIR}/capnp/c++.capnp.c++" + "${CAPNPROTO_SOURCE_DIR}/capnp/blob.c++" + "${CAPNPROTO_SOURCE_DIR}/capnp/arena.c++" + "${CAPNPROTO_SOURCE_DIR}/capnp/layout.c++" + "${CAPNPROTO_SOURCE_DIR}/capnp/list.c++" 
+ "${CAPNPROTO_SOURCE_DIR}/capnp/any.c++" + "${CAPNPROTO_SOURCE_DIR}/capnp/message.c++" + "${CAPNPROTO_SOURCE_DIR}/capnp/schema.capnp.c++" + "${CAPNPROTO_SOURCE_DIR}/capnp/serialize.c++" + "${CAPNPROTO_SOURCE_DIR}/capnp/serialize-packed.c++" - ${CAPNPROTO_SOURCE_DIR}/capnp/schema.c++ - ${CAPNPROTO_SOURCE_DIR}/capnp/schema-loader.c++ - ${CAPNPROTO_SOURCE_DIR}/capnp/dynamic.c++ - ${CAPNPROTO_SOURCE_DIR}/capnp/stringify.c++ + "${CAPNPROTO_SOURCE_DIR}/capnp/schema.c++" + "${CAPNPROTO_SOURCE_DIR}/capnp/schema-loader.c++" + "${CAPNPROTO_SOURCE_DIR}/capnp/dynamic.c++" + "${CAPNPROTO_SOURCE_DIR}/capnp/stringify.c++" ) add_library(capnp ${CAPNP_SRCS}) @@ -57,16 +57,16 @@ set_target_properties(capnp target_link_libraries(capnp PUBLIC kj) set (CAPNPC_SRCS - ${CAPNPROTO_SOURCE_DIR}/capnp/compiler/type-id.c++ - ${CAPNPROTO_SOURCE_DIR}/capnp/compiler/error-reporter.c++ - ${CAPNPROTO_SOURCE_DIR}/capnp/compiler/lexer.capnp.c++ - ${CAPNPROTO_SOURCE_DIR}/capnp/compiler/lexer.c++ - ${CAPNPROTO_SOURCE_DIR}/capnp/compiler/grammar.capnp.c++ - ${CAPNPROTO_SOURCE_DIR}/capnp/compiler/parser.c++ - ${CAPNPROTO_SOURCE_DIR}/capnp/compiler/node-translator.c++ - ${CAPNPROTO_SOURCE_DIR}/capnp/compiler/compiler.c++ - ${CAPNPROTO_SOURCE_DIR}/capnp/schema-parser.c++ - ${CAPNPROTO_SOURCE_DIR}/capnp/serialize-text.c++ + "${CAPNPROTO_SOURCE_DIR}/capnp/compiler/type-id.c++" + "${CAPNPROTO_SOURCE_DIR}/capnp/compiler/error-reporter.c++" + "${CAPNPROTO_SOURCE_DIR}/capnp/compiler/lexer.capnp.c++" + "${CAPNPROTO_SOURCE_DIR}/capnp/compiler/lexer.c++" + "${CAPNPROTO_SOURCE_DIR}/capnp/compiler/grammar.capnp.c++" + "${CAPNPROTO_SOURCE_DIR}/capnp/compiler/parser.c++" + "${CAPNPROTO_SOURCE_DIR}/capnp/compiler/node-translator.c++" + "${CAPNPROTO_SOURCE_DIR}/capnp/compiler/compiler.c++" + "${CAPNPROTO_SOURCE_DIR}/capnp/schema-parser.c++" + "${CAPNPROTO_SOURCE_DIR}/capnp/serialize-text.c++" ) add_library(capnpc ${CAPNPC_SRCS}) diff --git a/contrib/cctz-cmake/CMakeLists.txt b/contrib/cctz-cmake/CMakeLists.txt index a3869478347..93413693796 100644 --- a/contrib/cctz-cmake/CMakeLists.txt +++ b/contrib/cctz-cmake/CMakeLists.txt @@ -40,23 +40,23 @@ endif() if (NOT EXTERNAL_CCTZ_LIBRARY_FOUND OR NOT EXTERNAL_CCTZ_LIBRARY_WORKS) set(USE_INTERNAL_CCTZ_LIBRARY 1) - set(LIBRARY_DIR ${ClickHouse_SOURCE_DIR}/contrib/cctz) + set(LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/cctz") set (SRCS - ${LIBRARY_DIR}/src/civil_time_detail.cc - ${LIBRARY_DIR}/src/time_zone_fixed.cc - ${LIBRARY_DIR}/src/time_zone_format.cc - ${LIBRARY_DIR}/src/time_zone_if.cc - ${LIBRARY_DIR}/src/time_zone_impl.cc - ${LIBRARY_DIR}/src/time_zone_info.cc - ${LIBRARY_DIR}/src/time_zone_libc.cc - ${LIBRARY_DIR}/src/time_zone_lookup.cc - ${LIBRARY_DIR}/src/time_zone_posix.cc - ${LIBRARY_DIR}/src/zone_info_source.cc + "${LIBRARY_DIR}/src/civil_time_detail.cc" + "${LIBRARY_DIR}/src/time_zone_fixed.cc" + "${LIBRARY_DIR}/src/time_zone_format.cc" + "${LIBRARY_DIR}/src/time_zone_if.cc" + "${LIBRARY_DIR}/src/time_zone_impl.cc" + "${LIBRARY_DIR}/src/time_zone_info.cc" + "${LIBRARY_DIR}/src/time_zone_libc.cc" + "${LIBRARY_DIR}/src/time_zone_lookup.cc" + "${LIBRARY_DIR}/src/time_zone_posix.cc" + "${LIBRARY_DIR}/src/zone_info_source.cc" ) add_library (cctz ${SRCS}) - target_include_directories (cctz PUBLIC ${LIBRARY_DIR}/include) + target_include_directories (cctz PUBLIC "${LIBRARY_DIR}/include") if (OS_FREEBSD) # yes, need linux, because bsd check inside linux in time_zone_libc.cc:24 @@ -73,8 +73,8 @@ if (NOT EXTERNAL_CCTZ_LIBRARY_FOUND OR NOT EXTERNAL_CCTZ_LIBRARY_WORKS) # Build a libray with 
embedded tzdata if (OS_LINUX) # get the list of timezones from tzdata shipped with cctz - set(TZDIR ${LIBRARY_DIR}/testdata/zoneinfo) - file(STRINGS ${LIBRARY_DIR}/testdata/version TZDATA_VERSION) + set(TZDIR "${LIBRARY_DIR}/testdata/zoneinfo") + file(STRINGS "${LIBRARY_DIR}/testdata/version" TZDATA_VERSION) set_property(GLOBAL PROPERTY TZDATA_VERSION_PROP "${TZDATA_VERSION}") message(STATUS "Packaging with tzdata version: ${TZDATA_VERSION}") @@ -100,15 +100,15 @@ if (NOT EXTERNAL_CCTZ_LIBRARY_FOUND OR NOT EXTERNAL_CCTZ_LIBRARY_WORKS) # PPC64LE fails to do this with objcopy, use ld or lld instead if (ARCH_PPC64LE) add_custom_command(OUTPUT ${TZ_OBJ} - COMMAND cp ${TZDIR}/${TIMEZONE} ${CMAKE_CURRENT_BINARY_DIR}/${TIMEZONE_ID} + COMMAND cp "${TZDIR}/${TIMEZONE}" "${CMAKE_CURRENT_BINARY_DIR}/${TIMEZONE_ID}" COMMAND cd ${CMAKE_CURRENT_BINARY_DIR} && ${CMAKE_LINKER} -m elf64lppc -r -b binary -o ${TZ_OBJ} ${TIMEZONE_ID} - COMMAND rm ${CMAKE_CURRENT_BINARY_DIR}/${TIMEZONE_ID}) + COMMAND rm "${CMAKE_CURRENT_BINARY_DIR}/${TIMEZONE_ID}") else() add_custom_command(OUTPUT ${TZ_OBJ} - COMMAND cp ${TZDIR}/${TIMEZONE} ${CMAKE_CURRENT_BINARY_DIR}/${TIMEZONE_ID} + COMMAND cp "${TZDIR}/${TIMEZONE}" "${CMAKE_CURRENT_BINARY_DIR}/${TIMEZONE_ID}" COMMAND cd ${CMAKE_CURRENT_BINARY_DIR} && ${OBJCOPY_PATH} -I binary ${OBJCOPY_ARCH_OPTIONS} --rename-section .data=.rodata,alloc,load,readonly,data,contents ${TIMEZONE_ID} ${TZ_OBJ} - COMMAND rm ${CMAKE_CURRENT_BINARY_DIR}/${TIMEZONE_ID}) + COMMAND rm "${CMAKE_CURRENT_BINARY_DIR}/${TIMEZONE_ID}") endif() set_source_files_properties(${TZ_OBJ} PROPERTIES EXTERNAL_OBJECT true GENERATED true) endforeach(TIMEZONE) diff --git a/contrib/cppkafka b/contrib/cppkafka index b06e64ef5bf..57a599d99c5 160000 --- a/contrib/cppkafka +++ b/contrib/cppkafka @@ -1 +1 @@ -Subproject commit b06e64ef5bffd636d918a742c689f69130c1dbab +Subproject commit 57a599d99c540e647bcd0eb9ea77c523cca011b3 diff --git a/contrib/cppkafka-cmake/CMakeLists.txt b/contrib/cppkafka-cmake/CMakeLists.txt index 9f512974948..0bc33ada529 100644 --- a/contrib/cppkafka-cmake/CMakeLists.txt +++ b/contrib/cppkafka-cmake/CMakeLists.txt @@ -1,25 +1,25 @@ -set(LIBRARY_DIR ${ClickHouse_SOURCE_DIR}/contrib/cppkafka) +set(LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/cppkafka") set(SRCS - ${LIBRARY_DIR}/src/buffer.cpp - ${LIBRARY_DIR}/src/configuration_option.cpp - ${LIBRARY_DIR}/src/configuration.cpp - ${LIBRARY_DIR}/src/consumer.cpp - ${LIBRARY_DIR}/src/error.cpp - ${LIBRARY_DIR}/src/event.cpp - ${LIBRARY_DIR}/src/exceptions.cpp - ${LIBRARY_DIR}/src/group_information.cpp - ${LIBRARY_DIR}/src/kafka_handle_base.cpp - ${LIBRARY_DIR}/src/message_internal.cpp - ${LIBRARY_DIR}/src/message_timestamp.cpp - ${LIBRARY_DIR}/src/message.cpp - ${LIBRARY_DIR}/src/metadata.cpp - ${LIBRARY_DIR}/src/producer.cpp - ${LIBRARY_DIR}/src/queue.cpp - ${LIBRARY_DIR}/src/topic_configuration.cpp - ${LIBRARY_DIR}/src/topic_partition_list.cpp - ${LIBRARY_DIR}/src/topic_partition.cpp - ${LIBRARY_DIR}/src/topic.cpp + "${LIBRARY_DIR}/src/buffer.cpp" + "${LIBRARY_DIR}/src/configuration_option.cpp" + "${LIBRARY_DIR}/src/configuration.cpp" + "${LIBRARY_DIR}/src/consumer.cpp" + "${LIBRARY_DIR}/src/error.cpp" + "${LIBRARY_DIR}/src/event.cpp" + "${LIBRARY_DIR}/src/exceptions.cpp" + "${LIBRARY_DIR}/src/group_information.cpp" + "${LIBRARY_DIR}/src/kafka_handle_base.cpp" + "${LIBRARY_DIR}/src/message_internal.cpp" + "${LIBRARY_DIR}/src/message_timestamp.cpp" + "${LIBRARY_DIR}/src/message.cpp" + "${LIBRARY_DIR}/src/metadata.cpp" + "${LIBRARY_DIR}/src/producer.cpp" + 
"${LIBRARY_DIR}/src/queue.cpp" + "${LIBRARY_DIR}/src/topic_configuration.cpp" + "${LIBRARY_DIR}/src/topic_partition_list.cpp" + "${LIBRARY_DIR}/src/topic_partition.cpp" + "${LIBRARY_DIR}/src/topic.cpp" ) add_library(cppkafka ${SRCS}) @@ -29,5 +29,5 @@ target_link_libraries(cppkafka ${RDKAFKA_LIBRARY} boost::headers_only ) -target_include_directories(cppkafka PRIVATE ${LIBRARY_DIR}/include/cppkafka) -target_include_directories(cppkafka SYSTEM BEFORE PUBLIC ${LIBRARY_DIR}/include) +target_include_directories(cppkafka PRIVATE "${LIBRARY_DIR}/include/cppkafka") +target_include_directories(cppkafka SYSTEM BEFORE PUBLIC "${LIBRARY_DIR}/include") diff --git a/contrib/croaring-cmake/CMakeLists.txt b/contrib/croaring-cmake/CMakeLists.txt index 8a8ca62e051..f4a5d8a01dc 100644 --- a/contrib/croaring-cmake/CMakeLists.txt +++ b/contrib/croaring-cmake/CMakeLists.txt @@ -1,26 +1,26 @@ -set(LIBRARY_DIR ${ClickHouse_SOURCE_DIR}/contrib/croaring) +set(LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/croaring") set(SRCS - ${LIBRARY_DIR}/src/array_util.c - ${LIBRARY_DIR}/src/bitset_util.c - ${LIBRARY_DIR}/src/containers/array.c - ${LIBRARY_DIR}/src/containers/bitset.c - ${LIBRARY_DIR}/src/containers/containers.c - ${LIBRARY_DIR}/src/containers/convert.c - ${LIBRARY_DIR}/src/containers/mixed_intersection.c - ${LIBRARY_DIR}/src/containers/mixed_union.c - ${LIBRARY_DIR}/src/containers/mixed_equal.c - ${LIBRARY_DIR}/src/containers/mixed_subset.c - ${LIBRARY_DIR}/src/containers/mixed_negation.c - ${LIBRARY_DIR}/src/containers/mixed_xor.c - ${LIBRARY_DIR}/src/containers/mixed_andnot.c - ${LIBRARY_DIR}/src/containers/run.c - ${LIBRARY_DIR}/src/roaring.c - ${LIBRARY_DIR}/src/roaring_priority_queue.c - ${LIBRARY_DIR}/src/roaring_array.c) + "${LIBRARY_DIR}/src/array_util.c" + "${LIBRARY_DIR}/src/bitset_util.c" + "${LIBRARY_DIR}/src/containers/array.c" + "${LIBRARY_DIR}/src/containers/bitset.c" + "${LIBRARY_DIR}/src/containers/containers.c" + "${LIBRARY_DIR}/src/containers/convert.c" + "${LIBRARY_DIR}/src/containers/mixed_intersection.c" + "${LIBRARY_DIR}/src/containers/mixed_union.c" + "${LIBRARY_DIR}/src/containers/mixed_equal.c" + "${LIBRARY_DIR}/src/containers/mixed_subset.c" + "${LIBRARY_DIR}/src/containers/mixed_negation.c" + "${LIBRARY_DIR}/src/containers/mixed_xor.c" + "${LIBRARY_DIR}/src/containers/mixed_andnot.c" + "${LIBRARY_DIR}/src/containers/run.c" + "${LIBRARY_DIR}/src/roaring.c" + "${LIBRARY_DIR}/src/roaring_priority_queue.c" + "${LIBRARY_DIR}/src/roaring_array.c") add_library(roaring ${SRCS}) -target_include_directories(roaring PRIVATE ${LIBRARY_DIR}/include/roaring) -target_include_directories(roaring SYSTEM BEFORE PUBLIC ${LIBRARY_DIR}/include) -target_include_directories(roaring SYSTEM BEFORE PUBLIC ${LIBRARY_DIR}/cpp) +target_include_directories(roaring PRIVATE "${LIBRARY_DIR}/include/roaring") +target_include_directories(roaring SYSTEM BEFORE PUBLIC "${LIBRARY_DIR}/include") +target_include_directories(roaring SYSTEM BEFORE PUBLIC "${LIBRARY_DIR}/cpp") diff --git a/contrib/curl-cmake/CMakeLists.txt b/contrib/curl-cmake/CMakeLists.txt index a24c9fa8765..1f7449af914 100644 --- a/contrib/curl-cmake/CMakeLists.txt +++ b/contrib/curl-cmake/CMakeLists.txt @@ -5,143 +5,143 @@ endif() set (LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/curl") set (SRCS - ${LIBRARY_DIR}/lib/file.c - ${LIBRARY_DIR}/lib/timeval.c - ${LIBRARY_DIR}/lib/base64.c - ${LIBRARY_DIR}/lib/hostip.c - ${LIBRARY_DIR}/lib/progress.c - ${LIBRARY_DIR}/lib/formdata.c - ${LIBRARY_DIR}/lib/cookie.c - ${LIBRARY_DIR}/lib/http.c - 
${LIBRARY_DIR}/lib/sendf.c - ${LIBRARY_DIR}/lib/url.c - ${LIBRARY_DIR}/lib/dict.c - ${LIBRARY_DIR}/lib/if2ip.c - ${LIBRARY_DIR}/lib/speedcheck.c - ${LIBRARY_DIR}/lib/ldap.c - ${LIBRARY_DIR}/lib/version.c - ${LIBRARY_DIR}/lib/getenv.c - ${LIBRARY_DIR}/lib/escape.c - ${LIBRARY_DIR}/lib/mprintf.c - ${LIBRARY_DIR}/lib/telnet.c - ${LIBRARY_DIR}/lib/netrc.c - ${LIBRARY_DIR}/lib/getinfo.c - ${LIBRARY_DIR}/lib/transfer.c - ${LIBRARY_DIR}/lib/strcase.c - ${LIBRARY_DIR}/lib/easy.c - ${LIBRARY_DIR}/lib/security.c - ${LIBRARY_DIR}/lib/curl_fnmatch.c - ${LIBRARY_DIR}/lib/fileinfo.c - ${LIBRARY_DIR}/lib/wildcard.c - ${LIBRARY_DIR}/lib/krb5.c - ${LIBRARY_DIR}/lib/memdebug.c - ${LIBRARY_DIR}/lib/http_chunks.c - ${LIBRARY_DIR}/lib/strtok.c - ${LIBRARY_DIR}/lib/connect.c - ${LIBRARY_DIR}/lib/llist.c - ${LIBRARY_DIR}/lib/hash.c - ${LIBRARY_DIR}/lib/multi.c - ${LIBRARY_DIR}/lib/content_encoding.c - ${LIBRARY_DIR}/lib/share.c - ${LIBRARY_DIR}/lib/http_digest.c - ${LIBRARY_DIR}/lib/md4.c - ${LIBRARY_DIR}/lib/md5.c - ${LIBRARY_DIR}/lib/http_negotiate.c - ${LIBRARY_DIR}/lib/inet_pton.c - ${LIBRARY_DIR}/lib/strtoofft.c - ${LIBRARY_DIR}/lib/strerror.c - ${LIBRARY_DIR}/lib/amigaos.c - ${LIBRARY_DIR}/lib/hostasyn.c - ${LIBRARY_DIR}/lib/hostip4.c - ${LIBRARY_DIR}/lib/hostip6.c - ${LIBRARY_DIR}/lib/hostsyn.c - ${LIBRARY_DIR}/lib/inet_ntop.c - ${LIBRARY_DIR}/lib/parsedate.c - ${LIBRARY_DIR}/lib/select.c - ${LIBRARY_DIR}/lib/splay.c - ${LIBRARY_DIR}/lib/strdup.c - ${LIBRARY_DIR}/lib/socks.c - ${LIBRARY_DIR}/lib/curl_addrinfo.c - ${LIBRARY_DIR}/lib/socks_gssapi.c - ${LIBRARY_DIR}/lib/socks_sspi.c - ${LIBRARY_DIR}/lib/curl_sspi.c - ${LIBRARY_DIR}/lib/slist.c - ${LIBRARY_DIR}/lib/nonblock.c - ${LIBRARY_DIR}/lib/curl_memrchr.c - ${LIBRARY_DIR}/lib/imap.c - ${LIBRARY_DIR}/lib/pop3.c - ${LIBRARY_DIR}/lib/smtp.c - ${LIBRARY_DIR}/lib/pingpong.c - ${LIBRARY_DIR}/lib/rtsp.c - ${LIBRARY_DIR}/lib/curl_threads.c - ${LIBRARY_DIR}/lib/warnless.c - ${LIBRARY_DIR}/lib/hmac.c - ${LIBRARY_DIR}/lib/curl_rtmp.c - ${LIBRARY_DIR}/lib/openldap.c - ${LIBRARY_DIR}/lib/curl_gethostname.c - ${LIBRARY_DIR}/lib/gopher.c - ${LIBRARY_DIR}/lib/idn_win32.c - ${LIBRARY_DIR}/lib/http_proxy.c - ${LIBRARY_DIR}/lib/non-ascii.c - ${LIBRARY_DIR}/lib/asyn-thread.c - ${LIBRARY_DIR}/lib/curl_gssapi.c - ${LIBRARY_DIR}/lib/http_ntlm.c - ${LIBRARY_DIR}/lib/curl_ntlm_wb.c - ${LIBRARY_DIR}/lib/curl_ntlm_core.c - ${LIBRARY_DIR}/lib/curl_sasl.c - ${LIBRARY_DIR}/lib/rand.c - ${LIBRARY_DIR}/lib/curl_multibyte.c - ${LIBRARY_DIR}/lib/hostcheck.c - ${LIBRARY_DIR}/lib/conncache.c - ${LIBRARY_DIR}/lib/dotdot.c - ${LIBRARY_DIR}/lib/x509asn1.c - ${LIBRARY_DIR}/lib/http2.c - ${LIBRARY_DIR}/lib/smb.c - ${LIBRARY_DIR}/lib/curl_endian.c - ${LIBRARY_DIR}/lib/curl_des.c - ${LIBRARY_DIR}/lib/system_win32.c - ${LIBRARY_DIR}/lib/mime.c - ${LIBRARY_DIR}/lib/sha256.c - ${LIBRARY_DIR}/lib/setopt.c - ${LIBRARY_DIR}/lib/curl_path.c - ${LIBRARY_DIR}/lib/curl_ctype.c - ${LIBRARY_DIR}/lib/curl_range.c - ${LIBRARY_DIR}/lib/psl.c - ${LIBRARY_DIR}/lib/doh.c - ${LIBRARY_DIR}/lib/urlapi.c - ${LIBRARY_DIR}/lib/curl_get_line.c - ${LIBRARY_DIR}/lib/altsvc.c - ${LIBRARY_DIR}/lib/socketpair.c - ${LIBRARY_DIR}/lib/vauth/vauth.c - ${LIBRARY_DIR}/lib/vauth/cleartext.c - ${LIBRARY_DIR}/lib/vauth/cram.c - ${LIBRARY_DIR}/lib/vauth/digest.c - ${LIBRARY_DIR}/lib/vauth/digest_sspi.c - ${LIBRARY_DIR}/lib/vauth/krb5_gssapi.c - ${LIBRARY_DIR}/lib/vauth/krb5_sspi.c - ${LIBRARY_DIR}/lib/vauth/ntlm.c - ${LIBRARY_DIR}/lib/vauth/ntlm_sspi.c - ${LIBRARY_DIR}/lib/vauth/oauth2.c - ${LIBRARY_DIR}/lib/vauth/spnego_gssapi.c - 
${LIBRARY_DIR}/lib/vauth/spnego_sspi.c - ${LIBRARY_DIR}/lib/vtls/openssl.c - ${LIBRARY_DIR}/lib/vtls/gtls.c - ${LIBRARY_DIR}/lib/vtls/vtls.c - ${LIBRARY_DIR}/lib/vtls/nss.c - ${LIBRARY_DIR}/lib/vtls/polarssl.c - ${LIBRARY_DIR}/lib/vtls/polarssl_threadlock.c - ${LIBRARY_DIR}/lib/vtls/wolfssl.c - ${LIBRARY_DIR}/lib/vtls/schannel.c - ${LIBRARY_DIR}/lib/vtls/schannel_verify.c - ${LIBRARY_DIR}/lib/vtls/sectransp.c - ${LIBRARY_DIR}/lib/vtls/gskit.c - ${LIBRARY_DIR}/lib/vtls/mbedtls.c - ${LIBRARY_DIR}/lib/vtls/mesalink.c - ${LIBRARY_DIR}/lib/vtls/bearssl.c - ${LIBRARY_DIR}/lib/vquic/ngtcp2.c - ${LIBRARY_DIR}/lib/vquic/quiche.c - ${LIBRARY_DIR}/lib/vssh/libssh2.c - ${LIBRARY_DIR}/lib/vssh/libssh.c + "${LIBRARY_DIR}/lib/file.c" + "${LIBRARY_DIR}/lib/timeval.c" + "${LIBRARY_DIR}/lib/base64.c" + "${LIBRARY_DIR}/lib/hostip.c" + "${LIBRARY_DIR}/lib/progress.c" + "${LIBRARY_DIR}/lib/formdata.c" + "${LIBRARY_DIR}/lib/cookie.c" + "${LIBRARY_DIR}/lib/http.c" + "${LIBRARY_DIR}/lib/sendf.c" + "${LIBRARY_DIR}/lib/url.c" + "${LIBRARY_DIR}/lib/dict.c" + "${LIBRARY_DIR}/lib/if2ip.c" + "${LIBRARY_DIR}/lib/speedcheck.c" + "${LIBRARY_DIR}/lib/ldap.c" + "${LIBRARY_DIR}/lib/version.c" + "${LIBRARY_DIR}/lib/getenv.c" + "${LIBRARY_DIR}/lib/escape.c" + "${LIBRARY_DIR}/lib/mprintf.c" + "${LIBRARY_DIR}/lib/telnet.c" + "${LIBRARY_DIR}/lib/netrc.c" + "${LIBRARY_DIR}/lib/getinfo.c" + "${LIBRARY_DIR}/lib/transfer.c" + "${LIBRARY_DIR}/lib/strcase.c" + "${LIBRARY_DIR}/lib/easy.c" + "${LIBRARY_DIR}/lib/security.c" + "${LIBRARY_DIR}/lib/curl_fnmatch.c" + "${LIBRARY_DIR}/lib/fileinfo.c" + "${LIBRARY_DIR}/lib/wildcard.c" + "${LIBRARY_DIR}/lib/krb5.c" + "${LIBRARY_DIR}/lib/memdebug.c" + "${LIBRARY_DIR}/lib/http_chunks.c" + "${LIBRARY_DIR}/lib/strtok.c" + "${LIBRARY_DIR}/lib/connect.c" + "${LIBRARY_DIR}/lib/llist.c" + "${LIBRARY_DIR}/lib/hash.c" + "${LIBRARY_DIR}/lib/multi.c" + "${LIBRARY_DIR}/lib/content_encoding.c" + "${LIBRARY_DIR}/lib/share.c" + "${LIBRARY_DIR}/lib/http_digest.c" + "${LIBRARY_DIR}/lib/md4.c" + "${LIBRARY_DIR}/lib/md5.c" + "${LIBRARY_DIR}/lib/http_negotiate.c" + "${LIBRARY_DIR}/lib/inet_pton.c" + "${LIBRARY_DIR}/lib/strtoofft.c" + "${LIBRARY_DIR}/lib/strerror.c" + "${LIBRARY_DIR}/lib/amigaos.c" + "${LIBRARY_DIR}/lib/hostasyn.c" + "${LIBRARY_DIR}/lib/hostip4.c" + "${LIBRARY_DIR}/lib/hostip6.c" + "${LIBRARY_DIR}/lib/hostsyn.c" + "${LIBRARY_DIR}/lib/inet_ntop.c" + "${LIBRARY_DIR}/lib/parsedate.c" + "${LIBRARY_DIR}/lib/select.c" + "${LIBRARY_DIR}/lib/splay.c" + "${LIBRARY_DIR}/lib/strdup.c" + "${LIBRARY_DIR}/lib/socks.c" + "${LIBRARY_DIR}/lib/curl_addrinfo.c" + "${LIBRARY_DIR}/lib/socks_gssapi.c" + "${LIBRARY_DIR}/lib/socks_sspi.c" + "${LIBRARY_DIR}/lib/curl_sspi.c" + "${LIBRARY_DIR}/lib/slist.c" + "${LIBRARY_DIR}/lib/nonblock.c" + "${LIBRARY_DIR}/lib/curl_memrchr.c" + "${LIBRARY_DIR}/lib/imap.c" + "${LIBRARY_DIR}/lib/pop3.c" + "${LIBRARY_DIR}/lib/smtp.c" + "${LIBRARY_DIR}/lib/pingpong.c" + "${LIBRARY_DIR}/lib/rtsp.c" + "${LIBRARY_DIR}/lib/curl_threads.c" + "${LIBRARY_DIR}/lib/warnless.c" + "${LIBRARY_DIR}/lib/hmac.c" + "${LIBRARY_DIR}/lib/curl_rtmp.c" + "${LIBRARY_DIR}/lib/openldap.c" + "${LIBRARY_DIR}/lib/curl_gethostname.c" + "${LIBRARY_DIR}/lib/gopher.c" + "${LIBRARY_DIR}/lib/idn_win32.c" + "${LIBRARY_DIR}/lib/http_proxy.c" + "${LIBRARY_DIR}/lib/non-ascii.c" + "${LIBRARY_DIR}/lib/asyn-thread.c" + "${LIBRARY_DIR}/lib/curl_gssapi.c" + "${LIBRARY_DIR}/lib/http_ntlm.c" + "${LIBRARY_DIR}/lib/curl_ntlm_wb.c" + "${LIBRARY_DIR}/lib/curl_ntlm_core.c" + "${LIBRARY_DIR}/lib/curl_sasl.c" + "${LIBRARY_DIR}/lib/rand.c" + 
"${LIBRARY_DIR}/lib/curl_multibyte.c" + "${LIBRARY_DIR}/lib/hostcheck.c" + "${LIBRARY_DIR}/lib/conncache.c" + "${LIBRARY_DIR}/lib/dotdot.c" + "${LIBRARY_DIR}/lib/x509asn1.c" + "${LIBRARY_DIR}/lib/http2.c" + "${LIBRARY_DIR}/lib/smb.c" + "${LIBRARY_DIR}/lib/curl_endian.c" + "${LIBRARY_DIR}/lib/curl_des.c" + "${LIBRARY_DIR}/lib/system_win32.c" + "${LIBRARY_DIR}/lib/mime.c" + "${LIBRARY_DIR}/lib/sha256.c" + "${LIBRARY_DIR}/lib/setopt.c" + "${LIBRARY_DIR}/lib/curl_path.c" + "${LIBRARY_DIR}/lib/curl_ctype.c" + "${LIBRARY_DIR}/lib/curl_range.c" + "${LIBRARY_DIR}/lib/psl.c" + "${LIBRARY_DIR}/lib/doh.c" + "${LIBRARY_DIR}/lib/urlapi.c" + "${LIBRARY_DIR}/lib/curl_get_line.c" + "${LIBRARY_DIR}/lib/altsvc.c" + "${LIBRARY_DIR}/lib/socketpair.c" + "${LIBRARY_DIR}/lib/vauth/vauth.c" + "${LIBRARY_DIR}/lib/vauth/cleartext.c" + "${LIBRARY_DIR}/lib/vauth/cram.c" + "${LIBRARY_DIR}/lib/vauth/digest.c" + "${LIBRARY_DIR}/lib/vauth/digest_sspi.c" + "${LIBRARY_DIR}/lib/vauth/krb5_gssapi.c" + "${LIBRARY_DIR}/lib/vauth/krb5_sspi.c" + "${LIBRARY_DIR}/lib/vauth/ntlm.c" + "${LIBRARY_DIR}/lib/vauth/ntlm_sspi.c" + "${LIBRARY_DIR}/lib/vauth/oauth2.c" + "${LIBRARY_DIR}/lib/vauth/spnego_gssapi.c" + "${LIBRARY_DIR}/lib/vauth/spnego_sspi.c" + "${LIBRARY_DIR}/lib/vtls/openssl.c" + "${LIBRARY_DIR}/lib/vtls/gtls.c" + "${LIBRARY_DIR}/lib/vtls/vtls.c" + "${LIBRARY_DIR}/lib/vtls/nss.c" + "${LIBRARY_DIR}/lib/vtls/polarssl.c" + "${LIBRARY_DIR}/lib/vtls/polarssl_threadlock.c" + "${LIBRARY_DIR}/lib/vtls/wolfssl.c" + "${LIBRARY_DIR}/lib/vtls/schannel.c" + "${LIBRARY_DIR}/lib/vtls/schannel_verify.c" + "${LIBRARY_DIR}/lib/vtls/sectransp.c" + "${LIBRARY_DIR}/lib/vtls/gskit.c" + "${LIBRARY_DIR}/lib/vtls/mbedtls.c" + "${LIBRARY_DIR}/lib/vtls/mesalink.c" + "${LIBRARY_DIR}/lib/vtls/bearssl.c" + "${LIBRARY_DIR}/lib/vquic/ngtcp2.c" + "${LIBRARY_DIR}/lib/vquic/quiche.c" + "${LIBRARY_DIR}/lib/vssh/libssh2.c" + "${LIBRARY_DIR}/lib/vssh/libssh.c" ) add_library (curl ${SRCS}) @@ -154,8 +154,8 @@ target_compile_definitions (curl PRIVATE OS="${CMAKE_SYSTEM_NAME}" ) target_include_directories (curl PUBLIC - ${LIBRARY_DIR}/include - ${LIBRARY_DIR}/lib + "${LIBRARY_DIR}/include" + "${LIBRARY_DIR}/lib" . 
# curl_config.h ) @@ -171,8 +171,8 @@ target_compile_options (curl PRIVATE -g0) # - sentry-native set (CURL_FOUND ON CACHE BOOL "") set (CURL_ROOT_DIR ${LIBRARY_DIR} CACHE PATH "") -set (CURL_INCLUDE_DIR ${LIBRARY_DIR}/include CACHE PATH "") -set (CURL_INCLUDE_DIRS ${LIBRARY_DIR}/include CACHE PATH "") +set (CURL_INCLUDE_DIR "${LIBRARY_DIR}/include" CACHE PATH "") +set (CURL_INCLUDE_DIRS "${LIBRARY_DIR}/include" CACHE PATH "") set (CURL_LIBRARY curl CACHE STRING "") set (CURL_LIBRARIES ${CURL_LIBRARY} CACHE STRING "") set (CURL_VERSION_STRING 7.67.0 CACHE STRING "") diff --git a/contrib/cyrus-sasl b/contrib/cyrus-sasl index 9995bf9d8e1..e6466edfd63 160000 --- a/contrib/cyrus-sasl +++ b/contrib/cyrus-sasl @@ -1 +1 @@ -Subproject commit 9995bf9d8e14f58934d9313ac64f13780d6dd3c9 +Subproject commit e6466edfd638cc5073debe941c53345b18a09512 diff --git a/contrib/cyrus-sasl-cmake/CMakeLists.txt b/contrib/cyrus-sasl-cmake/CMakeLists.txt index 5003c9a21db..aa25a078718 100644 --- a/contrib/cyrus-sasl-cmake/CMakeLists.txt +++ b/contrib/cyrus-sasl-cmake/CMakeLists.txt @@ -1,23 +1,23 @@ -set(CYRUS_SASL_SOURCE_DIR ${ClickHouse_SOURCE_DIR}/contrib/cyrus-sasl) +set(CYRUS_SASL_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/cyrus-sasl") add_library(${CYRUS_SASL_LIBRARY}) target_sources(${CYRUS_SASL_LIBRARY} PRIVATE - ${CYRUS_SASL_SOURCE_DIR}/plugins/gssapi.c - # ${CYRUS_SASL_SOURCE_DIR}/plugins/gssapiv2_init.c - ${CYRUS_SASL_SOURCE_DIR}/common/plugin_common.c - ${CYRUS_SASL_SOURCE_DIR}/lib/common.c - ${CYRUS_SASL_SOURCE_DIR}/lib/canonusr.c - ${CYRUS_SASL_SOURCE_DIR}/lib/server.c - ${CYRUS_SASL_SOURCE_DIR}/lib/config.c - ${CYRUS_SASL_SOURCE_DIR}/lib/auxprop.c - ${CYRUS_SASL_SOURCE_DIR}/lib/saslutil.c - ${CYRUS_SASL_SOURCE_DIR}/lib/external.c - ${CYRUS_SASL_SOURCE_DIR}/lib/seterror.c - ${CYRUS_SASL_SOURCE_DIR}/lib/md5.c - ${CYRUS_SASL_SOURCE_DIR}/lib/dlopen.c - ${CYRUS_SASL_SOURCE_DIR}/lib/client.c - ${CYRUS_SASL_SOURCE_DIR}/lib/checkpw.c + "${CYRUS_SASL_SOURCE_DIR}/plugins/gssapi.c" + # "${CYRUS_SASL_SOURCE_DIR}/plugins/gssapiv2_init.c" + "${CYRUS_SASL_SOURCE_DIR}/common/plugin_common.c" + "${CYRUS_SASL_SOURCE_DIR}/lib/common.c" + "${CYRUS_SASL_SOURCE_DIR}/lib/canonusr.c" + "${CYRUS_SASL_SOURCE_DIR}/lib/server.c" + "${CYRUS_SASL_SOURCE_DIR}/lib/config.c" + "${CYRUS_SASL_SOURCE_DIR}/lib/auxprop.c" + "${CYRUS_SASL_SOURCE_DIR}/lib/saslutil.c" + "${CYRUS_SASL_SOURCE_DIR}/lib/external.c" + "${CYRUS_SASL_SOURCE_DIR}/lib/seterror.c" + "${CYRUS_SASL_SOURCE_DIR}/lib/md5.c" + "${CYRUS_SASL_SOURCE_DIR}/lib/dlopen.c" + "${CYRUS_SASL_SOURCE_DIR}/lib/client.c" + "${CYRUS_SASL_SOURCE_DIR}/lib/checkpw.c" ) target_include_directories(${CYRUS_SASL_LIBRARY} PUBLIC @@ -26,16 +26,16 @@ target_include_directories(${CYRUS_SASL_LIBRARY} PUBLIC target_include_directories(${CYRUS_SASL_LIBRARY} PRIVATE ${CMAKE_CURRENT_SOURCE_DIR} # for config.h - ${CYRUS_SASL_SOURCE_DIR}/plugins + "${CYRUS_SASL_SOURCE_DIR}/plugins" ${CYRUS_SASL_SOURCE_DIR} - ${CYRUS_SASL_SOURCE_DIR}/include - ${CYRUS_SASL_SOURCE_DIR}/lib - ${CYRUS_SASL_SOURCE_DIR}/sasldb - ${CYRUS_SASL_SOURCE_DIR}/common - ${CYRUS_SASL_SOURCE_DIR}/saslauthd - ${CYRUS_SASL_SOURCE_DIR}/sample - ${CYRUS_SASL_SOURCE_DIR}/utils - ${CYRUS_SASL_SOURCE_DIR}/tests + "${CYRUS_SASL_SOURCE_DIR}/include" + "${CYRUS_SASL_SOURCE_DIR}/lib" + "${CYRUS_SASL_SOURCE_DIR}/sasldb" + "${CYRUS_SASL_SOURCE_DIR}/common" + "${CYRUS_SASL_SOURCE_DIR}/saslauthd" + "${CYRUS_SASL_SOURCE_DIR}/sample" + "${CYRUS_SASL_SOURCE_DIR}/utils" + "${CYRUS_SASL_SOURCE_DIR}/tests" ) target_compile_definitions(${CYRUS_SASL_LIBRARY} 
PUBLIC @@ -52,15 +52,15 @@ target_compile_definitions(${CYRUS_SASL_LIBRARY} PUBLIC LIBSASL_EXPORTS=1 ) -file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/sasl) +file(MAKE_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/sasl") file(COPY - ${CYRUS_SASL_SOURCE_DIR}/include/sasl.h - DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/sasl + "${CYRUS_SASL_SOURCE_DIR}/include/sasl.h" + DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/sasl" ) file(COPY - ${CYRUS_SASL_SOURCE_DIR}/include/prop.h + "${CYRUS_SASL_SOURCE_DIR}/include/prop.h" DESTINATION ${CMAKE_CURRENT_BINARY_DIR} ) diff --git a/contrib/datasketches-cpp b/contrib/datasketches-cpp index f915d35b2de..7d73d7610db 160000 --- a/contrib/datasketches-cpp +++ b/contrib/datasketches-cpp @@ -1 +1 @@ -Subproject commit f915d35b2de676683493c86c585141a1e1c83334 +Subproject commit 7d73d7610db31d4e1ecde0fb3a7ee90ef371207f diff --git a/contrib/double-conversion-cmake/CMakeLists.txt b/contrib/double-conversion-cmake/CMakeLists.txt index 0690731e1b1..c8bf1b34b8f 100644 --- a/contrib/double-conversion-cmake/CMakeLists.txt +++ b/contrib/double-conversion-cmake/CMakeLists.txt @@ -1,13 +1,13 @@ -SET(LIBRARY_DIR ${ClickHouse_SOURCE_DIR}/contrib/double-conversion) +SET(LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/double-conversion") add_library(double-conversion -${LIBRARY_DIR}/double-conversion/bignum.cc -${LIBRARY_DIR}/double-conversion/bignum-dtoa.cc -${LIBRARY_DIR}/double-conversion/cached-powers.cc -${LIBRARY_DIR}/double-conversion/diy-fp.cc -${LIBRARY_DIR}/double-conversion/double-conversion.cc -${LIBRARY_DIR}/double-conversion/fast-dtoa.cc -${LIBRARY_DIR}/double-conversion/fixed-dtoa.cc -${LIBRARY_DIR}/double-conversion/strtod.cc) +"${LIBRARY_DIR}/double-conversion/bignum.cc" +"${LIBRARY_DIR}/double-conversion/bignum-dtoa.cc" +"${LIBRARY_DIR}/double-conversion/cached-powers.cc" +"${LIBRARY_DIR}/double-conversion/diy-fp.cc" +"${LIBRARY_DIR}/double-conversion/double-conversion.cc" +"${LIBRARY_DIR}/double-conversion/fast-dtoa.cc" +"${LIBRARY_DIR}/double-conversion/fixed-dtoa.cc" +"${LIBRARY_DIR}/double-conversion/strtod.cc") target_include_directories(double-conversion SYSTEM BEFORE PUBLIC "${LIBRARY_DIR}") diff --git a/contrib/fastops-cmake/CMakeLists.txt b/contrib/fastops-cmake/CMakeLists.txt index 0269d5603c2..fe7293c614b 100644 --- a/contrib/fastops-cmake/CMakeLists.txt +++ b/contrib/fastops-cmake/CMakeLists.txt @@ -1,18 +1,18 @@ -set(LIBRARY_DIR ${ClickHouse_SOURCE_DIR}/contrib/fastops) +set(LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/fastops") set(SRCS "") if(HAVE_AVX) - set (SRCS ${SRCS} ${LIBRARY_DIR}/fastops/avx/ops_avx.cpp) - set_source_files_properties(${LIBRARY_DIR}/fastops/avx/ops_avx.cpp PROPERTIES COMPILE_FLAGS "-mavx -DNO_AVX2") + set (SRCS ${SRCS} "${LIBRARY_DIR}/fastops/avx/ops_avx.cpp") + set_source_files_properties("${LIBRARY_DIR}/fastops/avx/ops_avx.cpp" PROPERTIES COMPILE_FLAGS "-mavx -DNO_AVX2") endif() if(HAVE_AVX2) - set (SRCS ${SRCS} ${LIBRARY_DIR}/fastops/avx2/ops_avx2.cpp) - set_source_files_properties(${LIBRARY_DIR}/fastops/avx2/ops_avx2.cpp PROPERTIES COMPILE_FLAGS "-mavx2 -mfma") + set (SRCS ${SRCS} "${LIBRARY_DIR}/fastops/avx2/ops_avx2.cpp") + set_source_files_properties("${LIBRARY_DIR}/fastops/avx2/ops_avx2.cpp" PROPERTIES COMPILE_FLAGS "-mavx2 -mfma") endif() -set (SRCS ${SRCS} ${LIBRARY_DIR}/fastops/plain/ops_plain.cpp ${LIBRARY_DIR}/fastops/core/avx_id.cpp ${LIBRARY_DIR}/fastops/fastops.cpp) +set (SRCS ${SRCS} "${LIBRARY_DIR}/fastops/plain/ops_plain.cpp" "${LIBRARY_DIR}/fastops/core/avx_id.cpp" "${LIBRARY_DIR}/fastops/fastops.cpp") 
add_library(fastops ${SRCS}) diff --git a/contrib/flatbuffers b/contrib/flatbuffers index 6df40a24717..22e3ffc66d2 160000 --- a/contrib/flatbuffers +++ b/contrib/flatbuffers @@ -1 +1 @@ -Subproject commit 6df40a2471737b27271bdd9b900ab5f3aec746c7 +Subproject commit 22e3ffc66d2d7d72d1414390aa0f04ffd114a5a1 diff --git a/contrib/grpc b/contrib/grpc index 8d558f03fe3..5b79aae85c5 160000 --- a/contrib/grpc +++ b/contrib/grpc @@ -1 +1 @@ -Subproject commit 8d558f03fe370240081424fafa76cdc9301ea14b +Subproject commit 5b79aae85c515e0df4abfb7b1e07975fdc7cecc1 diff --git a/contrib/h3-cmake/CMakeLists.txt b/contrib/h3-cmake/CMakeLists.txt index 2911d7283f0..6b184a175b0 100644 --- a/contrib/h3-cmake/CMakeLists.txt +++ b/contrib/h3-cmake/CMakeLists.txt @@ -1,30 +1,30 @@ -set(H3_SOURCE_DIR ${ClickHouse_SOURCE_DIR}/contrib/h3/src/h3lib) -set(H3_BINARY_DIR ${ClickHouse_BINARY_DIR}/contrib/h3/src/h3lib) +set(H3_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/h3/src/h3lib") +set(H3_BINARY_DIR "${ClickHouse_BINARY_DIR}/contrib/h3/src/h3lib") set(SRCS -${H3_SOURCE_DIR}/lib/algos.c -${H3_SOURCE_DIR}/lib/baseCells.c -${H3_SOURCE_DIR}/lib/bbox.c -${H3_SOURCE_DIR}/lib/coordijk.c -${H3_SOURCE_DIR}/lib/faceijk.c -${H3_SOURCE_DIR}/lib/geoCoord.c -${H3_SOURCE_DIR}/lib/h3Index.c -${H3_SOURCE_DIR}/lib/h3UniEdge.c -${H3_SOURCE_DIR}/lib/linkedGeo.c -${H3_SOURCE_DIR}/lib/localij.c -${H3_SOURCE_DIR}/lib/mathExtensions.c -${H3_SOURCE_DIR}/lib/polygon.c -${H3_SOURCE_DIR}/lib/vec2d.c -${H3_SOURCE_DIR}/lib/vec3d.c -${H3_SOURCE_DIR}/lib/vertex.c -${H3_SOURCE_DIR}/lib/vertexGraph.c +"${H3_SOURCE_DIR}/lib/algos.c" +"${H3_SOURCE_DIR}/lib/baseCells.c" +"${H3_SOURCE_DIR}/lib/bbox.c" +"${H3_SOURCE_DIR}/lib/coordijk.c" +"${H3_SOURCE_DIR}/lib/faceijk.c" +"${H3_SOURCE_DIR}/lib/geoCoord.c" +"${H3_SOURCE_DIR}/lib/h3Index.c" +"${H3_SOURCE_DIR}/lib/h3UniEdge.c" +"${H3_SOURCE_DIR}/lib/linkedGeo.c" +"${H3_SOURCE_DIR}/lib/localij.c" +"${H3_SOURCE_DIR}/lib/mathExtensions.c" +"${H3_SOURCE_DIR}/lib/polygon.c" +"${H3_SOURCE_DIR}/lib/vec2d.c" +"${H3_SOURCE_DIR}/lib/vec3d.c" +"${H3_SOURCE_DIR}/lib/vertex.c" +"${H3_SOURCE_DIR}/lib/vertexGraph.c" ) -configure_file(${H3_SOURCE_DIR}/include/h3api.h.in ${H3_BINARY_DIR}/include/h3api.h) +configure_file("${H3_SOURCE_DIR}/include/h3api.h.in" "${H3_BINARY_DIR}/include/h3api.h") add_library(h3 ${SRCS}) -target_include_directories(h3 SYSTEM PUBLIC ${H3_SOURCE_DIR}/include) -target_include_directories(h3 SYSTEM PUBLIC ${H3_BINARY_DIR}/include) +target_include_directories(h3 SYSTEM PUBLIC "${H3_SOURCE_DIR}/include") +target_include_directories(h3 SYSTEM PUBLIC "${H3_BINARY_DIR}/include") target_compile_definitions(h3 PRIVATE H3_HAVE_VLA) if(M_LIBRARY) target_link_libraries(h3 PRIVATE ${M_LIBRARY}) diff --git a/contrib/hyperscan-cmake/CMakeLists.txt b/contrib/hyperscan-cmake/CMakeLists.txt index 75c45ff7bf5..6a364da126d 100644 --- a/contrib/hyperscan-cmake/CMakeLists.txt +++ b/contrib/hyperscan-cmake/CMakeLists.txt @@ -40,211 +40,211 @@ endif () if (NOT EXTERNAL_HYPERSCAN_LIBRARY_FOUND) set (USE_INTERNAL_HYPERSCAN_LIBRARY 1) - set (LIBRARY_DIR ${ClickHouse_SOURCE_DIR}/contrib/hyperscan) + set (LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/hyperscan") set (SRCS - ${LIBRARY_DIR}/src/alloc.c - ${LIBRARY_DIR}/src/compiler/asserts.cpp - ${LIBRARY_DIR}/src/compiler/compiler.cpp - ${LIBRARY_DIR}/src/compiler/error.cpp - ${LIBRARY_DIR}/src/crc32.c - ${LIBRARY_DIR}/src/database.c - ${LIBRARY_DIR}/src/fdr/engine_description.cpp - ${LIBRARY_DIR}/src/fdr/fdr_compile_util.cpp - ${LIBRARY_DIR}/src/fdr/fdr_compile.cpp - 
${LIBRARY_DIR}/src/fdr/fdr_confirm_compile.cpp - ${LIBRARY_DIR}/src/fdr/fdr_engine_description.cpp - ${LIBRARY_DIR}/src/fdr/fdr.c - ${LIBRARY_DIR}/src/fdr/flood_compile.cpp - ${LIBRARY_DIR}/src/fdr/teddy_compile.cpp - ${LIBRARY_DIR}/src/fdr/teddy_engine_description.cpp - ${LIBRARY_DIR}/src/fdr/teddy.c - ${LIBRARY_DIR}/src/grey.cpp - ${LIBRARY_DIR}/src/hs_valid_platform.c - ${LIBRARY_DIR}/src/hs_version.c - ${LIBRARY_DIR}/src/hs.cpp - ${LIBRARY_DIR}/src/hwlm/hwlm_build.cpp - ${LIBRARY_DIR}/src/hwlm/hwlm_literal.cpp - ${LIBRARY_DIR}/src/hwlm/hwlm.c - ${LIBRARY_DIR}/src/hwlm/noodle_build.cpp - ${LIBRARY_DIR}/src/hwlm/noodle_engine.c - ${LIBRARY_DIR}/src/nfa/accel_dfa_build_strat.cpp - ${LIBRARY_DIR}/src/nfa/accel.c - ${LIBRARY_DIR}/src/nfa/accelcompile.cpp - ${LIBRARY_DIR}/src/nfa/castle.c - ${LIBRARY_DIR}/src/nfa/castlecompile.cpp - ${LIBRARY_DIR}/src/nfa/dfa_build_strat.cpp - ${LIBRARY_DIR}/src/nfa/dfa_min.cpp - ${LIBRARY_DIR}/src/nfa/gough.c - ${LIBRARY_DIR}/src/nfa/goughcompile_accel.cpp - ${LIBRARY_DIR}/src/nfa/goughcompile_reg.cpp - ${LIBRARY_DIR}/src/nfa/goughcompile.cpp - ${LIBRARY_DIR}/src/nfa/lbr.c - ${LIBRARY_DIR}/src/nfa/limex_64.c - ${LIBRARY_DIR}/src/nfa/limex_accel.c - ${LIBRARY_DIR}/src/nfa/limex_compile.cpp - ${LIBRARY_DIR}/src/nfa/limex_native.c - ${LIBRARY_DIR}/src/nfa/limex_simd128.c - ${LIBRARY_DIR}/src/nfa/limex_simd256.c - ${LIBRARY_DIR}/src/nfa/limex_simd384.c - ${LIBRARY_DIR}/src/nfa/limex_simd512.c - ${LIBRARY_DIR}/src/nfa/mcclellan.c - ${LIBRARY_DIR}/src/nfa/mcclellancompile_util.cpp - ${LIBRARY_DIR}/src/nfa/mcclellancompile.cpp - ${LIBRARY_DIR}/src/nfa/mcsheng_compile.cpp - ${LIBRARY_DIR}/src/nfa/mcsheng_data.c - ${LIBRARY_DIR}/src/nfa/mcsheng.c - ${LIBRARY_DIR}/src/nfa/mpv.c - ${LIBRARY_DIR}/src/nfa/mpvcompile.cpp - ${LIBRARY_DIR}/src/nfa/nfa_api_dispatch.c - ${LIBRARY_DIR}/src/nfa/nfa_build_util.cpp - ${LIBRARY_DIR}/src/nfa/rdfa_graph.cpp - ${LIBRARY_DIR}/src/nfa/rdfa_merge.cpp - ${LIBRARY_DIR}/src/nfa/rdfa.cpp - ${LIBRARY_DIR}/src/nfa/repeat.c - ${LIBRARY_DIR}/src/nfa/repeatcompile.cpp - ${LIBRARY_DIR}/src/nfa/sheng.c - ${LIBRARY_DIR}/src/nfa/shengcompile.cpp - ${LIBRARY_DIR}/src/nfa/shufti.c - ${LIBRARY_DIR}/src/nfa/shufticompile.cpp - ${LIBRARY_DIR}/src/nfa/tamarama.c - ${LIBRARY_DIR}/src/nfa/tamaramacompile.cpp - ${LIBRARY_DIR}/src/nfa/truffle.c - ${LIBRARY_DIR}/src/nfa/trufflecompile.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_anchored_acyclic.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_anchored_dots.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_asserts.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_builder.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_calc_components.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_cyclic_redundancy.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_depth.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_dominators.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_edge_redundancy.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_equivalence.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_execute.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_expr_info.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_extparam.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_fixed_width.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_fuzzy.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_haig.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_holder.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_is_equal.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_lbr.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_limex_accel.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_limex.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_literal_analysis.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_literal_component.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_literal_decorated.cpp - 
${LIBRARY_DIR}/src/nfagraph/ng_mcclellan.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_misc_opt.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_netflow.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_prefilter.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_prune.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_puff.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_redundancy.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_region_redundancy.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_region.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_repeat.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_reports.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_restructuring.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_revacc.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_sep.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_small_literal_set.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_som_add_redundancy.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_som_util.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_som.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_split.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_squash.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_stop.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_uncalc_components.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_utf8.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_util.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_vacuous.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_violet.cpp - ${LIBRARY_DIR}/src/nfagraph/ng_width.cpp - ${LIBRARY_DIR}/src/nfagraph/ng.cpp - ${LIBRARY_DIR}/src/parser/AsciiComponentClass.cpp - ${LIBRARY_DIR}/src/parser/buildstate.cpp - ${LIBRARY_DIR}/src/parser/check_refs.cpp - ${LIBRARY_DIR}/src/parser/Component.cpp - ${LIBRARY_DIR}/src/parser/ComponentAlternation.cpp - ${LIBRARY_DIR}/src/parser/ComponentAssertion.cpp - ${LIBRARY_DIR}/src/parser/ComponentAtomicGroup.cpp - ${LIBRARY_DIR}/src/parser/ComponentBackReference.cpp - ${LIBRARY_DIR}/src/parser/ComponentBoundary.cpp - ${LIBRARY_DIR}/src/parser/ComponentByte.cpp - ${LIBRARY_DIR}/src/parser/ComponentClass.cpp - ${LIBRARY_DIR}/src/parser/ComponentCondReference.cpp - ${LIBRARY_DIR}/src/parser/ComponentEmpty.cpp - ${LIBRARY_DIR}/src/parser/ComponentEUS.cpp - ${LIBRARY_DIR}/src/parser/ComponentRepeat.cpp - ${LIBRARY_DIR}/src/parser/ComponentSequence.cpp - ${LIBRARY_DIR}/src/parser/ComponentVisitor.cpp - ${LIBRARY_DIR}/src/parser/ComponentWordBoundary.cpp - ${LIBRARY_DIR}/src/parser/ConstComponentVisitor.cpp - ${LIBRARY_DIR}/src/parser/control_verbs.cpp - ${LIBRARY_DIR}/src/parser/logical_combination.cpp - ${LIBRARY_DIR}/src/parser/parse_error.cpp - ${LIBRARY_DIR}/src/parser/parser_util.cpp - ${LIBRARY_DIR}/src/parser/Parser.cpp - ${LIBRARY_DIR}/src/parser/prefilter.cpp - ${LIBRARY_DIR}/src/parser/shortcut_literal.cpp - ${LIBRARY_DIR}/src/parser/ucp_table.cpp - ${LIBRARY_DIR}/src/parser/unsupported.cpp - ${LIBRARY_DIR}/src/parser/utf8_validate.cpp - ${LIBRARY_DIR}/src/parser/Utf8ComponentClass.cpp - ${LIBRARY_DIR}/src/rose/block.c - ${LIBRARY_DIR}/src/rose/catchup.c - ${LIBRARY_DIR}/src/rose/init.c - ${LIBRARY_DIR}/src/rose/match.c - ${LIBRARY_DIR}/src/rose/program_runtime.c - ${LIBRARY_DIR}/src/rose/rose_build_add_mask.cpp - ${LIBRARY_DIR}/src/rose/rose_build_add.cpp - ${LIBRARY_DIR}/src/rose/rose_build_anchored.cpp - ${LIBRARY_DIR}/src/rose/rose_build_bytecode.cpp - ${LIBRARY_DIR}/src/rose/rose_build_castle.cpp - ${LIBRARY_DIR}/src/rose/rose_build_compile.cpp - ${LIBRARY_DIR}/src/rose/rose_build_convert.cpp - ${LIBRARY_DIR}/src/rose/rose_build_dedupe.cpp - ${LIBRARY_DIR}/src/rose/rose_build_engine_blob.cpp - ${LIBRARY_DIR}/src/rose/rose_build_exclusive.cpp - ${LIBRARY_DIR}/src/rose/rose_build_groups.cpp - ${LIBRARY_DIR}/src/rose/rose_build_infix.cpp - ${LIBRARY_DIR}/src/rose/rose_build_instructions.cpp - 
${LIBRARY_DIR}/src/rose/rose_build_lit_accel.cpp - ${LIBRARY_DIR}/src/rose/rose_build_long_lit.cpp - ${LIBRARY_DIR}/src/rose/rose_build_lookaround.cpp - ${LIBRARY_DIR}/src/rose/rose_build_matchers.cpp - ${LIBRARY_DIR}/src/rose/rose_build_merge.cpp - ${LIBRARY_DIR}/src/rose/rose_build_misc.cpp - ${LIBRARY_DIR}/src/rose/rose_build_program.cpp - ${LIBRARY_DIR}/src/rose/rose_build_role_aliasing.cpp - ${LIBRARY_DIR}/src/rose/rose_build_scatter.cpp - ${LIBRARY_DIR}/src/rose/rose_build_width.cpp - ${LIBRARY_DIR}/src/rose/rose_in_util.cpp - ${LIBRARY_DIR}/src/rose/stream.c - ${LIBRARY_DIR}/src/runtime.c - ${LIBRARY_DIR}/src/scratch.c - ${LIBRARY_DIR}/src/smallwrite/smallwrite_build.cpp - ${LIBRARY_DIR}/src/som/slot_manager.cpp - ${LIBRARY_DIR}/src/som/som_runtime.c - ${LIBRARY_DIR}/src/som/som_stream.c - ${LIBRARY_DIR}/src/stream_compress.c - ${LIBRARY_DIR}/src/util/alloc.cpp - ${LIBRARY_DIR}/src/util/charreach.cpp - ${LIBRARY_DIR}/src/util/clique.cpp - ${LIBRARY_DIR}/src/util/compile_context.cpp - ${LIBRARY_DIR}/src/util/compile_error.cpp - ${LIBRARY_DIR}/src/util/cpuid_flags.c - ${LIBRARY_DIR}/src/util/depth.cpp - ${LIBRARY_DIR}/src/util/fatbit_build.cpp - ${LIBRARY_DIR}/src/util/multibit_build.cpp - ${LIBRARY_DIR}/src/util/multibit.c - ${LIBRARY_DIR}/src/util/report_manager.cpp - ${LIBRARY_DIR}/src/util/simd_utils.c - ${LIBRARY_DIR}/src/util/state_compress.c - ${LIBRARY_DIR}/src/util/target_info.cpp - ${LIBRARY_DIR}/src/util/ue2string.cpp + "${LIBRARY_DIR}/src/alloc.c" + "${LIBRARY_DIR}/src/compiler/asserts.cpp" + "${LIBRARY_DIR}/src/compiler/compiler.cpp" + "${LIBRARY_DIR}/src/compiler/error.cpp" + "${LIBRARY_DIR}/src/crc32.c" + "${LIBRARY_DIR}/src/database.c" + "${LIBRARY_DIR}/src/fdr/engine_description.cpp" + "${LIBRARY_DIR}/src/fdr/fdr_compile_util.cpp" + "${LIBRARY_DIR}/src/fdr/fdr_compile.cpp" + "${LIBRARY_DIR}/src/fdr/fdr_confirm_compile.cpp" + "${LIBRARY_DIR}/src/fdr/fdr_engine_description.cpp" + "${LIBRARY_DIR}/src/fdr/fdr.c" + "${LIBRARY_DIR}/src/fdr/flood_compile.cpp" + "${LIBRARY_DIR}/src/fdr/teddy_compile.cpp" + "${LIBRARY_DIR}/src/fdr/teddy_engine_description.cpp" + "${LIBRARY_DIR}/src/fdr/teddy.c" + "${LIBRARY_DIR}/src/grey.cpp" + "${LIBRARY_DIR}/src/hs_valid_platform.c" + "${LIBRARY_DIR}/src/hs_version.c" + "${LIBRARY_DIR}/src/hs.cpp" + "${LIBRARY_DIR}/src/hwlm/hwlm_build.cpp" + "${LIBRARY_DIR}/src/hwlm/hwlm_literal.cpp" + "${LIBRARY_DIR}/src/hwlm/hwlm.c" + "${LIBRARY_DIR}/src/hwlm/noodle_build.cpp" + "${LIBRARY_DIR}/src/hwlm/noodle_engine.c" + "${LIBRARY_DIR}/src/nfa/accel_dfa_build_strat.cpp" + "${LIBRARY_DIR}/src/nfa/accel.c" + "${LIBRARY_DIR}/src/nfa/accelcompile.cpp" + "${LIBRARY_DIR}/src/nfa/castle.c" + "${LIBRARY_DIR}/src/nfa/castlecompile.cpp" + "${LIBRARY_DIR}/src/nfa/dfa_build_strat.cpp" + "${LIBRARY_DIR}/src/nfa/dfa_min.cpp" + "${LIBRARY_DIR}/src/nfa/gough.c" + "${LIBRARY_DIR}/src/nfa/goughcompile_accel.cpp" + "${LIBRARY_DIR}/src/nfa/goughcompile_reg.cpp" + "${LIBRARY_DIR}/src/nfa/goughcompile.cpp" + "${LIBRARY_DIR}/src/nfa/lbr.c" + "${LIBRARY_DIR}/src/nfa/limex_64.c" + "${LIBRARY_DIR}/src/nfa/limex_accel.c" + "${LIBRARY_DIR}/src/nfa/limex_compile.cpp" + "${LIBRARY_DIR}/src/nfa/limex_native.c" + "${LIBRARY_DIR}/src/nfa/limex_simd128.c" + "${LIBRARY_DIR}/src/nfa/limex_simd256.c" + "${LIBRARY_DIR}/src/nfa/limex_simd384.c" + "${LIBRARY_DIR}/src/nfa/limex_simd512.c" + "${LIBRARY_DIR}/src/nfa/mcclellan.c" + "${LIBRARY_DIR}/src/nfa/mcclellancompile_util.cpp" + "${LIBRARY_DIR}/src/nfa/mcclellancompile.cpp" + "${LIBRARY_DIR}/src/nfa/mcsheng_compile.cpp" + 
"${LIBRARY_DIR}/src/nfa/mcsheng_data.c" + "${LIBRARY_DIR}/src/nfa/mcsheng.c" + "${LIBRARY_DIR}/src/nfa/mpv.c" + "${LIBRARY_DIR}/src/nfa/mpvcompile.cpp" + "${LIBRARY_DIR}/src/nfa/nfa_api_dispatch.c" + "${LIBRARY_DIR}/src/nfa/nfa_build_util.cpp" + "${LIBRARY_DIR}/src/nfa/rdfa_graph.cpp" + "${LIBRARY_DIR}/src/nfa/rdfa_merge.cpp" + "${LIBRARY_DIR}/src/nfa/rdfa.cpp" + "${LIBRARY_DIR}/src/nfa/repeat.c" + "${LIBRARY_DIR}/src/nfa/repeatcompile.cpp" + "${LIBRARY_DIR}/src/nfa/sheng.c" + "${LIBRARY_DIR}/src/nfa/shengcompile.cpp" + "${LIBRARY_DIR}/src/nfa/shufti.c" + "${LIBRARY_DIR}/src/nfa/shufticompile.cpp" + "${LIBRARY_DIR}/src/nfa/tamarama.c" + "${LIBRARY_DIR}/src/nfa/tamaramacompile.cpp" + "${LIBRARY_DIR}/src/nfa/truffle.c" + "${LIBRARY_DIR}/src/nfa/trufflecompile.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_anchored_acyclic.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_anchored_dots.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_asserts.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_builder.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_calc_components.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_cyclic_redundancy.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_depth.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_dominators.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_edge_redundancy.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_equivalence.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_execute.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_expr_info.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_extparam.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_fixed_width.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_fuzzy.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_haig.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_holder.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_is_equal.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_lbr.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_limex_accel.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_limex.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_literal_analysis.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_literal_component.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_literal_decorated.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_mcclellan.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_misc_opt.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_netflow.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_prefilter.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_prune.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_puff.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_redundancy.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_region_redundancy.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_region.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_repeat.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_reports.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_restructuring.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_revacc.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_sep.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_small_literal_set.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_som_add_redundancy.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_som_util.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_som.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_split.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_squash.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_stop.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_uncalc_components.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_utf8.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_util.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_vacuous.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_violet.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng_width.cpp" + "${LIBRARY_DIR}/src/nfagraph/ng.cpp" + "${LIBRARY_DIR}/src/parser/AsciiComponentClass.cpp" + "${LIBRARY_DIR}/src/parser/buildstate.cpp" + "${LIBRARY_DIR}/src/parser/check_refs.cpp" + "${LIBRARY_DIR}/src/parser/Component.cpp" + "${LIBRARY_DIR}/src/parser/ComponentAlternation.cpp" + 
"${LIBRARY_DIR}/src/parser/ComponentAssertion.cpp" + "${LIBRARY_DIR}/src/parser/ComponentAtomicGroup.cpp" + "${LIBRARY_DIR}/src/parser/ComponentBackReference.cpp" + "${LIBRARY_DIR}/src/parser/ComponentBoundary.cpp" + "${LIBRARY_DIR}/src/parser/ComponentByte.cpp" + "${LIBRARY_DIR}/src/parser/ComponentClass.cpp" + "${LIBRARY_DIR}/src/parser/ComponentCondReference.cpp" + "${LIBRARY_DIR}/src/parser/ComponentEmpty.cpp" + "${LIBRARY_DIR}/src/parser/ComponentEUS.cpp" + "${LIBRARY_DIR}/src/parser/ComponentRepeat.cpp" + "${LIBRARY_DIR}/src/parser/ComponentSequence.cpp" + "${LIBRARY_DIR}/src/parser/ComponentVisitor.cpp" + "${LIBRARY_DIR}/src/parser/ComponentWordBoundary.cpp" + "${LIBRARY_DIR}/src/parser/ConstComponentVisitor.cpp" + "${LIBRARY_DIR}/src/parser/control_verbs.cpp" + "${LIBRARY_DIR}/src/parser/logical_combination.cpp" + "${LIBRARY_DIR}/src/parser/parse_error.cpp" + "${LIBRARY_DIR}/src/parser/parser_util.cpp" + "${LIBRARY_DIR}/src/parser/Parser.cpp" + "${LIBRARY_DIR}/src/parser/prefilter.cpp" + "${LIBRARY_DIR}/src/parser/shortcut_literal.cpp" + "${LIBRARY_DIR}/src/parser/ucp_table.cpp" + "${LIBRARY_DIR}/src/parser/unsupported.cpp" + "${LIBRARY_DIR}/src/parser/utf8_validate.cpp" + "${LIBRARY_DIR}/src/parser/Utf8ComponentClass.cpp" + "${LIBRARY_DIR}/src/rose/block.c" + "${LIBRARY_DIR}/src/rose/catchup.c" + "${LIBRARY_DIR}/src/rose/init.c" + "${LIBRARY_DIR}/src/rose/match.c" + "${LIBRARY_DIR}/src/rose/program_runtime.c" + "${LIBRARY_DIR}/src/rose/rose_build_add_mask.cpp" + "${LIBRARY_DIR}/src/rose/rose_build_add.cpp" + "${LIBRARY_DIR}/src/rose/rose_build_anchored.cpp" + "${LIBRARY_DIR}/src/rose/rose_build_bytecode.cpp" + "${LIBRARY_DIR}/src/rose/rose_build_castle.cpp" + "${LIBRARY_DIR}/src/rose/rose_build_compile.cpp" + "${LIBRARY_DIR}/src/rose/rose_build_convert.cpp" + "${LIBRARY_DIR}/src/rose/rose_build_dedupe.cpp" + "${LIBRARY_DIR}/src/rose/rose_build_engine_blob.cpp" + "${LIBRARY_DIR}/src/rose/rose_build_exclusive.cpp" + "${LIBRARY_DIR}/src/rose/rose_build_groups.cpp" + "${LIBRARY_DIR}/src/rose/rose_build_infix.cpp" + "${LIBRARY_DIR}/src/rose/rose_build_instructions.cpp" + "${LIBRARY_DIR}/src/rose/rose_build_lit_accel.cpp" + "${LIBRARY_DIR}/src/rose/rose_build_long_lit.cpp" + "${LIBRARY_DIR}/src/rose/rose_build_lookaround.cpp" + "${LIBRARY_DIR}/src/rose/rose_build_matchers.cpp" + "${LIBRARY_DIR}/src/rose/rose_build_merge.cpp" + "${LIBRARY_DIR}/src/rose/rose_build_misc.cpp" + "${LIBRARY_DIR}/src/rose/rose_build_program.cpp" + "${LIBRARY_DIR}/src/rose/rose_build_role_aliasing.cpp" + "${LIBRARY_DIR}/src/rose/rose_build_scatter.cpp" + "${LIBRARY_DIR}/src/rose/rose_build_width.cpp" + "${LIBRARY_DIR}/src/rose/rose_in_util.cpp" + "${LIBRARY_DIR}/src/rose/stream.c" + "${LIBRARY_DIR}/src/runtime.c" + "${LIBRARY_DIR}/src/scratch.c" + "${LIBRARY_DIR}/src/smallwrite/smallwrite_build.cpp" + "${LIBRARY_DIR}/src/som/slot_manager.cpp" + "${LIBRARY_DIR}/src/som/som_runtime.c" + "${LIBRARY_DIR}/src/som/som_stream.c" + "${LIBRARY_DIR}/src/stream_compress.c" + "${LIBRARY_DIR}/src/util/alloc.cpp" + "${LIBRARY_DIR}/src/util/charreach.cpp" + "${LIBRARY_DIR}/src/util/clique.cpp" + "${LIBRARY_DIR}/src/util/compile_context.cpp" + "${LIBRARY_DIR}/src/util/compile_error.cpp" + "${LIBRARY_DIR}/src/util/cpuid_flags.c" + "${LIBRARY_DIR}/src/util/depth.cpp" + "${LIBRARY_DIR}/src/util/fatbit_build.cpp" + "${LIBRARY_DIR}/src/util/multibit_build.cpp" + "${LIBRARY_DIR}/src/util/multibit.c" + "${LIBRARY_DIR}/src/util/report_manager.cpp" + "${LIBRARY_DIR}/src/util/simd_utils.c" + "${LIBRARY_DIR}/src/util/state_compress.c" + 
"${LIBRARY_DIR}/src/util/target_info.cpp" + "${LIBRARY_DIR}/src/util/ue2string.cpp" ) add_library (hyperscan ${SRCS}) @@ -259,9 +259,9 @@ if (NOT EXTERNAL_HYPERSCAN_LIBRARY_FOUND) target_include_directories (hyperscan PRIVATE common - ${LIBRARY_DIR}/include + "${LIBRARY_DIR}/include" ) - target_include_directories (hyperscan SYSTEM PUBLIC ${LIBRARY_DIR}/src) + target_include_directories (hyperscan SYSTEM PUBLIC "${LIBRARY_DIR}/src") if (ARCH_AMD64) target_include_directories (hyperscan PRIVATE x86_64) endif () diff --git a/contrib/icu-cmake/CMakeLists.txt b/contrib/icu-cmake/CMakeLists.txt index 884f5c3a336..26f3bb11006 100644 --- a/contrib/icu-cmake/CMakeLists.txt +++ b/contrib/icu-cmake/CMakeLists.txt @@ -1,447 +1,447 @@ -set(ICU_SOURCE_DIR ${ClickHouse_SOURCE_DIR}/contrib/icu/icu4c/source) -set(ICUDATA_SOURCE_DIR ${ClickHouse_SOURCE_DIR}/contrib/icudata/) +set(ICU_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/icu/icu4c/source") +set(ICUDATA_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/icudata/") set (CMAKE_CXX_STANDARD 17) # These lists of sources were generated from build log of the original ICU build system (configure + make). set(ICUUC_SOURCES -${ICU_SOURCE_DIR}/common/errorcode.cpp -${ICU_SOURCE_DIR}/common/putil.cpp -${ICU_SOURCE_DIR}/common/umath.cpp -${ICU_SOURCE_DIR}/common/utypes.cpp -${ICU_SOURCE_DIR}/common/uinvchar.cpp -${ICU_SOURCE_DIR}/common/umutex.cpp -${ICU_SOURCE_DIR}/common/ucln_cmn.cpp -${ICU_SOURCE_DIR}/common/uinit.cpp -${ICU_SOURCE_DIR}/common/uobject.cpp -${ICU_SOURCE_DIR}/common/cmemory.cpp -${ICU_SOURCE_DIR}/common/charstr.cpp -${ICU_SOURCE_DIR}/common/cstr.cpp -${ICU_SOURCE_DIR}/common/udata.cpp -${ICU_SOURCE_DIR}/common/ucmndata.cpp -${ICU_SOURCE_DIR}/common/udatamem.cpp -${ICU_SOURCE_DIR}/common/umapfile.cpp -${ICU_SOURCE_DIR}/common/udataswp.cpp -${ICU_SOURCE_DIR}/common/utrie_swap.cpp -${ICU_SOURCE_DIR}/common/ucol_swp.cpp -${ICU_SOURCE_DIR}/common/utrace.cpp -${ICU_SOURCE_DIR}/common/uhash.cpp -${ICU_SOURCE_DIR}/common/uhash_us.cpp -${ICU_SOURCE_DIR}/common/uenum.cpp -${ICU_SOURCE_DIR}/common/ustrenum.cpp -${ICU_SOURCE_DIR}/common/uvector.cpp -${ICU_SOURCE_DIR}/common/ustack.cpp -${ICU_SOURCE_DIR}/common/uvectr32.cpp -${ICU_SOURCE_DIR}/common/uvectr64.cpp -${ICU_SOURCE_DIR}/common/ucnv.cpp -${ICU_SOURCE_DIR}/common/ucnv_bld.cpp -${ICU_SOURCE_DIR}/common/ucnv_cnv.cpp -${ICU_SOURCE_DIR}/common/ucnv_io.cpp -${ICU_SOURCE_DIR}/common/ucnv_cb.cpp -${ICU_SOURCE_DIR}/common/ucnv_err.cpp -${ICU_SOURCE_DIR}/common/ucnvlat1.cpp -${ICU_SOURCE_DIR}/common/ucnv_u7.cpp -${ICU_SOURCE_DIR}/common/ucnv_u8.cpp -${ICU_SOURCE_DIR}/common/ucnv_u16.cpp -${ICU_SOURCE_DIR}/common/ucnv_u32.cpp -${ICU_SOURCE_DIR}/common/ucnvscsu.cpp -${ICU_SOURCE_DIR}/common/ucnvbocu.cpp -${ICU_SOURCE_DIR}/common/ucnv_ext.cpp -${ICU_SOURCE_DIR}/common/ucnvmbcs.cpp -${ICU_SOURCE_DIR}/common/ucnv2022.cpp -${ICU_SOURCE_DIR}/common/ucnvhz.cpp -${ICU_SOURCE_DIR}/common/ucnv_lmb.cpp -${ICU_SOURCE_DIR}/common/ucnvisci.cpp -${ICU_SOURCE_DIR}/common/ucnvdisp.cpp -${ICU_SOURCE_DIR}/common/ucnv_set.cpp -${ICU_SOURCE_DIR}/common/ucnv_ct.cpp -${ICU_SOURCE_DIR}/common/resource.cpp -${ICU_SOURCE_DIR}/common/uresbund.cpp -${ICU_SOURCE_DIR}/common/ures_cnv.cpp -${ICU_SOURCE_DIR}/common/uresdata.cpp -${ICU_SOURCE_DIR}/common/resbund.cpp -${ICU_SOURCE_DIR}/common/resbund_cnv.cpp -${ICU_SOURCE_DIR}/common/ucurr.cpp -${ICU_SOURCE_DIR}/common/localebuilder.cpp -${ICU_SOURCE_DIR}/common/localeprioritylist.cpp -${ICU_SOURCE_DIR}/common/messagepattern.cpp -${ICU_SOURCE_DIR}/common/ucat.cpp 
-${ICU_SOURCE_DIR}/common/locmap.cpp -${ICU_SOURCE_DIR}/common/uloc.cpp -${ICU_SOURCE_DIR}/common/locid.cpp -${ICU_SOURCE_DIR}/common/locutil.cpp -${ICU_SOURCE_DIR}/common/locavailable.cpp -${ICU_SOURCE_DIR}/common/locdispnames.cpp -${ICU_SOURCE_DIR}/common/locdspnm.cpp -${ICU_SOURCE_DIR}/common/loclikely.cpp -${ICU_SOURCE_DIR}/common/locresdata.cpp -${ICU_SOURCE_DIR}/common/lsr.cpp -${ICU_SOURCE_DIR}/common/loclikelysubtags.cpp -${ICU_SOURCE_DIR}/common/locdistance.cpp -${ICU_SOURCE_DIR}/common/localematcher.cpp -${ICU_SOURCE_DIR}/common/bytestream.cpp -${ICU_SOURCE_DIR}/common/stringpiece.cpp -${ICU_SOURCE_DIR}/common/bytesinkutil.cpp -${ICU_SOURCE_DIR}/common/stringtriebuilder.cpp -${ICU_SOURCE_DIR}/common/bytestriebuilder.cpp -${ICU_SOURCE_DIR}/common/bytestrie.cpp -${ICU_SOURCE_DIR}/common/bytestrieiterator.cpp -${ICU_SOURCE_DIR}/common/ucharstrie.cpp -${ICU_SOURCE_DIR}/common/ucharstriebuilder.cpp -${ICU_SOURCE_DIR}/common/ucharstrieiterator.cpp -${ICU_SOURCE_DIR}/common/dictionarydata.cpp -${ICU_SOURCE_DIR}/common/edits.cpp -${ICU_SOURCE_DIR}/common/appendable.cpp -${ICU_SOURCE_DIR}/common/ustr_cnv.cpp -${ICU_SOURCE_DIR}/common/unistr_cnv.cpp -${ICU_SOURCE_DIR}/common/unistr.cpp -${ICU_SOURCE_DIR}/common/unistr_case.cpp -${ICU_SOURCE_DIR}/common/unistr_props.cpp -${ICU_SOURCE_DIR}/common/utf_impl.cpp -${ICU_SOURCE_DIR}/common/ustring.cpp -${ICU_SOURCE_DIR}/common/ustrcase.cpp -${ICU_SOURCE_DIR}/common/ucasemap.cpp -${ICU_SOURCE_DIR}/common/ucasemap_titlecase_brkiter.cpp -${ICU_SOURCE_DIR}/common/cstring.cpp -${ICU_SOURCE_DIR}/common/ustrfmt.cpp -${ICU_SOURCE_DIR}/common/ustrtrns.cpp -${ICU_SOURCE_DIR}/common/ustr_wcs.cpp -${ICU_SOURCE_DIR}/common/utext.cpp -${ICU_SOURCE_DIR}/common/unistr_case_locale.cpp -${ICU_SOURCE_DIR}/common/ustrcase_locale.cpp -${ICU_SOURCE_DIR}/common/unistr_titlecase_brkiter.cpp -${ICU_SOURCE_DIR}/common/ustr_titlecase_brkiter.cpp -${ICU_SOURCE_DIR}/common/normalizer2impl.cpp -${ICU_SOURCE_DIR}/common/normalizer2.cpp -${ICU_SOURCE_DIR}/common/filterednormalizer2.cpp -${ICU_SOURCE_DIR}/common/normlzr.cpp -${ICU_SOURCE_DIR}/common/unorm.cpp -${ICU_SOURCE_DIR}/common/unormcmp.cpp -${ICU_SOURCE_DIR}/common/loadednormalizer2impl.cpp -${ICU_SOURCE_DIR}/common/chariter.cpp -${ICU_SOURCE_DIR}/common/schriter.cpp -${ICU_SOURCE_DIR}/common/uchriter.cpp -${ICU_SOURCE_DIR}/common/uiter.cpp -${ICU_SOURCE_DIR}/common/patternprops.cpp -${ICU_SOURCE_DIR}/common/uchar.cpp -${ICU_SOURCE_DIR}/common/uprops.cpp -${ICU_SOURCE_DIR}/common/ucase.cpp -${ICU_SOURCE_DIR}/common/propname.cpp -${ICU_SOURCE_DIR}/common/ubidi_props.cpp -${ICU_SOURCE_DIR}/common/characterproperties.cpp -${ICU_SOURCE_DIR}/common/ubidi.cpp -${ICU_SOURCE_DIR}/common/ubidiwrt.cpp -${ICU_SOURCE_DIR}/common/ubidiln.cpp -${ICU_SOURCE_DIR}/common/ushape.cpp -${ICU_SOURCE_DIR}/common/uscript.cpp -${ICU_SOURCE_DIR}/common/uscript_props.cpp -${ICU_SOURCE_DIR}/common/usc_impl.cpp -${ICU_SOURCE_DIR}/common/unames.cpp -${ICU_SOURCE_DIR}/common/utrie.cpp -${ICU_SOURCE_DIR}/common/utrie2.cpp -${ICU_SOURCE_DIR}/common/utrie2_builder.cpp -${ICU_SOURCE_DIR}/common/ucptrie.cpp -${ICU_SOURCE_DIR}/common/umutablecptrie.cpp -${ICU_SOURCE_DIR}/common/bmpset.cpp -${ICU_SOURCE_DIR}/common/unisetspan.cpp -${ICU_SOURCE_DIR}/common/uset_props.cpp -${ICU_SOURCE_DIR}/common/uniset_props.cpp -${ICU_SOURCE_DIR}/common/uniset_closure.cpp -${ICU_SOURCE_DIR}/common/uset.cpp -${ICU_SOURCE_DIR}/common/uniset.cpp -${ICU_SOURCE_DIR}/common/usetiter.cpp -${ICU_SOURCE_DIR}/common/ruleiter.cpp -${ICU_SOURCE_DIR}/common/caniter.cpp 
-${ICU_SOURCE_DIR}/common/unifilt.cpp -${ICU_SOURCE_DIR}/common/unifunct.cpp -${ICU_SOURCE_DIR}/common/uarrsort.cpp -${ICU_SOURCE_DIR}/common/brkiter.cpp -${ICU_SOURCE_DIR}/common/ubrk.cpp -${ICU_SOURCE_DIR}/common/brkeng.cpp -${ICU_SOURCE_DIR}/common/dictbe.cpp -${ICU_SOURCE_DIR}/common/filteredbrk.cpp -${ICU_SOURCE_DIR}/common/rbbi.cpp -${ICU_SOURCE_DIR}/common/rbbidata.cpp -${ICU_SOURCE_DIR}/common/rbbinode.cpp -${ICU_SOURCE_DIR}/common/rbbirb.cpp -${ICU_SOURCE_DIR}/common/rbbiscan.cpp -${ICU_SOURCE_DIR}/common/rbbisetb.cpp -${ICU_SOURCE_DIR}/common/rbbistbl.cpp -${ICU_SOURCE_DIR}/common/rbbitblb.cpp -${ICU_SOURCE_DIR}/common/rbbi_cache.cpp -${ICU_SOURCE_DIR}/common/serv.cpp -${ICU_SOURCE_DIR}/common/servnotf.cpp -${ICU_SOURCE_DIR}/common/servls.cpp -${ICU_SOURCE_DIR}/common/servlk.cpp -${ICU_SOURCE_DIR}/common/servlkf.cpp -${ICU_SOURCE_DIR}/common/servrbf.cpp -${ICU_SOURCE_DIR}/common/servslkf.cpp -${ICU_SOURCE_DIR}/common/uidna.cpp -${ICU_SOURCE_DIR}/common/usprep.cpp -${ICU_SOURCE_DIR}/common/uts46.cpp -${ICU_SOURCE_DIR}/common/punycode.cpp -${ICU_SOURCE_DIR}/common/util.cpp -${ICU_SOURCE_DIR}/common/util_props.cpp -${ICU_SOURCE_DIR}/common/parsepos.cpp -${ICU_SOURCE_DIR}/common/locbased.cpp -${ICU_SOURCE_DIR}/common/cwchar.cpp -${ICU_SOURCE_DIR}/common/wintz.cpp -${ICU_SOURCE_DIR}/common/dtintrv.cpp -${ICU_SOURCE_DIR}/common/ucnvsel.cpp -${ICU_SOURCE_DIR}/common/propsvec.cpp -${ICU_SOURCE_DIR}/common/ulist.cpp -${ICU_SOURCE_DIR}/common/uloc_tag.cpp -${ICU_SOURCE_DIR}/common/icudataver.cpp -${ICU_SOURCE_DIR}/common/icuplug.cpp -${ICU_SOURCE_DIR}/common/sharedobject.cpp -${ICU_SOURCE_DIR}/common/simpleformatter.cpp -${ICU_SOURCE_DIR}/common/unifiedcache.cpp -${ICU_SOURCE_DIR}/common/uloc_keytype.cpp -${ICU_SOURCE_DIR}/common/ubiditransform.cpp -${ICU_SOURCE_DIR}/common/pluralmap.cpp -${ICU_SOURCE_DIR}/common/static_unicode_sets.cpp -${ICU_SOURCE_DIR}/common/restrace.cpp) +"${ICU_SOURCE_DIR}/common/errorcode.cpp" +"${ICU_SOURCE_DIR}/common/putil.cpp" +"${ICU_SOURCE_DIR}/common/umath.cpp" +"${ICU_SOURCE_DIR}/common/utypes.cpp" +"${ICU_SOURCE_DIR}/common/uinvchar.cpp" +"${ICU_SOURCE_DIR}/common/umutex.cpp" +"${ICU_SOURCE_DIR}/common/ucln_cmn.cpp" +"${ICU_SOURCE_DIR}/common/uinit.cpp" +"${ICU_SOURCE_DIR}/common/uobject.cpp" +"${ICU_SOURCE_DIR}/common/cmemory.cpp" +"${ICU_SOURCE_DIR}/common/charstr.cpp" +"${ICU_SOURCE_DIR}/common/cstr.cpp" +"${ICU_SOURCE_DIR}/common/udata.cpp" +"${ICU_SOURCE_DIR}/common/ucmndata.cpp" +"${ICU_SOURCE_DIR}/common/udatamem.cpp" +"${ICU_SOURCE_DIR}/common/umapfile.cpp" +"${ICU_SOURCE_DIR}/common/udataswp.cpp" +"${ICU_SOURCE_DIR}/common/utrie_swap.cpp" +"${ICU_SOURCE_DIR}/common/ucol_swp.cpp" +"${ICU_SOURCE_DIR}/common/utrace.cpp" +"${ICU_SOURCE_DIR}/common/uhash.cpp" +"${ICU_SOURCE_DIR}/common/uhash_us.cpp" +"${ICU_SOURCE_DIR}/common/uenum.cpp" +"${ICU_SOURCE_DIR}/common/ustrenum.cpp" +"${ICU_SOURCE_DIR}/common/uvector.cpp" +"${ICU_SOURCE_DIR}/common/ustack.cpp" +"${ICU_SOURCE_DIR}/common/uvectr32.cpp" +"${ICU_SOURCE_DIR}/common/uvectr64.cpp" +"${ICU_SOURCE_DIR}/common/ucnv.cpp" +"${ICU_SOURCE_DIR}/common/ucnv_bld.cpp" +"${ICU_SOURCE_DIR}/common/ucnv_cnv.cpp" +"${ICU_SOURCE_DIR}/common/ucnv_io.cpp" +"${ICU_SOURCE_DIR}/common/ucnv_cb.cpp" +"${ICU_SOURCE_DIR}/common/ucnv_err.cpp" +"${ICU_SOURCE_DIR}/common/ucnvlat1.cpp" +"${ICU_SOURCE_DIR}/common/ucnv_u7.cpp" +"${ICU_SOURCE_DIR}/common/ucnv_u8.cpp" +"${ICU_SOURCE_DIR}/common/ucnv_u16.cpp" +"${ICU_SOURCE_DIR}/common/ucnv_u32.cpp" +"${ICU_SOURCE_DIR}/common/ucnvscsu.cpp" +"${ICU_SOURCE_DIR}/common/ucnvbocu.cpp" 
+"${ICU_SOURCE_DIR}/common/ucnv_ext.cpp" +"${ICU_SOURCE_DIR}/common/ucnvmbcs.cpp" +"${ICU_SOURCE_DIR}/common/ucnv2022.cpp" +"${ICU_SOURCE_DIR}/common/ucnvhz.cpp" +"${ICU_SOURCE_DIR}/common/ucnv_lmb.cpp" +"${ICU_SOURCE_DIR}/common/ucnvisci.cpp" +"${ICU_SOURCE_DIR}/common/ucnvdisp.cpp" +"${ICU_SOURCE_DIR}/common/ucnv_set.cpp" +"${ICU_SOURCE_DIR}/common/ucnv_ct.cpp" +"${ICU_SOURCE_DIR}/common/resource.cpp" +"${ICU_SOURCE_DIR}/common/uresbund.cpp" +"${ICU_SOURCE_DIR}/common/ures_cnv.cpp" +"${ICU_SOURCE_DIR}/common/uresdata.cpp" +"${ICU_SOURCE_DIR}/common/resbund.cpp" +"${ICU_SOURCE_DIR}/common/resbund_cnv.cpp" +"${ICU_SOURCE_DIR}/common/ucurr.cpp" +"${ICU_SOURCE_DIR}/common/localebuilder.cpp" +"${ICU_SOURCE_DIR}/common/localeprioritylist.cpp" +"${ICU_SOURCE_DIR}/common/messagepattern.cpp" +"${ICU_SOURCE_DIR}/common/ucat.cpp" +"${ICU_SOURCE_DIR}/common/locmap.cpp" +"${ICU_SOURCE_DIR}/common/uloc.cpp" +"${ICU_SOURCE_DIR}/common/locid.cpp" +"${ICU_SOURCE_DIR}/common/locutil.cpp" +"${ICU_SOURCE_DIR}/common/locavailable.cpp" +"${ICU_SOURCE_DIR}/common/locdispnames.cpp" +"${ICU_SOURCE_DIR}/common/locdspnm.cpp" +"${ICU_SOURCE_DIR}/common/loclikely.cpp" +"${ICU_SOURCE_DIR}/common/locresdata.cpp" +"${ICU_SOURCE_DIR}/common/lsr.cpp" +"${ICU_SOURCE_DIR}/common/loclikelysubtags.cpp" +"${ICU_SOURCE_DIR}/common/locdistance.cpp" +"${ICU_SOURCE_DIR}/common/localematcher.cpp" +"${ICU_SOURCE_DIR}/common/bytestream.cpp" +"${ICU_SOURCE_DIR}/common/stringpiece.cpp" +"${ICU_SOURCE_DIR}/common/bytesinkutil.cpp" +"${ICU_SOURCE_DIR}/common/stringtriebuilder.cpp" +"${ICU_SOURCE_DIR}/common/bytestriebuilder.cpp" +"${ICU_SOURCE_DIR}/common/bytestrie.cpp" +"${ICU_SOURCE_DIR}/common/bytestrieiterator.cpp" +"${ICU_SOURCE_DIR}/common/ucharstrie.cpp" +"${ICU_SOURCE_DIR}/common/ucharstriebuilder.cpp" +"${ICU_SOURCE_DIR}/common/ucharstrieiterator.cpp" +"${ICU_SOURCE_DIR}/common/dictionarydata.cpp" +"${ICU_SOURCE_DIR}/common/edits.cpp" +"${ICU_SOURCE_DIR}/common/appendable.cpp" +"${ICU_SOURCE_DIR}/common/ustr_cnv.cpp" +"${ICU_SOURCE_DIR}/common/unistr_cnv.cpp" +"${ICU_SOURCE_DIR}/common/unistr.cpp" +"${ICU_SOURCE_DIR}/common/unistr_case.cpp" +"${ICU_SOURCE_DIR}/common/unistr_props.cpp" +"${ICU_SOURCE_DIR}/common/utf_impl.cpp" +"${ICU_SOURCE_DIR}/common/ustring.cpp" +"${ICU_SOURCE_DIR}/common/ustrcase.cpp" +"${ICU_SOURCE_DIR}/common/ucasemap.cpp" +"${ICU_SOURCE_DIR}/common/ucasemap_titlecase_brkiter.cpp" +"${ICU_SOURCE_DIR}/common/cstring.cpp" +"${ICU_SOURCE_DIR}/common/ustrfmt.cpp" +"${ICU_SOURCE_DIR}/common/ustrtrns.cpp" +"${ICU_SOURCE_DIR}/common/ustr_wcs.cpp" +"${ICU_SOURCE_DIR}/common/utext.cpp" +"${ICU_SOURCE_DIR}/common/unistr_case_locale.cpp" +"${ICU_SOURCE_DIR}/common/ustrcase_locale.cpp" +"${ICU_SOURCE_DIR}/common/unistr_titlecase_brkiter.cpp" +"${ICU_SOURCE_DIR}/common/ustr_titlecase_brkiter.cpp" +"${ICU_SOURCE_DIR}/common/normalizer2impl.cpp" +"${ICU_SOURCE_DIR}/common/normalizer2.cpp" +"${ICU_SOURCE_DIR}/common/filterednormalizer2.cpp" +"${ICU_SOURCE_DIR}/common/normlzr.cpp" +"${ICU_SOURCE_DIR}/common/unorm.cpp" +"${ICU_SOURCE_DIR}/common/unormcmp.cpp" +"${ICU_SOURCE_DIR}/common/loadednormalizer2impl.cpp" +"${ICU_SOURCE_DIR}/common/chariter.cpp" +"${ICU_SOURCE_DIR}/common/schriter.cpp" +"${ICU_SOURCE_DIR}/common/uchriter.cpp" +"${ICU_SOURCE_DIR}/common/uiter.cpp" +"${ICU_SOURCE_DIR}/common/patternprops.cpp" +"${ICU_SOURCE_DIR}/common/uchar.cpp" +"${ICU_SOURCE_DIR}/common/uprops.cpp" +"${ICU_SOURCE_DIR}/common/ucase.cpp" +"${ICU_SOURCE_DIR}/common/propname.cpp" +"${ICU_SOURCE_DIR}/common/ubidi_props.cpp" 
+"${ICU_SOURCE_DIR}/common/characterproperties.cpp" +"${ICU_SOURCE_DIR}/common/ubidi.cpp" +"${ICU_SOURCE_DIR}/common/ubidiwrt.cpp" +"${ICU_SOURCE_DIR}/common/ubidiln.cpp" +"${ICU_SOURCE_DIR}/common/ushape.cpp" +"${ICU_SOURCE_DIR}/common/uscript.cpp" +"${ICU_SOURCE_DIR}/common/uscript_props.cpp" +"${ICU_SOURCE_DIR}/common/usc_impl.cpp" +"${ICU_SOURCE_DIR}/common/unames.cpp" +"${ICU_SOURCE_DIR}/common/utrie.cpp" +"${ICU_SOURCE_DIR}/common/utrie2.cpp" +"${ICU_SOURCE_DIR}/common/utrie2_builder.cpp" +"${ICU_SOURCE_DIR}/common/ucptrie.cpp" +"${ICU_SOURCE_DIR}/common/umutablecptrie.cpp" +"${ICU_SOURCE_DIR}/common/bmpset.cpp" +"${ICU_SOURCE_DIR}/common/unisetspan.cpp" +"${ICU_SOURCE_DIR}/common/uset_props.cpp" +"${ICU_SOURCE_DIR}/common/uniset_props.cpp" +"${ICU_SOURCE_DIR}/common/uniset_closure.cpp" +"${ICU_SOURCE_DIR}/common/uset.cpp" +"${ICU_SOURCE_DIR}/common/uniset.cpp" +"${ICU_SOURCE_DIR}/common/usetiter.cpp" +"${ICU_SOURCE_DIR}/common/ruleiter.cpp" +"${ICU_SOURCE_DIR}/common/caniter.cpp" +"${ICU_SOURCE_DIR}/common/unifilt.cpp" +"${ICU_SOURCE_DIR}/common/unifunct.cpp" +"${ICU_SOURCE_DIR}/common/uarrsort.cpp" +"${ICU_SOURCE_DIR}/common/brkiter.cpp" +"${ICU_SOURCE_DIR}/common/ubrk.cpp" +"${ICU_SOURCE_DIR}/common/brkeng.cpp" +"${ICU_SOURCE_DIR}/common/dictbe.cpp" +"${ICU_SOURCE_DIR}/common/filteredbrk.cpp" +"${ICU_SOURCE_DIR}/common/rbbi.cpp" +"${ICU_SOURCE_DIR}/common/rbbidata.cpp" +"${ICU_SOURCE_DIR}/common/rbbinode.cpp" +"${ICU_SOURCE_DIR}/common/rbbirb.cpp" +"${ICU_SOURCE_DIR}/common/rbbiscan.cpp" +"${ICU_SOURCE_DIR}/common/rbbisetb.cpp" +"${ICU_SOURCE_DIR}/common/rbbistbl.cpp" +"${ICU_SOURCE_DIR}/common/rbbitblb.cpp" +"${ICU_SOURCE_DIR}/common/rbbi_cache.cpp" +"${ICU_SOURCE_DIR}/common/serv.cpp" +"${ICU_SOURCE_DIR}/common/servnotf.cpp" +"${ICU_SOURCE_DIR}/common/servls.cpp" +"${ICU_SOURCE_DIR}/common/servlk.cpp" +"${ICU_SOURCE_DIR}/common/servlkf.cpp" +"${ICU_SOURCE_DIR}/common/servrbf.cpp" +"${ICU_SOURCE_DIR}/common/servslkf.cpp" +"${ICU_SOURCE_DIR}/common/uidna.cpp" +"${ICU_SOURCE_DIR}/common/usprep.cpp" +"${ICU_SOURCE_DIR}/common/uts46.cpp" +"${ICU_SOURCE_DIR}/common/punycode.cpp" +"${ICU_SOURCE_DIR}/common/util.cpp" +"${ICU_SOURCE_DIR}/common/util_props.cpp" +"${ICU_SOURCE_DIR}/common/parsepos.cpp" +"${ICU_SOURCE_DIR}/common/locbased.cpp" +"${ICU_SOURCE_DIR}/common/cwchar.cpp" +"${ICU_SOURCE_DIR}/common/wintz.cpp" +"${ICU_SOURCE_DIR}/common/dtintrv.cpp" +"${ICU_SOURCE_DIR}/common/ucnvsel.cpp" +"${ICU_SOURCE_DIR}/common/propsvec.cpp" +"${ICU_SOURCE_DIR}/common/ulist.cpp" +"${ICU_SOURCE_DIR}/common/uloc_tag.cpp" +"${ICU_SOURCE_DIR}/common/icudataver.cpp" +"${ICU_SOURCE_DIR}/common/icuplug.cpp" +"${ICU_SOURCE_DIR}/common/sharedobject.cpp" +"${ICU_SOURCE_DIR}/common/simpleformatter.cpp" +"${ICU_SOURCE_DIR}/common/unifiedcache.cpp" +"${ICU_SOURCE_DIR}/common/uloc_keytype.cpp" +"${ICU_SOURCE_DIR}/common/ubiditransform.cpp" +"${ICU_SOURCE_DIR}/common/pluralmap.cpp" +"${ICU_SOURCE_DIR}/common/static_unicode_sets.cpp" +"${ICU_SOURCE_DIR}/common/restrace.cpp") set(ICUI18N_SOURCES -${ICU_SOURCE_DIR}/i18n/ucln_in.cpp -${ICU_SOURCE_DIR}/i18n/fmtable.cpp -${ICU_SOURCE_DIR}/i18n/format.cpp -${ICU_SOURCE_DIR}/i18n/msgfmt.cpp -${ICU_SOURCE_DIR}/i18n/umsg.cpp -${ICU_SOURCE_DIR}/i18n/numfmt.cpp -${ICU_SOURCE_DIR}/i18n/unum.cpp -${ICU_SOURCE_DIR}/i18n/decimfmt.cpp -${ICU_SOURCE_DIR}/i18n/dcfmtsym.cpp -${ICU_SOURCE_DIR}/i18n/fmtable_cnv.cpp -${ICU_SOURCE_DIR}/i18n/choicfmt.cpp -${ICU_SOURCE_DIR}/i18n/datefmt.cpp -${ICU_SOURCE_DIR}/i18n/smpdtfmt.cpp -${ICU_SOURCE_DIR}/i18n/reldtfmt.cpp 
-${ICU_SOURCE_DIR}/i18n/dtfmtsym.cpp -${ICU_SOURCE_DIR}/i18n/udat.cpp -${ICU_SOURCE_DIR}/i18n/dtptngen.cpp -${ICU_SOURCE_DIR}/i18n/udatpg.cpp -${ICU_SOURCE_DIR}/i18n/nfrs.cpp -${ICU_SOURCE_DIR}/i18n/nfrule.cpp -${ICU_SOURCE_DIR}/i18n/nfsubs.cpp -${ICU_SOURCE_DIR}/i18n/rbnf.cpp -${ICU_SOURCE_DIR}/i18n/numsys.cpp -${ICU_SOURCE_DIR}/i18n/unumsys.cpp -${ICU_SOURCE_DIR}/i18n/ucsdet.cpp -${ICU_SOURCE_DIR}/i18n/ucal.cpp -${ICU_SOURCE_DIR}/i18n/calendar.cpp -${ICU_SOURCE_DIR}/i18n/gregocal.cpp -${ICU_SOURCE_DIR}/i18n/timezone.cpp -${ICU_SOURCE_DIR}/i18n/simpletz.cpp -${ICU_SOURCE_DIR}/i18n/olsontz.cpp -${ICU_SOURCE_DIR}/i18n/astro.cpp -${ICU_SOURCE_DIR}/i18n/taiwncal.cpp -${ICU_SOURCE_DIR}/i18n/buddhcal.cpp -${ICU_SOURCE_DIR}/i18n/persncal.cpp -${ICU_SOURCE_DIR}/i18n/islamcal.cpp -${ICU_SOURCE_DIR}/i18n/japancal.cpp -${ICU_SOURCE_DIR}/i18n/gregoimp.cpp -${ICU_SOURCE_DIR}/i18n/hebrwcal.cpp -${ICU_SOURCE_DIR}/i18n/indiancal.cpp -${ICU_SOURCE_DIR}/i18n/chnsecal.cpp -${ICU_SOURCE_DIR}/i18n/cecal.cpp -${ICU_SOURCE_DIR}/i18n/coptccal.cpp -${ICU_SOURCE_DIR}/i18n/dangical.cpp -${ICU_SOURCE_DIR}/i18n/ethpccal.cpp -${ICU_SOURCE_DIR}/i18n/coleitr.cpp -${ICU_SOURCE_DIR}/i18n/coll.cpp -${ICU_SOURCE_DIR}/i18n/sortkey.cpp -${ICU_SOURCE_DIR}/i18n/bocsu.cpp -${ICU_SOURCE_DIR}/i18n/ucoleitr.cpp -${ICU_SOURCE_DIR}/i18n/ucol.cpp -${ICU_SOURCE_DIR}/i18n/ucol_res.cpp -${ICU_SOURCE_DIR}/i18n/ucol_sit.cpp -${ICU_SOURCE_DIR}/i18n/collation.cpp -${ICU_SOURCE_DIR}/i18n/collationsettings.cpp -${ICU_SOURCE_DIR}/i18n/collationdata.cpp -${ICU_SOURCE_DIR}/i18n/collationtailoring.cpp -${ICU_SOURCE_DIR}/i18n/collationdatareader.cpp -${ICU_SOURCE_DIR}/i18n/collationdatawriter.cpp -${ICU_SOURCE_DIR}/i18n/collationfcd.cpp -${ICU_SOURCE_DIR}/i18n/collationiterator.cpp -${ICU_SOURCE_DIR}/i18n/utf16collationiterator.cpp -${ICU_SOURCE_DIR}/i18n/utf8collationiterator.cpp -${ICU_SOURCE_DIR}/i18n/uitercollationiterator.cpp -${ICU_SOURCE_DIR}/i18n/collationsets.cpp -${ICU_SOURCE_DIR}/i18n/collationcompare.cpp -${ICU_SOURCE_DIR}/i18n/collationfastlatin.cpp -${ICU_SOURCE_DIR}/i18n/collationkeys.cpp -${ICU_SOURCE_DIR}/i18n/rulebasedcollator.cpp -${ICU_SOURCE_DIR}/i18n/collationroot.cpp -${ICU_SOURCE_DIR}/i18n/collationrootelements.cpp -${ICU_SOURCE_DIR}/i18n/collationdatabuilder.cpp -${ICU_SOURCE_DIR}/i18n/collationweights.cpp -${ICU_SOURCE_DIR}/i18n/collationruleparser.cpp -${ICU_SOURCE_DIR}/i18n/collationbuilder.cpp -${ICU_SOURCE_DIR}/i18n/collationfastlatinbuilder.cpp -${ICU_SOURCE_DIR}/i18n/listformatter.cpp -${ICU_SOURCE_DIR}/i18n/ulistformatter.cpp -${ICU_SOURCE_DIR}/i18n/strmatch.cpp -${ICU_SOURCE_DIR}/i18n/usearch.cpp -${ICU_SOURCE_DIR}/i18n/search.cpp -${ICU_SOURCE_DIR}/i18n/stsearch.cpp -${ICU_SOURCE_DIR}/i18n/translit.cpp -${ICU_SOURCE_DIR}/i18n/utrans.cpp -${ICU_SOURCE_DIR}/i18n/esctrn.cpp -${ICU_SOURCE_DIR}/i18n/unesctrn.cpp -${ICU_SOURCE_DIR}/i18n/funcrepl.cpp -${ICU_SOURCE_DIR}/i18n/strrepl.cpp -${ICU_SOURCE_DIR}/i18n/tridpars.cpp -${ICU_SOURCE_DIR}/i18n/cpdtrans.cpp -${ICU_SOURCE_DIR}/i18n/rbt.cpp -${ICU_SOURCE_DIR}/i18n/rbt_data.cpp -${ICU_SOURCE_DIR}/i18n/rbt_pars.cpp -${ICU_SOURCE_DIR}/i18n/rbt_rule.cpp -${ICU_SOURCE_DIR}/i18n/rbt_set.cpp -${ICU_SOURCE_DIR}/i18n/nultrans.cpp -${ICU_SOURCE_DIR}/i18n/remtrans.cpp -${ICU_SOURCE_DIR}/i18n/casetrn.cpp -${ICU_SOURCE_DIR}/i18n/titletrn.cpp -${ICU_SOURCE_DIR}/i18n/tolowtrn.cpp -${ICU_SOURCE_DIR}/i18n/toupptrn.cpp -${ICU_SOURCE_DIR}/i18n/anytrans.cpp -${ICU_SOURCE_DIR}/i18n/name2uni.cpp -${ICU_SOURCE_DIR}/i18n/uni2name.cpp -${ICU_SOURCE_DIR}/i18n/nortrans.cpp 
-${ICU_SOURCE_DIR}/i18n/quant.cpp -${ICU_SOURCE_DIR}/i18n/transreg.cpp -${ICU_SOURCE_DIR}/i18n/brktrans.cpp -${ICU_SOURCE_DIR}/i18n/regexcmp.cpp -${ICU_SOURCE_DIR}/i18n/rematch.cpp -${ICU_SOURCE_DIR}/i18n/repattrn.cpp -${ICU_SOURCE_DIR}/i18n/regexst.cpp -${ICU_SOURCE_DIR}/i18n/regextxt.cpp -${ICU_SOURCE_DIR}/i18n/regeximp.cpp -${ICU_SOURCE_DIR}/i18n/uregex.cpp -${ICU_SOURCE_DIR}/i18n/uregexc.cpp -${ICU_SOURCE_DIR}/i18n/ulocdata.cpp -${ICU_SOURCE_DIR}/i18n/measfmt.cpp -${ICU_SOURCE_DIR}/i18n/currfmt.cpp -${ICU_SOURCE_DIR}/i18n/curramt.cpp -${ICU_SOURCE_DIR}/i18n/currunit.cpp -${ICU_SOURCE_DIR}/i18n/measure.cpp -${ICU_SOURCE_DIR}/i18n/utmscale.cpp -${ICU_SOURCE_DIR}/i18n/csdetect.cpp -${ICU_SOURCE_DIR}/i18n/csmatch.cpp -${ICU_SOURCE_DIR}/i18n/csr2022.cpp -${ICU_SOURCE_DIR}/i18n/csrecog.cpp -${ICU_SOURCE_DIR}/i18n/csrmbcs.cpp -${ICU_SOURCE_DIR}/i18n/csrsbcs.cpp -${ICU_SOURCE_DIR}/i18n/csrucode.cpp -${ICU_SOURCE_DIR}/i18n/csrutf8.cpp -${ICU_SOURCE_DIR}/i18n/inputext.cpp -${ICU_SOURCE_DIR}/i18n/wintzimpl.cpp -${ICU_SOURCE_DIR}/i18n/windtfmt.cpp -${ICU_SOURCE_DIR}/i18n/winnmfmt.cpp -${ICU_SOURCE_DIR}/i18n/basictz.cpp -${ICU_SOURCE_DIR}/i18n/dtrule.cpp -${ICU_SOURCE_DIR}/i18n/rbtz.cpp -${ICU_SOURCE_DIR}/i18n/tzrule.cpp -${ICU_SOURCE_DIR}/i18n/tztrans.cpp -${ICU_SOURCE_DIR}/i18n/vtzone.cpp -${ICU_SOURCE_DIR}/i18n/zonemeta.cpp -${ICU_SOURCE_DIR}/i18n/standardplural.cpp -${ICU_SOURCE_DIR}/i18n/upluralrules.cpp -${ICU_SOURCE_DIR}/i18n/plurrule.cpp -${ICU_SOURCE_DIR}/i18n/plurfmt.cpp -${ICU_SOURCE_DIR}/i18n/selfmt.cpp -${ICU_SOURCE_DIR}/i18n/dtitvfmt.cpp -${ICU_SOURCE_DIR}/i18n/dtitvinf.cpp -${ICU_SOURCE_DIR}/i18n/udateintervalformat.cpp -${ICU_SOURCE_DIR}/i18n/tmunit.cpp -${ICU_SOURCE_DIR}/i18n/tmutamt.cpp -${ICU_SOURCE_DIR}/i18n/tmutfmt.cpp -${ICU_SOURCE_DIR}/i18n/currpinf.cpp -${ICU_SOURCE_DIR}/i18n/uspoof.cpp -${ICU_SOURCE_DIR}/i18n/uspoof_impl.cpp -${ICU_SOURCE_DIR}/i18n/uspoof_build.cpp -${ICU_SOURCE_DIR}/i18n/uspoof_conf.cpp -${ICU_SOURCE_DIR}/i18n/smpdtfst.cpp -${ICU_SOURCE_DIR}/i18n/ztrans.cpp -${ICU_SOURCE_DIR}/i18n/zrule.cpp -${ICU_SOURCE_DIR}/i18n/vzone.cpp -${ICU_SOURCE_DIR}/i18n/fphdlimp.cpp -${ICU_SOURCE_DIR}/i18n/fpositer.cpp -${ICU_SOURCE_DIR}/i18n/ufieldpositer.cpp -${ICU_SOURCE_DIR}/i18n/decNumber.cpp -${ICU_SOURCE_DIR}/i18n/decContext.cpp -${ICU_SOURCE_DIR}/i18n/alphaindex.cpp -${ICU_SOURCE_DIR}/i18n/tznames.cpp -${ICU_SOURCE_DIR}/i18n/tznames_impl.cpp -${ICU_SOURCE_DIR}/i18n/tzgnames.cpp -${ICU_SOURCE_DIR}/i18n/tzfmt.cpp -${ICU_SOURCE_DIR}/i18n/compactdecimalformat.cpp -${ICU_SOURCE_DIR}/i18n/gender.cpp -${ICU_SOURCE_DIR}/i18n/region.cpp -${ICU_SOURCE_DIR}/i18n/scriptset.cpp -${ICU_SOURCE_DIR}/i18n/uregion.cpp -${ICU_SOURCE_DIR}/i18n/reldatefmt.cpp -${ICU_SOURCE_DIR}/i18n/quantityformatter.cpp -${ICU_SOURCE_DIR}/i18n/measunit.cpp -${ICU_SOURCE_DIR}/i18n/sharedbreakiterator.cpp -${ICU_SOURCE_DIR}/i18n/scientificnumberformatter.cpp -${ICU_SOURCE_DIR}/i18n/dayperiodrules.cpp -${ICU_SOURCE_DIR}/i18n/nounit.cpp -${ICU_SOURCE_DIR}/i18n/number_affixutils.cpp -${ICU_SOURCE_DIR}/i18n/number_compact.cpp -${ICU_SOURCE_DIR}/i18n/number_decimalquantity.cpp -${ICU_SOURCE_DIR}/i18n/number_decimfmtprops.cpp -${ICU_SOURCE_DIR}/i18n/number_fluent.cpp -${ICU_SOURCE_DIR}/i18n/number_formatimpl.cpp -${ICU_SOURCE_DIR}/i18n/number_grouping.cpp -${ICU_SOURCE_DIR}/i18n/number_integerwidth.cpp -${ICU_SOURCE_DIR}/i18n/number_longnames.cpp -${ICU_SOURCE_DIR}/i18n/number_modifiers.cpp -${ICU_SOURCE_DIR}/i18n/number_notation.cpp -${ICU_SOURCE_DIR}/i18n/number_output.cpp 
-${ICU_SOURCE_DIR}/i18n/number_padding.cpp -${ICU_SOURCE_DIR}/i18n/number_patternmodifier.cpp -${ICU_SOURCE_DIR}/i18n/number_patternstring.cpp -${ICU_SOURCE_DIR}/i18n/number_rounding.cpp -${ICU_SOURCE_DIR}/i18n/number_scientific.cpp -${ICU_SOURCE_DIR}/i18n/number_utils.cpp -${ICU_SOURCE_DIR}/i18n/number_asformat.cpp -${ICU_SOURCE_DIR}/i18n/number_mapper.cpp -${ICU_SOURCE_DIR}/i18n/number_multiplier.cpp -${ICU_SOURCE_DIR}/i18n/number_currencysymbols.cpp -${ICU_SOURCE_DIR}/i18n/number_skeletons.cpp -${ICU_SOURCE_DIR}/i18n/number_capi.cpp -${ICU_SOURCE_DIR}/i18n/double-conversion-string-to-double.cpp -${ICU_SOURCE_DIR}/i18n/double-conversion-double-to-string.cpp -${ICU_SOURCE_DIR}/i18n/double-conversion-bignum-dtoa.cpp -${ICU_SOURCE_DIR}/i18n/double-conversion-bignum.cpp -${ICU_SOURCE_DIR}/i18n/double-conversion-cached-powers.cpp -${ICU_SOURCE_DIR}/i18n/double-conversion-fast-dtoa.cpp -${ICU_SOURCE_DIR}/i18n/double-conversion-strtod.cpp -${ICU_SOURCE_DIR}/i18n/string_segment.cpp -${ICU_SOURCE_DIR}/i18n/numparse_parsednumber.cpp -${ICU_SOURCE_DIR}/i18n/numparse_impl.cpp -${ICU_SOURCE_DIR}/i18n/numparse_symbols.cpp -${ICU_SOURCE_DIR}/i18n/numparse_decimal.cpp -${ICU_SOURCE_DIR}/i18n/numparse_scientific.cpp -${ICU_SOURCE_DIR}/i18n/numparse_currency.cpp -${ICU_SOURCE_DIR}/i18n/numparse_affixes.cpp -${ICU_SOURCE_DIR}/i18n/numparse_compositions.cpp -${ICU_SOURCE_DIR}/i18n/numparse_validators.cpp -${ICU_SOURCE_DIR}/i18n/numrange_fluent.cpp -${ICU_SOURCE_DIR}/i18n/numrange_impl.cpp -${ICU_SOURCE_DIR}/i18n/erarules.cpp -${ICU_SOURCE_DIR}/i18n/formattedvalue.cpp -${ICU_SOURCE_DIR}/i18n/formattedval_iterimpl.cpp -${ICU_SOURCE_DIR}/i18n/formattedval_sbimpl.cpp -${ICU_SOURCE_DIR}/i18n/formatted_string_builder.cpp) +"${ICU_SOURCE_DIR}/i18n/ucln_in.cpp" +"${ICU_SOURCE_DIR}/i18n/fmtable.cpp" +"${ICU_SOURCE_DIR}/i18n/format.cpp" +"${ICU_SOURCE_DIR}/i18n/msgfmt.cpp" +"${ICU_SOURCE_DIR}/i18n/umsg.cpp" +"${ICU_SOURCE_DIR}/i18n/numfmt.cpp" +"${ICU_SOURCE_DIR}/i18n/unum.cpp" +"${ICU_SOURCE_DIR}/i18n/decimfmt.cpp" +"${ICU_SOURCE_DIR}/i18n/dcfmtsym.cpp" +"${ICU_SOURCE_DIR}/i18n/fmtable_cnv.cpp" +"${ICU_SOURCE_DIR}/i18n/choicfmt.cpp" +"${ICU_SOURCE_DIR}/i18n/datefmt.cpp" +"${ICU_SOURCE_DIR}/i18n/smpdtfmt.cpp" +"${ICU_SOURCE_DIR}/i18n/reldtfmt.cpp" +"${ICU_SOURCE_DIR}/i18n/dtfmtsym.cpp" +"${ICU_SOURCE_DIR}/i18n/udat.cpp" +"${ICU_SOURCE_DIR}/i18n/dtptngen.cpp" +"${ICU_SOURCE_DIR}/i18n/udatpg.cpp" +"${ICU_SOURCE_DIR}/i18n/nfrs.cpp" +"${ICU_SOURCE_DIR}/i18n/nfrule.cpp" +"${ICU_SOURCE_DIR}/i18n/nfsubs.cpp" +"${ICU_SOURCE_DIR}/i18n/rbnf.cpp" +"${ICU_SOURCE_DIR}/i18n/numsys.cpp" +"${ICU_SOURCE_DIR}/i18n/unumsys.cpp" +"${ICU_SOURCE_DIR}/i18n/ucsdet.cpp" +"${ICU_SOURCE_DIR}/i18n/ucal.cpp" +"${ICU_SOURCE_DIR}/i18n/calendar.cpp" +"${ICU_SOURCE_DIR}/i18n/gregocal.cpp" +"${ICU_SOURCE_DIR}/i18n/timezone.cpp" +"${ICU_SOURCE_DIR}/i18n/simpletz.cpp" +"${ICU_SOURCE_DIR}/i18n/olsontz.cpp" +"${ICU_SOURCE_DIR}/i18n/astro.cpp" +"${ICU_SOURCE_DIR}/i18n/taiwncal.cpp" +"${ICU_SOURCE_DIR}/i18n/buddhcal.cpp" +"${ICU_SOURCE_DIR}/i18n/persncal.cpp" +"${ICU_SOURCE_DIR}/i18n/islamcal.cpp" +"${ICU_SOURCE_DIR}/i18n/japancal.cpp" +"${ICU_SOURCE_DIR}/i18n/gregoimp.cpp" +"${ICU_SOURCE_DIR}/i18n/hebrwcal.cpp" +"${ICU_SOURCE_DIR}/i18n/indiancal.cpp" +"${ICU_SOURCE_DIR}/i18n/chnsecal.cpp" +"${ICU_SOURCE_DIR}/i18n/cecal.cpp" +"${ICU_SOURCE_DIR}/i18n/coptccal.cpp" +"${ICU_SOURCE_DIR}/i18n/dangical.cpp" +"${ICU_SOURCE_DIR}/i18n/ethpccal.cpp" +"${ICU_SOURCE_DIR}/i18n/coleitr.cpp" +"${ICU_SOURCE_DIR}/i18n/coll.cpp" +"${ICU_SOURCE_DIR}/i18n/sortkey.cpp" 
+"${ICU_SOURCE_DIR}/i18n/bocsu.cpp" +"${ICU_SOURCE_DIR}/i18n/ucoleitr.cpp" +"${ICU_SOURCE_DIR}/i18n/ucol.cpp" +"${ICU_SOURCE_DIR}/i18n/ucol_res.cpp" +"${ICU_SOURCE_DIR}/i18n/ucol_sit.cpp" +"${ICU_SOURCE_DIR}/i18n/collation.cpp" +"${ICU_SOURCE_DIR}/i18n/collationsettings.cpp" +"${ICU_SOURCE_DIR}/i18n/collationdata.cpp" +"${ICU_SOURCE_DIR}/i18n/collationtailoring.cpp" +"${ICU_SOURCE_DIR}/i18n/collationdatareader.cpp" +"${ICU_SOURCE_DIR}/i18n/collationdatawriter.cpp" +"${ICU_SOURCE_DIR}/i18n/collationfcd.cpp" +"${ICU_SOURCE_DIR}/i18n/collationiterator.cpp" +"${ICU_SOURCE_DIR}/i18n/utf16collationiterator.cpp" +"${ICU_SOURCE_DIR}/i18n/utf8collationiterator.cpp" +"${ICU_SOURCE_DIR}/i18n/uitercollationiterator.cpp" +"${ICU_SOURCE_DIR}/i18n/collationsets.cpp" +"${ICU_SOURCE_DIR}/i18n/collationcompare.cpp" +"${ICU_SOURCE_DIR}/i18n/collationfastlatin.cpp" +"${ICU_SOURCE_DIR}/i18n/collationkeys.cpp" +"${ICU_SOURCE_DIR}/i18n/rulebasedcollator.cpp" +"${ICU_SOURCE_DIR}/i18n/collationroot.cpp" +"${ICU_SOURCE_DIR}/i18n/collationrootelements.cpp" +"${ICU_SOURCE_DIR}/i18n/collationdatabuilder.cpp" +"${ICU_SOURCE_DIR}/i18n/collationweights.cpp" +"${ICU_SOURCE_DIR}/i18n/collationruleparser.cpp" +"${ICU_SOURCE_DIR}/i18n/collationbuilder.cpp" +"${ICU_SOURCE_DIR}/i18n/collationfastlatinbuilder.cpp" +"${ICU_SOURCE_DIR}/i18n/listformatter.cpp" +"${ICU_SOURCE_DIR}/i18n/ulistformatter.cpp" +"${ICU_SOURCE_DIR}/i18n/strmatch.cpp" +"${ICU_SOURCE_DIR}/i18n/usearch.cpp" +"${ICU_SOURCE_DIR}/i18n/search.cpp" +"${ICU_SOURCE_DIR}/i18n/stsearch.cpp" +"${ICU_SOURCE_DIR}/i18n/translit.cpp" +"${ICU_SOURCE_DIR}/i18n/utrans.cpp" +"${ICU_SOURCE_DIR}/i18n/esctrn.cpp" +"${ICU_SOURCE_DIR}/i18n/unesctrn.cpp" +"${ICU_SOURCE_DIR}/i18n/funcrepl.cpp" +"${ICU_SOURCE_DIR}/i18n/strrepl.cpp" +"${ICU_SOURCE_DIR}/i18n/tridpars.cpp" +"${ICU_SOURCE_DIR}/i18n/cpdtrans.cpp" +"${ICU_SOURCE_DIR}/i18n/rbt.cpp" +"${ICU_SOURCE_DIR}/i18n/rbt_data.cpp" +"${ICU_SOURCE_DIR}/i18n/rbt_pars.cpp" +"${ICU_SOURCE_DIR}/i18n/rbt_rule.cpp" +"${ICU_SOURCE_DIR}/i18n/rbt_set.cpp" +"${ICU_SOURCE_DIR}/i18n/nultrans.cpp" +"${ICU_SOURCE_DIR}/i18n/remtrans.cpp" +"${ICU_SOURCE_DIR}/i18n/casetrn.cpp" +"${ICU_SOURCE_DIR}/i18n/titletrn.cpp" +"${ICU_SOURCE_DIR}/i18n/tolowtrn.cpp" +"${ICU_SOURCE_DIR}/i18n/toupptrn.cpp" +"${ICU_SOURCE_DIR}/i18n/anytrans.cpp" +"${ICU_SOURCE_DIR}/i18n/name2uni.cpp" +"${ICU_SOURCE_DIR}/i18n/uni2name.cpp" +"${ICU_SOURCE_DIR}/i18n/nortrans.cpp" +"${ICU_SOURCE_DIR}/i18n/quant.cpp" +"${ICU_SOURCE_DIR}/i18n/transreg.cpp" +"${ICU_SOURCE_DIR}/i18n/brktrans.cpp" +"${ICU_SOURCE_DIR}/i18n/regexcmp.cpp" +"${ICU_SOURCE_DIR}/i18n/rematch.cpp" +"${ICU_SOURCE_DIR}/i18n/repattrn.cpp" +"${ICU_SOURCE_DIR}/i18n/regexst.cpp" +"${ICU_SOURCE_DIR}/i18n/regextxt.cpp" +"${ICU_SOURCE_DIR}/i18n/regeximp.cpp" +"${ICU_SOURCE_DIR}/i18n/uregex.cpp" +"${ICU_SOURCE_DIR}/i18n/uregexc.cpp" +"${ICU_SOURCE_DIR}/i18n/ulocdata.cpp" +"${ICU_SOURCE_DIR}/i18n/measfmt.cpp" +"${ICU_SOURCE_DIR}/i18n/currfmt.cpp" +"${ICU_SOURCE_DIR}/i18n/curramt.cpp" +"${ICU_SOURCE_DIR}/i18n/currunit.cpp" +"${ICU_SOURCE_DIR}/i18n/measure.cpp" +"${ICU_SOURCE_DIR}/i18n/utmscale.cpp" +"${ICU_SOURCE_DIR}/i18n/csdetect.cpp" +"${ICU_SOURCE_DIR}/i18n/csmatch.cpp" +"${ICU_SOURCE_DIR}/i18n/csr2022.cpp" +"${ICU_SOURCE_DIR}/i18n/csrecog.cpp" +"${ICU_SOURCE_DIR}/i18n/csrmbcs.cpp" +"${ICU_SOURCE_DIR}/i18n/csrsbcs.cpp" +"${ICU_SOURCE_DIR}/i18n/csrucode.cpp" +"${ICU_SOURCE_DIR}/i18n/csrutf8.cpp" +"${ICU_SOURCE_DIR}/i18n/inputext.cpp" +"${ICU_SOURCE_DIR}/i18n/wintzimpl.cpp" +"${ICU_SOURCE_DIR}/i18n/windtfmt.cpp" 
+"${ICU_SOURCE_DIR}/i18n/winnmfmt.cpp" +"${ICU_SOURCE_DIR}/i18n/basictz.cpp" +"${ICU_SOURCE_DIR}/i18n/dtrule.cpp" +"${ICU_SOURCE_DIR}/i18n/rbtz.cpp" +"${ICU_SOURCE_DIR}/i18n/tzrule.cpp" +"${ICU_SOURCE_DIR}/i18n/tztrans.cpp" +"${ICU_SOURCE_DIR}/i18n/vtzone.cpp" +"${ICU_SOURCE_DIR}/i18n/zonemeta.cpp" +"${ICU_SOURCE_DIR}/i18n/standardplural.cpp" +"${ICU_SOURCE_DIR}/i18n/upluralrules.cpp" +"${ICU_SOURCE_DIR}/i18n/plurrule.cpp" +"${ICU_SOURCE_DIR}/i18n/plurfmt.cpp" +"${ICU_SOURCE_DIR}/i18n/selfmt.cpp" +"${ICU_SOURCE_DIR}/i18n/dtitvfmt.cpp" +"${ICU_SOURCE_DIR}/i18n/dtitvinf.cpp" +"${ICU_SOURCE_DIR}/i18n/udateintervalformat.cpp" +"${ICU_SOURCE_DIR}/i18n/tmunit.cpp" +"${ICU_SOURCE_DIR}/i18n/tmutamt.cpp" +"${ICU_SOURCE_DIR}/i18n/tmutfmt.cpp" +"${ICU_SOURCE_DIR}/i18n/currpinf.cpp" +"${ICU_SOURCE_DIR}/i18n/uspoof.cpp" +"${ICU_SOURCE_DIR}/i18n/uspoof_impl.cpp" +"${ICU_SOURCE_DIR}/i18n/uspoof_build.cpp" +"${ICU_SOURCE_DIR}/i18n/uspoof_conf.cpp" +"${ICU_SOURCE_DIR}/i18n/smpdtfst.cpp" +"${ICU_SOURCE_DIR}/i18n/ztrans.cpp" +"${ICU_SOURCE_DIR}/i18n/zrule.cpp" +"${ICU_SOURCE_DIR}/i18n/vzone.cpp" +"${ICU_SOURCE_DIR}/i18n/fphdlimp.cpp" +"${ICU_SOURCE_DIR}/i18n/fpositer.cpp" +"${ICU_SOURCE_DIR}/i18n/ufieldpositer.cpp" +"${ICU_SOURCE_DIR}/i18n/decNumber.cpp" +"${ICU_SOURCE_DIR}/i18n/decContext.cpp" +"${ICU_SOURCE_DIR}/i18n/alphaindex.cpp" +"${ICU_SOURCE_DIR}/i18n/tznames.cpp" +"${ICU_SOURCE_DIR}/i18n/tznames_impl.cpp" +"${ICU_SOURCE_DIR}/i18n/tzgnames.cpp" +"${ICU_SOURCE_DIR}/i18n/tzfmt.cpp" +"${ICU_SOURCE_DIR}/i18n/compactdecimalformat.cpp" +"${ICU_SOURCE_DIR}/i18n/gender.cpp" +"${ICU_SOURCE_DIR}/i18n/region.cpp" +"${ICU_SOURCE_DIR}/i18n/scriptset.cpp" +"${ICU_SOURCE_DIR}/i18n/uregion.cpp" +"${ICU_SOURCE_DIR}/i18n/reldatefmt.cpp" +"${ICU_SOURCE_DIR}/i18n/quantityformatter.cpp" +"${ICU_SOURCE_DIR}/i18n/measunit.cpp" +"${ICU_SOURCE_DIR}/i18n/sharedbreakiterator.cpp" +"${ICU_SOURCE_DIR}/i18n/scientificnumberformatter.cpp" +"${ICU_SOURCE_DIR}/i18n/dayperiodrules.cpp" +"${ICU_SOURCE_DIR}/i18n/nounit.cpp" +"${ICU_SOURCE_DIR}/i18n/number_affixutils.cpp" +"${ICU_SOURCE_DIR}/i18n/number_compact.cpp" +"${ICU_SOURCE_DIR}/i18n/number_decimalquantity.cpp" +"${ICU_SOURCE_DIR}/i18n/number_decimfmtprops.cpp" +"${ICU_SOURCE_DIR}/i18n/number_fluent.cpp" +"${ICU_SOURCE_DIR}/i18n/number_formatimpl.cpp" +"${ICU_SOURCE_DIR}/i18n/number_grouping.cpp" +"${ICU_SOURCE_DIR}/i18n/number_integerwidth.cpp" +"${ICU_SOURCE_DIR}/i18n/number_longnames.cpp" +"${ICU_SOURCE_DIR}/i18n/number_modifiers.cpp" +"${ICU_SOURCE_DIR}/i18n/number_notation.cpp" +"${ICU_SOURCE_DIR}/i18n/number_output.cpp" +"${ICU_SOURCE_DIR}/i18n/number_padding.cpp" +"${ICU_SOURCE_DIR}/i18n/number_patternmodifier.cpp" +"${ICU_SOURCE_DIR}/i18n/number_patternstring.cpp" +"${ICU_SOURCE_DIR}/i18n/number_rounding.cpp" +"${ICU_SOURCE_DIR}/i18n/number_scientific.cpp" +"${ICU_SOURCE_DIR}/i18n/number_utils.cpp" +"${ICU_SOURCE_DIR}/i18n/number_asformat.cpp" +"${ICU_SOURCE_DIR}/i18n/number_mapper.cpp" +"${ICU_SOURCE_DIR}/i18n/number_multiplier.cpp" +"${ICU_SOURCE_DIR}/i18n/number_currencysymbols.cpp" +"${ICU_SOURCE_DIR}/i18n/number_skeletons.cpp" +"${ICU_SOURCE_DIR}/i18n/number_capi.cpp" +"${ICU_SOURCE_DIR}/i18n/double-conversion-string-to-double.cpp" +"${ICU_SOURCE_DIR}/i18n/double-conversion-double-to-string.cpp" +"${ICU_SOURCE_DIR}/i18n/double-conversion-bignum-dtoa.cpp" +"${ICU_SOURCE_DIR}/i18n/double-conversion-bignum.cpp" +"${ICU_SOURCE_DIR}/i18n/double-conversion-cached-powers.cpp" +"${ICU_SOURCE_DIR}/i18n/double-conversion-fast-dtoa.cpp" 
+"${ICU_SOURCE_DIR}/i18n/double-conversion-strtod.cpp" +"${ICU_SOURCE_DIR}/i18n/string_segment.cpp" +"${ICU_SOURCE_DIR}/i18n/numparse_parsednumber.cpp" +"${ICU_SOURCE_DIR}/i18n/numparse_impl.cpp" +"${ICU_SOURCE_DIR}/i18n/numparse_symbols.cpp" +"${ICU_SOURCE_DIR}/i18n/numparse_decimal.cpp" +"${ICU_SOURCE_DIR}/i18n/numparse_scientific.cpp" +"${ICU_SOURCE_DIR}/i18n/numparse_currency.cpp" +"${ICU_SOURCE_DIR}/i18n/numparse_affixes.cpp" +"${ICU_SOURCE_DIR}/i18n/numparse_compositions.cpp" +"${ICU_SOURCE_DIR}/i18n/numparse_validators.cpp" +"${ICU_SOURCE_DIR}/i18n/numrange_fluent.cpp" +"${ICU_SOURCE_DIR}/i18n/numrange_impl.cpp" +"${ICU_SOURCE_DIR}/i18n/erarules.cpp" +"${ICU_SOURCE_DIR}/i18n/formattedvalue.cpp" +"${ICU_SOURCE_DIR}/i18n/formattedval_iterimpl.cpp" +"${ICU_SOURCE_DIR}/i18n/formattedval_sbimpl.cpp" +"${ICU_SOURCE_DIR}/i18n/formatted_string_builder.cpp") -file(GENERATE OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/empty.cpp CONTENT " ") +file(GENERATE OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/empty.cpp" CONTENT " ") enable_language(ASM) set(ICUDATA_SOURCES - ${ICUDATA_SOURCE_DIR}/icudt66l_dat.S - ${CMAKE_CURRENT_BINARY_DIR}/empty.cpp # Without this cmake can incorrectly detects library type (OBJECT) instead of SHARED/STATIC + "${ICUDATA_SOURCE_DIR}/icudt66l_dat.S" + "${CMAKE_CURRENT_BINARY_DIR}/empty.cpp" # Without this cmake can incorrectly detects library type (OBJECT) instead of SHARED/STATIC ) # Note that we don't like any kind of binary plugins (because of runtime dependencies, vulnerabilities, ABI incompatibilities). @@ -454,8 +454,8 @@ add_library(icudata ${ICUDATA_SOURCES}) target_link_libraries(icuuc PRIVATE icudata) target_link_libraries(icui18n PRIVATE icuuc) -target_include_directories(icuuc SYSTEM PUBLIC ${ICU_SOURCE_DIR}/common/) -target_include_directories(icui18n SYSTEM PUBLIC ${ICU_SOURCE_DIR}/i18n/) +target_include_directories(icuuc SYSTEM PUBLIC "${ICU_SOURCE_DIR}/common/") +target_include_directories(icui18n SYSTEM PUBLIC "${ICU_SOURCE_DIR}/i18n/") target_compile_definitions(icuuc PRIVATE -DU_COMMON_IMPLEMENTATION) target_compile_definitions(icui18n PRIVATE -DU_I18N_IMPLEMENTATION) diff --git a/contrib/jemalloc-cmake/CMakeLists.txt b/contrib/jemalloc-cmake/CMakeLists.txt index 73afa99f1d8..140b7eb370b 100644 --- a/contrib/jemalloc-cmake/CMakeLists.txt +++ b/contrib/jemalloc-cmake/CMakeLists.txt @@ -1,10 +1,13 @@ -if (SANITIZE OR NOT (ARCH_AMD64 OR ARCH_ARM OR ARCH_PPC64LE) OR NOT (OS_LINUX OR OS_FREEBSD OR OS_DARWIN)) +if (SANITIZE OR NOT ( + ((OS_LINUX OR OS_FREEBSD) AND (ARCH_AMD64 OR ARCH_ARM OR ARCH_PPC64LE)) OR + (OS_DARWIN AND CMAKE_BUILD_TYPE STREQUAL "RelWithDebInfo") +)) if (ENABLE_JEMALLOC) message (${RECONFIGURE_MESSAGE_LEVEL} - "jemalloc is disabled implicitly: it doesn't work with sanitizers and can only be used with x86_64, aarch64 or ppc64le on linux or freebsd.") - endif() + "jemalloc is disabled implicitly: it doesn't work with sanitizers and can only be used with x86_64, aarch64, or ppc64le Linux or FreeBSD builds and RelWithDebInfo macOS builds.") + endif () set (ENABLE_JEMALLOC OFF) -else() +else () option (ENABLE_JEMALLOC "Enable jemalloc allocator" ${ENABLE_LIBRARIES}) endif () @@ -34,9 +37,9 @@ if (OS_LINUX) # avoid spurious latencies and additional work associated with # MADV_DONTNEED. See # https://github.com/ClickHouse/ClickHouse/issues/11121 for motivation. 
- set (JEMALLOC_CONFIG_MALLOC_CONF "percpu_arena:percpu,oversize_threshold:0,muzzy_decay_ms:10000") + set (JEMALLOC_CONFIG_MALLOC_CONF "percpu_arena:percpu,oversize_threshold:0,muzzy_decay_ms:5000,dirty_decay_ms:5000") else() - set (JEMALLOC_CONFIG_MALLOC_CONF "oversize_threshold:0,muzzy_decay_ms:10000") + set (JEMALLOC_CONFIG_MALLOC_CONF "oversize_threshold:0,muzzy_decay_ms:5000,dirty_decay_ms:5000") endif() # CACHE variable is empty, to allow changing defaults without necessity # to purge cache @@ -49,46 +52,46 @@ message (STATUS "jemalloc malloc_conf: ${JEMALLOC_CONFIG_MALLOC_CONF}") set (LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/jemalloc") set (SRCS - ${LIBRARY_DIR}/src/arena.c - ${LIBRARY_DIR}/src/background_thread.c - ${LIBRARY_DIR}/src/base.c - ${LIBRARY_DIR}/src/bin.c - ${LIBRARY_DIR}/src/bitmap.c - ${LIBRARY_DIR}/src/ckh.c - ${LIBRARY_DIR}/src/ctl.c - ${LIBRARY_DIR}/src/div.c - ${LIBRARY_DIR}/src/extent.c - ${LIBRARY_DIR}/src/extent_dss.c - ${LIBRARY_DIR}/src/extent_mmap.c - ${LIBRARY_DIR}/src/hash.c - ${LIBRARY_DIR}/src/hook.c - ${LIBRARY_DIR}/src/jemalloc.c - ${LIBRARY_DIR}/src/large.c - ${LIBRARY_DIR}/src/log.c - ${LIBRARY_DIR}/src/malloc_io.c - ${LIBRARY_DIR}/src/mutex.c - ${LIBRARY_DIR}/src/mutex_pool.c - ${LIBRARY_DIR}/src/nstime.c - ${LIBRARY_DIR}/src/pages.c - ${LIBRARY_DIR}/src/prng.c - ${LIBRARY_DIR}/src/prof.c - ${LIBRARY_DIR}/src/rtree.c - ${LIBRARY_DIR}/src/sc.c - ${LIBRARY_DIR}/src/stats.c - ${LIBRARY_DIR}/src/sz.c - ${LIBRARY_DIR}/src/tcache.c - ${LIBRARY_DIR}/src/test_hooks.c - ${LIBRARY_DIR}/src/ticker.c - ${LIBRARY_DIR}/src/tsd.c - ${LIBRARY_DIR}/src/witness.c - ${LIBRARY_DIR}/src/safety_check.c + "${LIBRARY_DIR}/src/arena.c" + "${LIBRARY_DIR}/src/background_thread.c" + "${LIBRARY_DIR}/src/base.c" + "${LIBRARY_DIR}/src/bin.c" + "${LIBRARY_DIR}/src/bitmap.c" + "${LIBRARY_DIR}/src/ckh.c" + "${LIBRARY_DIR}/src/ctl.c" + "${LIBRARY_DIR}/src/div.c" + "${LIBRARY_DIR}/src/extent.c" + "${LIBRARY_DIR}/src/extent_dss.c" + "${LIBRARY_DIR}/src/extent_mmap.c" + "${LIBRARY_DIR}/src/hash.c" + "${LIBRARY_DIR}/src/hook.c" + "${LIBRARY_DIR}/src/jemalloc.c" + "${LIBRARY_DIR}/src/large.c" + "${LIBRARY_DIR}/src/log.c" + "${LIBRARY_DIR}/src/malloc_io.c" + "${LIBRARY_DIR}/src/mutex.c" + "${LIBRARY_DIR}/src/mutex_pool.c" + "${LIBRARY_DIR}/src/nstime.c" + "${LIBRARY_DIR}/src/pages.c" + "${LIBRARY_DIR}/src/prng.c" + "${LIBRARY_DIR}/src/prof.c" + "${LIBRARY_DIR}/src/rtree.c" + "${LIBRARY_DIR}/src/sc.c" + "${LIBRARY_DIR}/src/stats.c" + "${LIBRARY_DIR}/src/sz.c" + "${LIBRARY_DIR}/src/tcache.c" + "${LIBRARY_DIR}/src/test_hooks.c" + "${LIBRARY_DIR}/src/ticker.c" + "${LIBRARY_DIR}/src/tsd.c" + "${LIBRARY_DIR}/src/witness.c" + "${LIBRARY_DIR}/src/safety_check.c" ) if (OS_DARWIN) - list(APPEND SRCS ${LIBRARY_DIR}/src/zone.c) + list(APPEND SRCS "${LIBRARY_DIR}/src/zone.c") endif () add_library(jemalloc ${SRCS}) -target_include_directories(jemalloc PRIVATE ${LIBRARY_DIR}/include) +target_include_directories(jemalloc PRIVATE "${LIBRARY_DIR}/include") target_include_directories(jemalloc SYSTEM PUBLIC include) set (JEMALLOC_INCLUDE_PREFIX) @@ -116,17 +119,19 @@ endif () configure_file(${JEMALLOC_INCLUDE_PREFIX}/jemalloc/internal/jemalloc_internal_defs.h.in ${JEMALLOC_INCLUDE_PREFIX}/jemalloc/internal/jemalloc_internal_defs.h) target_include_directories(jemalloc SYSTEM PRIVATE - ${CMAKE_CURRENT_BINARY_DIR}/${JEMALLOC_INCLUDE_PREFIX}/jemalloc/internal) + "${CMAKE_CURRENT_BINARY_DIR}/${JEMALLOC_INCLUDE_PREFIX}/jemalloc/internal") target_compile_definitions(jemalloc PRIVATE 
-DJEMALLOC_NO_PRIVATE_NAMESPACE) if (CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG") - target_compile_definitions(jemalloc PRIVATE -DJEMALLOC_DEBUG=1 -DJEMALLOC_PROF=1) + target_compile_definitions(jemalloc PRIVATE -DJEMALLOC_DEBUG=1) +endif () - if (USE_UNWIND) - target_compile_definitions (jemalloc PRIVATE -DJEMALLOC_PROF_LIBUNWIND=1) - target_link_libraries (jemalloc PRIVATE unwind) - endif () +target_compile_definitions(jemalloc PRIVATE -DJEMALLOC_PROF=1) + +if (USE_UNWIND) + target_compile_definitions (jemalloc PRIVATE -DJEMALLOC_PROF_LIBUNWIND=1) + target_link_libraries (jemalloc PRIVATE unwind) endif () target_compile_options(jemalloc PRIVATE -Wno-redundant-decls) diff --git a/contrib/jemalloc-cmake/include_darwin_aarch64/jemalloc/internal/jemalloc_internal_defs.h.in b/contrib/jemalloc-cmake/include_darwin_aarch64/jemalloc/internal/jemalloc_internal_defs.h.in index c7c884d0eaa..5c0407db24a 100644 --- a/contrib/jemalloc-cmake/include_darwin_aarch64/jemalloc/internal/jemalloc_internal_defs.h.in +++ b/contrib/jemalloc-cmake/include_darwin_aarch64/jemalloc/internal/jemalloc_internal_defs.h.in @@ -42,7 +42,7 @@ * total number of bits in a pointer, e.g. on x64, for which the uppermost 16 * bits are the same as bit 47. */ -#define LG_VADDR 48 +#define LG_VADDR 64 /* Defined if C11 atomics are available. */ #define JEMALLOC_C11_ATOMICS 1 @@ -101,11 +101,6 @@ */ #define JEMALLOC_HAVE_MACH_ABSOLUTE_TIME 1 -/* - * Defined if clock_gettime(CLOCK_REALTIME, ...) is available. - */ -#define JEMALLOC_HAVE_CLOCK_REALTIME 1 - /* * Defined if _malloc_thread_cleanup() exists. At least in the case of * FreeBSD, pthread_key_create() allocates, which if used during malloc @@ -181,14 +176,14 @@ /* #undef LG_QUANTUM */ /* One page is 2^LG_PAGE bytes. */ -#define LG_PAGE 16 +#define LG_PAGE 14 /* * One huge page is 2^LG_HUGEPAGE bytes. Note that this is defined even if the * system does not explicitly support huge pages; system calls that require * explicit huge page support are separately configured. */ -#define LG_HUGEPAGE 29 +#define LG_HUGEPAGE 21 /* * If defined, adjacent virtual memory mappings with identical attributes @@ -356,7 +351,7 @@ /* #undef JEMALLOC_EXPORT */ /* config.malloc_conf options string. */ -#define JEMALLOC_CONFIG_MALLOC_CONF "@JEMALLOC_CONFIG_MALLOC_CONF@" +#define JEMALLOC_CONFIG_MALLOC_CONF "" /* If defined, jemalloc takes the malloc/free/etc. symbol names. 
*/ /* #undef JEMALLOC_IS_MALLOC */ diff --git a/contrib/krb5-cmake/CMakeLists.txt b/contrib/krb5-cmake/CMakeLists.txt index fce7fbc582a..7c750ca12b6 100644 --- a/contrib/krb5-cmake/CMakeLists.txt +++ b/contrib/krb5-cmake/CMakeLists.txt @@ -3,465 +3,465 @@ if(NOT AWK_PROGRAM) message(FATAL_ERROR "You need the awk program to build ClickHouse with krb5 enabled.") endif() -set(KRB5_SOURCE_DIR ${ClickHouse_SOURCE_DIR}/contrib/krb5/src) +set(KRB5_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/krb5/src") set(ALL_SRCS - ${KRB5_SOURCE_DIR}/util/et/et_name.c - ${KRB5_SOURCE_DIR}/util/et/com_err.c - ${KRB5_SOURCE_DIR}/util/et/error_message.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_inq_names.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_rel_name.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_unwrap_aead.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_set_name_attr.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_glue.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_imp_cred.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/gssd_pname_to_uid.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_authorize_localname.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_prf.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_acquire_cred_with_pw.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_set_cred_option.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_map_name_to_any.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_inq_cred.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_rel_cred.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_seal.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_delete_sec_context.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_context_time.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_get_name_attr.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_mech_invoke.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_unwrap_iov.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_exp_sec_context.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_init_sec_context.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_accept_sec_context.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_verify.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_sign.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_mechname.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_mechattr.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_complete_auth_token.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_wrap_aead.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_inq_cred_oid.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_rel_buffer.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_initialize.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_export_name_comp.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_set_context_option.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_acquire_cred.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_acquire_cred_imp_name.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_imp_name.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_inq_name.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_set_neg_mechs.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_inq_context.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_export_cred.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_oid_ops.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_inq_context_oid.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_del_name_attr.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_decapsulate_token.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_compare_name.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_rel_name_mapping.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_imp_sec_context.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_dup_name.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_export_name.c - 
${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_wrap_iov.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_rel_oid_set.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_unseal.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_store_cred.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_buffer_set.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_canon_name.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_dsp_status.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_dsp_name.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_dsp_name_ext.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_saslname.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_process_context.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_encapsulate_token.c - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_negoex.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/delete_sec_context.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/lucid_context.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/duplicate_name.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/get_tkt_flags.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/set_allowable_enctypes.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/k5sealiov.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/gssapi_err_krb5.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/canon_name.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/inq_cred.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/export_sec_context.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/inq_names.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/prf.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/k5sealv3iov.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/store_cred.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/import_name.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/export_name.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/naming_exts.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/s4u_gss_glue.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/rel_name.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/k5unsealiov.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/gssapi_krb5.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/disp_status.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/import_cred.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/k5seal.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/accept_sec_context.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/import_sec_context.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/process_context_token.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/disp_name.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/wrap_size_limit.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/krb5_gss_glue.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/util_crypt.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/set_ccache.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/export_cred.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/rel_oid.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/val_cred.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/context_time.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/cred_store.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/iakerb.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/copy_ccache.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/init_sec_context.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/indicate_mechs.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/inq_context.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/util_seed.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/util_seqnum.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/compare_name.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/ser_sctx.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/k5sealv3.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/acquire_cred.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/k5unseal.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/rel_cred.c - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/util_cksum.c - ${KRB5_SOURCE_DIR}/lib/gssapi/generic/disp_com_err_status.c - ${KRB5_SOURCE_DIR}/lib/gssapi/generic/gssapi_generic.c - ${KRB5_SOURCE_DIR}/lib/gssapi/generic/rel_oid_set.c - 
${KRB5_SOURCE_DIR}/lib/gssapi/generic/oid_ops.c - ${KRB5_SOURCE_DIR}/lib/gssapi/generic/util_buffer.c - ${KRB5_SOURCE_DIR}/lib/gssapi/generic/util_buffer_set.c - ${KRB5_SOURCE_DIR}/lib/gssapi/generic/util_set.c - ${KRB5_SOURCE_DIR}/lib/gssapi/generic/util_token.c - ${KRB5_SOURCE_DIR}/lib/gssapi/generic/gssapi_err_generic.c - ${KRB5_SOURCE_DIR}/lib/gssapi/generic/disp_major_status.c - ${KRB5_SOURCE_DIR}/lib/gssapi/generic/util_seqstate.c - ${KRB5_SOURCE_DIR}/lib/gssapi/generic/util_errmap.c - ${KRB5_SOURCE_DIR}/lib/gssapi/generic/rel_buffer.c + "${KRB5_SOURCE_DIR}/util/et/et_name.c" + "${KRB5_SOURCE_DIR}/util/et/com_err.c" + "${KRB5_SOURCE_DIR}/util/et/error_message.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_inq_names.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_rel_name.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_unwrap_aead.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_set_name_attr.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_glue.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_imp_cred.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/gssd_pname_to_uid.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_authorize_localname.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_prf.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_acquire_cred_with_pw.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_set_cred_option.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_map_name_to_any.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_inq_cred.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_rel_cred.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_seal.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_delete_sec_context.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_context_time.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_get_name_attr.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_mech_invoke.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_unwrap_iov.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_exp_sec_context.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_init_sec_context.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_accept_sec_context.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_verify.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_sign.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_mechname.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_mechattr.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_complete_auth_token.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_wrap_aead.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_inq_cred_oid.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_rel_buffer.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_initialize.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_export_name_comp.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_set_context_option.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_acquire_cred.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_acquire_cred_imp_name.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_imp_name.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_inq_name.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_set_neg_mechs.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_inq_context.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_export_cred.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_oid_ops.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_inq_context_oid.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_del_name_attr.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_decapsulate_token.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_compare_name.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_rel_name_mapping.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_imp_sec_context.c" + 
"${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_dup_name.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_export_name.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_wrap_iov.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_rel_oid_set.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_unseal.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_store_cred.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_buffer_set.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_canon_name.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_dsp_status.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_dsp_name.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_dsp_name_ext.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_saslname.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_process_context.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_encapsulate_token.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue/g_negoex.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/delete_sec_context.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/lucid_context.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/duplicate_name.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/get_tkt_flags.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/set_allowable_enctypes.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/k5sealiov.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/gssapi_err_krb5.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/canon_name.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/inq_cred.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/export_sec_context.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/inq_names.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/prf.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/k5sealv3iov.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/store_cred.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/import_name.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/export_name.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/naming_exts.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/s4u_gss_glue.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/rel_name.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/k5unsealiov.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/gssapi_krb5.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/disp_status.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/import_cred.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/k5seal.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/accept_sec_context.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/import_sec_context.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/process_context_token.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/disp_name.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/wrap_size_limit.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/krb5_gss_glue.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/util_crypt.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/set_ccache.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/export_cred.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/rel_oid.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/val_cred.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/context_time.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/cred_store.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/iakerb.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/copy_ccache.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/init_sec_context.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/indicate_mechs.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/inq_context.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/util_seed.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/util_seqnum.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/compare_name.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/ser_sctx.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/k5sealv3.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/acquire_cred.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/k5unseal.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/rel_cred.c" + 
"${KRB5_SOURCE_DIR}/lib/gssapi/krb5/util_cksum.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/generic/disp_com_err_status.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/generic/gssapi_generic.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/generic/rel_oid_set.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/generic/oid_ops.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/generic/util_buffer.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/generic/util_buffer_set.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/generic/util_set.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/generic/util_token.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/generic/gssapi_err_generic.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/generic/disp_major_status.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/generic/util_seqstate.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/generic/util_errmap.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/generic/rel_buffer.c" - ${KRB5_SOURCE_DIR}/lib/gssapi/spnego/spnego_mech.c - ${KRB5_SOURCE_DIR}/lib/gssapi/spnego/negoex_util.c - ${KRB5_SOURCE_DIR}/lib/gssapi/spnego/negoex_ctx.c + "${KRB5_SOURCE_DIR}/lib/gssapi/spnego/spnego_mech.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/spnego/negoex_util.c" + "${KRB5_SOURCE_DIR}/lib/gssapi/spnego/negoex_ctx.c" - # ${KRB5_SOURCE_DIR}/lib/gssapi/spnego/negoex_trace.c + # "${KRB5_SOURCE_DIR}/lib/gssapi/spnego/negoex_trace.c" - ${KRB5_SOURCE_DIR}/lib/crypto/krb/prng.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/enc_dk_cmac.c - # ${KRB5_SOURCE_DIR}/lib/crypto/krb/crc32.c - # ${KRB5_SOURCE_DIR}/lib/crypto/krb/checksum_cbc.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/enctype_util.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/enc_etm.c - # ${KRB5_SOURCE_DIR}/lib/crypto/krb/combine_keys.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/default_state.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/decrypt_iov.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/checksum_dk_cmac.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/etypes.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/old_api_glue.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/cksumtypes.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/prf_cmac.c - # ${KRB5_SOURCE_DIR}/lib/crypto/krb/enc_old.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/decrypt.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/prf_dk.c - # ${KRB5_SOURCE_DIR}/lib/crypto/krb/s2k_des.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/checksum_unkeyed.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/crypto_length.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/block_size.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/string_to_key.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/verify_checksum.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/crypto_libinit.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/derive.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/random_to_key.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/verify_checksum_iov.c - # ${KRB5_SOURCE_DIR}/lib/crypto/krb/checksum_confounder.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/checksum_length.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/enc_dk_hmac.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/make_checksum.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/prf_des.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/prf.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/coll_proof_cksum.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/enc_rc4.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/cf2.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/aead.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/encrypt_iov.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/cksumtype_to_string.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/key.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/enc_raw.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/keylengths.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/checksum_hmac_md5.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/keyed_cksum.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/keyed_checksum_types.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/prf_aes2.c - 
${KRB5_SOURCE_DIR}/lib/crypto/krb/state.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/checksum_dk_hmac.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/encrypt.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/checksum_etm.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/make_random_key.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/string_to_cksumtype.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/mandatory_sumtype.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/make_checksum_iov.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/s2k_rc4.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/valid_cksumtype.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/nfold.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/prng_fortuna.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/encrypt_length.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/cmac.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/keyblocks.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/prf_rc4.c - ${KRB5_SOURCE_DIR}/lib/crypto/krb/s2k_pbkdf2.c - ${KRB5_SOURCE_DIR}/lib/crypto/openssl/enc_provider/aes.c - # ${KRB5_SOURCE_DIR}/lib/crypto/openssl/enc_provider/des.c - ${KRB5_SOURCE_DIR}/lib/crypto/openssl/enc_provider/rc4.c - ${KRB5_SOURCE_DIR}/lib/crypto/openssl/enc_provider/des3.c - #${KRB5_SOURCE_DIR}/lib/crypto/openssl/enc_provider/camellia.c - ${KRB5_SOURCE_DIR}/lib/crypto/openssl/sha256.c - ${KRB5_SOURCE_DIR}/lib/crypto/openssl/hmac.c - ${KRB5_SOURCE_DIR}/lib/crypto/openssl/pbkdf2.c - ${KRB5_SOURCE_DIR}/lib/crypto/openssl/init.c - ${KRB5_SOURCE_DIR}/lib/crypto/openssl/stubs.c - # ${KRB5_SOURCE_DIR}/lib/crypto/openssl/hash_provider/hash_crc32.c - ${KRB5_SOURCE_DIR}/lib/crypto/openssl/hash_provider/hash_evp.c - ${KRB5_SOURCE_DIR}/lib/crypto/openssl/des/des_keys.c - ${KRB5_SOURCE_DIR}/util/support/fake-addrinfo.c - ${KRB5_SOURCE_DIR}/util/support/k5buf.c - ${KRB5_SOURCE_DIR}/util/support/hex.c - ${KRB5_SOURCE_DIR}/util/support/threads.c - ${KRB5_SOURCE_DIR}/util/support/utf8.c - ${KRB5_SOURCE_DIR}/util/support/hashtab.c - ${KRB5_SOURCE_DIR}/util/support/dir_filenames.c - ${KRB5_SOURCE_DIR}/util/support/base64.c - ${KRB5_SOURCE_DIR}/util/support/strerror_r.c - ${KRB5_SOURCE_DIR}/util/support/plugins.c - ${KRB5_SOURCE_DIR}/util/support/path.c - ${KRB5_SOURCE_DIR}/util/support/init-addrinfo.c - ${KRB5_SOURCE_DIR}/util/support/json.c - ${KRB5_SOURCE_DIR}/util/support/errors.c - ${KRB5_SOURCE_DIR}/util/support/utf8_conv.c - ${KRB5_SOURCE_DIR}/util/support/strlcpy.c - ${KRB5_SOURCE_DIR}/util/support/gmt_mktime.c - ${KRB5_SOURCE_DIR}/util/support/zap.c - ${KRB5_SOURCE_DIR}/util/support/bcmp.c - ${KRB5_SOURCE_DIR}/util/support/secure_getenv.c - ${KRB5_SOURCE_DIR}/util/profile/prof_tree.c - ${KRB5_SOURCE_DIR}/util/profile/prof_file.c - ${KRB5_SOURCE_DIR}/util/profile/prof_parse.c - ${KRB5_SOURCE_DIR}/util/profile/prof_get.c - ${KRB5_SOURCE_DIR}/util/profile/prof_set.c - ${KRB5_SOURCE_DIR}/util/profile/prof_err.c - ${KRB5_SOURCE_DIR}/util/profile/prof_init.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/fwd_tgt.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/conv_creds.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/fast.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/ser_adata.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/copy_tick.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/enc_keyhelper.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/ser_actx.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/init_ctx.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/preauth2.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/copy_princ.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/parse_host_string.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/pr_to_salt.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/rd_req.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/pac_sign.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/copy_addrs.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/conv_princ.c - 
${KRB5_SOURCE_DIR}/lib/krb5/krb/rd_rep.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/str_conv.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/gic_opt.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/recvauth.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/copy_cksum.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/ai_authdata.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/ser_ctx.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/appdefault.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/bld_princ.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/in_tkt_sky.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/copy_creds.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/auth_con.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/copy_key.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/kdc_rep_dc.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/mk_cred.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/gic_keytab.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/rd_req_dec.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/set_realm.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/preauth_sam2.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/libdef_parse.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/privsafe.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/ser_auth.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/val_renew.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/addr_order.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/authdata_dec.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/walk_rtree.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/gen_subkey.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/copy_auth.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/chpw.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/mk_req.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/allow_weak.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/mk_rep.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/mk_priv.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/s4u_authdata.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/preauth_otp.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/init_keyblock.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/ser_addr.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/encrypt_tk.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/s4u_creds.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/srv_dec_tkt.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/rd_priv.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/authdata_enc.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/authdata_exp.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/decode_kdc.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/decrypt_tk.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/enc_helper.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/mk_req_ext.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/ser_key.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/preauth_encts.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/send_tgs.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/ser_cksum.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/tgtname.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/encode_kdc.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/rd_cred.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/rd_safe.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/preauth_pkinit.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/srv_rcache.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/chk_trans.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/etype_list.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/get_creds.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/ser_princ.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/gic_pwd.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/authdata.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/gen_save_subkey.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/vfy_increds.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/addr_comp.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/kfree.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/response_items.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/serialize.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/cammac_util.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/gc_via_tkt.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/copy_ctx.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/sendauth.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/addr_srch.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/mk_safe.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/preauth_ec.c - 
${KRB5_SOURCE_DIR}/lib/krb5/krb/bld_pr_ext.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/random_str.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/sname_match.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/princ_comp.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/get_in_tkt.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/gen_seqnum.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/cp_key_cnt.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/mk_error.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/copy_athctr.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/deltat.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/get_etype_info.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/plugin.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/kerrs.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/vic_opt.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/unparse.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/parse.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/rd_error.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/pac.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/valid_times.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/copy_data.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb/padata.c + "${KRB5_SOURCE_DIR}/lib/crypto/krb/prng.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/enc_dk_cmac.c" + # "${KRB5_SOURCE_DIR}/lib/crypto/krb/crc32.c" + # "${KRB5_SOURCE_DIR}/lib/crypto/krb/checksum_cbc.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/enctype_util.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/enc_etm.c" + # "${KRB5_SOURCE_DIR}/lib/crypto/krb/combine_keys.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/default_state.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/decrypt_iov.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/checksum_dk_cmac.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/etypes.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/old_api_glue.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/cksumtypes.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/prf_cmac.c" + # "${KRB5_SOURCE_DIR}/lib/crypto/krb/enc_old.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/decrypt.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/prf_dk.c" + # "${KRB5_SOURCE_DIR}/lib/crypto/krb/s2k_des.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/checksum_unkeyed.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/crypto_length.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/block_size.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/string_to_key.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/verify_checksum.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/crypto_libinit.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/derive.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/random_to_key.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/verify_checksum_iov.c" + # "${KRB5_SOURCE_DIR}/lib/crypto/krb/checksum_confounder.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/checksum_length.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/enc_dk_hmac.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/make_checksum.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/prf_des.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/prf.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/coll_proof_cksum.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/enc_rc4.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/cf2.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/aead.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/encrypt_iov.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/cksumtype_to_string.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/key.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/enc_raw.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/keylengths.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/checksum_hmac_md5.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/keyed_cksum.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/keyed_checksum_types.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/prf_aes2.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/state.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/checksum_dk_hmac.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/encrypt.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/checksum_etm.c" + 
"${KRB5_SOURCE_DIR}/lib/crypto/krb/make_random_key.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/string_to_cksumtype.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/mandatory_sumtype.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/make_checksum_iov.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/s2k_rc4.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/valid_cksumtype.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/nfold.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/prng_fortuna.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/encrypt_length.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/cmac.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/keyblocks.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/prf_rc4.c" + "${KRB5_SOURCE_DIR}/lib/crypto/krb/s2k_pbkdf2.c" + "${KRB5_SOURCE_DIR}/lib/crypto/openssl/enc_provider/aes.c" + # "${KRB5_SOURCE_DIR}/lib/crypto/openssl/enc_provider/des.c" + "${KRB5_SOURCE_DIR}/lib/crypto/openssl/enc_provider/rc4.c" + "${KRB5_SOURCE_DIR}/lib/crypto/openssl/enc_provider/des3.c" + #"${KRB5_SOURCE_DIR}/lib/crypto/openssl/enc_provider/camellia.c" + "${KRB5_SOURCE_DIR}/lib/crypto/openssl/sha256.c" + "${KRB5_SOURCE_DIR}/lib/crypto/openssl/hmac.c" + "${KRB5_SOURCE_DIR}/lib/crypto/openssl/pbkdf2.c" + "${KRB5_SOURCE_DIR}/lib/crypto/openssl/init.c" + "${KRB5_SOURCE_DIR}/lib/crypto/openssl/stubs.c" + # "${KRB5_SOURCE_DIR}/lib/crypto/openssl/hash_provider/hash_crc32.c" + "${KRB5_SOURCE_DIR}/lib/crypto/openssl/hash_provider/hash_evp.c" + "${KRB5_SOURCE_DIR}/lib/crypto/openssl/des/des_keys.c" + "${KRB5_SOURCE_DIR}/util/support/fake-addrinfo.c" + "${KRB5_SOURCE_DIR}/util/support/k5buf.c" + "${KRB5_SOURCE_DIR}/util/support/hex.c" + "${KRB5_SOURCE_DIR}/util/support/threads.c" + "${KRB5_SOURCE_DIR}/util/support/utf8.c" + "${KRB5_SOURCE_DIR}/util/support/hashtab.c" + "${KRB5_SOURCE_DIR}/util/support/dir_filenames.c" + "${KRB5_SOURCE_DIR}/util/support/base64.c" + "${KRB5_SOURCE_DIR}/util/support/strerror_r.c" + "${KRB5_SOURCE_DIR}/util/support/plugins.c" + "${KRB5_SOURCE_DIR}/util/support/path.c" + "${KRB5_SOURCE_DIR}/util/support/init-addrinfo.c" + "${KRB5_SOURCE_DIR}/util/support/json.c" + "${KRB5_SOURCE_DIR}/util/support/errors.c" + "${KRB5_SOURCE_DIR}/util/support/utf8_conv.c" + "${KRB5_SOURCE_DIR}/util/support/strlcpy.c" + "${KRB5_SOURCE_DIR}/util/support/gmt_mktime.c" + "${KRB5_SOURCE_DIR}/util/support/zap.c" + "${KRB5_SOURCE_DIR}/util/support/bcmp.c" + "${KRB5_SOURCE_DIR}/util/support/secure_getenv.c" + "${KRB5_SOURCE_DIR}/util/profile/prof_tree.c" + "${KRB5_SOURCE_DIR}/util/profile/prof_file.c" + "${KRB5_SOURCE_DIR}/util/profile/prof_parse.c" + "${KRB5_SOURCE_DIR}/util/profile/prof_get.c" + "${KRB5_SOURCE_DIR}/util/profile/prof_set.c" + "${KRB5_SOURCE_DIR}/util/profile/prof_err.c" + "${KRB5_SOURCE_DIR}/util/profile/prof_init.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/fwd_tgt.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/conv_creds.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/fast.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/ser_adata.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/copy_tick.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/enc_keyhelper.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/ser_actx.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/init_ctx.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/preauth2.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/copy_princ.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/parse_host_string.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/pr_to_salt.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/rd_req.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/pac_sign.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/copy_addrs.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/conv_princ.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/rd_rep.c" + 
"${KRB5_SOURCE_DIR}/lib/krb5/krb/str_conv.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/gic_opt.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/recvauth.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/copy_cksum.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/ai_authdata.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/ser_ctx.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/appdefault.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/bld_princ.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/in_tkt_sky.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/copy_creds.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/auth_con.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/copy_key.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/kdc_rep_dc.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/mk_cred.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/gic_keytab.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/rd_req_dec.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/set_realm.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/preauth_sam2.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/libdef_parse.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/privsafe.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/ser_auth.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/val_renew.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/addr_order.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/authdata_dec.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/walk_rtree.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/gen_subkey.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/copy_auth.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/chpw.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/mk_req.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/allow_weak.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/mk_rep.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/mk_priv.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/s4u_authdata.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/preauth_otp.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/init_keyblock.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/ser_addr.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/encrypt_tk.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/s4u_creds.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/srv_dec_tkt.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/rd_priv.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/authdata_enc.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/authdata_exp.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/decode_kdc.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/decrypt_tk.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/enc_helper.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/mk_req_ext.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/ser_key.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/preauth_encts.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/send_tgs.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/ser_cksum.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/tgtname.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/encode_kdc.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/rd_cred.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/rd_safe.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/preauth_pkinit.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/srv_rcache.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/chk_trans.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/etype_list.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/get_creds.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/ser_princ.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/gic_pwd.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/authdata.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/gen_save_subkey.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/vfy_increds.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/addr_comp.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/kfree.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/response_items.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/serialize.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/cammac_util.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/gc_via_tkt.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/copy_ctx.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/sendauth.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/addr_srch.c" + 
"${KRB5_SOURCE_DIR}/lib/krb5/krb/mk_safe.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/preauth_ec.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/bld_pr_ext.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/random_str.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/sname_match.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/princ_comp.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/get_in_tkt.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/gen_seqnum.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/cp_key_cnt.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/mk_error.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/copy_athctr.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/deltat.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/get_etype_info.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/plugin.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/kerrs.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/vic_opt.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/unparse.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/parse.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/rd_error.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/pac.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/valid_times.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/copy_data.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb/padata.c" - ${KRB5_SOURCE_DIR}/lib/krb5/os/hostrealm.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/thread_safe.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/krbfileio.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/toffset.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/hostaddr.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/ustime.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/timeofday.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/ccdefname.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/full_ipadr.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/read_pwd.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/trace.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/localauth_k5login.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/localauth_rule.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/localaddr.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/hostrealm_dns.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/hostrealm_domain.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/sn2princ.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/net_write.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/gen_rname.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/net_read.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/accessor.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/hostrealm_profile.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/c_ustime.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/expand_path.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/port2ip.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/changepw.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/unlck_file.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/gen_port.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/localauth_an2ln.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/genaddrs.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/init_os_ctx.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/localauth.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/locate_kdc.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/prompter.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/ktdefname.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/realm_dom.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/dnssrv.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/mk_faddr.c - # ${KRB5_SOURCE_DIR}/lib/krb5/os/dnsglue.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/sendto_kdc.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/hostrealm_registry.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/write_msg.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/localauth_names.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/read_msg.c - ${KRB5_SOURCE_DIR}/lib/krb5/os/lock_file.c - ${KRB5_SOURCE_DIR}/lib/krb5/ccache/ccselect.c - ${KRB5_SOURCE_DIR}/lib/krb5/ccache/ccselect_realm.c - # ${KRB5_SOURCE_DIR}/lib/krb5/ccache/ser_cc.c + "${KRB5_SOURCE_DIR}/lib/krb5/os/hostrealm.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/thread_safe.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/krbfileio.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/toffset.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/hostaddr.c" + 
"${KRB5_SOURCE_DIR}/lib/krb5/os/ustime.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/timeofday.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/ccdefname.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/full_ipadr.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/read_pwd.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/trace.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/localauth_k5login.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/localauth_rule.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/localaddr.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/hostrealm_dns.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/hostrealm_domain.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/sn2princ.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/net_write.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/gen_rname.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/net_read.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/accessor.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/hostrealm_profile.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/c_ustime.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/expand_path.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/port2ip.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/changepw.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/unlck_file.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/gen_port.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/localauth_an2ln.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/genaddrs.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/init_os_ctx.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/localauth.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/locate_kdc.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/prompter.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/ktdefname.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/realm_dom.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/dnssrv.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/mk_faddr.c" + # "${KRB5_SOURCE_DIR}/lib/krb5/os/dnsglue.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/sendto_kdc.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/hostrealm_registry.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/write_msg.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/localauth_names.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/read_msg.c" + "${KRB5_SOURCE_DIR}/lib/krb5/os/lock_file.c" + "${KRB5_SOURCE_DIR}/lib/krb5/ccache/ccselect.c" + "${KRB5_SOURCE_DIR}/lib/krb5/ccache/ccselect_realm.c" + # "${KRB5_SOURCE_DIR}/lib/krb5/ccache/ser_cc.c" - ${KRB5_SOURCE_DIR}/lib/krb5/ccache/ccdefops.c - ${KRB5_SOURCE_DIR}/lib/krb5/ccache/cc_retr.c - ${KRB5_SOURCE_DIR}/lib/krb5/ccache/ccselect_k5identity.c - ${KRB5_SOURCE_DIR}/lib/krb5/ccache/cccopy.c - ${KRB5_SOURCE_DIR}/lib/krb5/ccache/ccfns.c - ${KRB5_SOURCE_DIR}/lib/krb5/ccache/cc_file.c - ${KRB5_SOURCE_DIR}/lib/krb5/ccache/ccbase.c - ${KRB5_SOURCE_DIR}/lib/krb5/ccache/cccursor.c - ${KRB5_SOURCE_DIR}/lib/krb5/ccache/ccdefault.c - ${KRB5_SOURCE_DIR}/lib/krb5/ccache/cc_memory.c - ${KRB5_SOURCE_DIR}/lib/krb5/ccache/ccmarshal.c - ${KRB5_SOURCE_DIR}/lib/krb5/ccache/ccselect_hostname.c - ${KRB5_SOURCE_DIR}/lib/krb5/ccache/cc_dir.c - ${KRB5_SOURCE_DIR}/lib/krb5/ccache/cc_keyring.c - ${KRB5_SOURCE_DIR}/lib/krb5/ccache/cc_kcm.c - ${KRB5_SOURCE_DIR}/lib/krb5/keytab/ktadd.c - ${KRB5_SOURCE_DIR}/lib/krb5/keytab/ktbase.c - ${KRB5_SOURCE_DIR}/lib/krb5/keytab/ktdefault.c - ${KRB5_SOURCE_DIR}/lib/krb5/keytab/kt_memory.c - ${KRB5_SOURCE_DIR}/lib/krb5/keytab/ktfns.c - ${KRB5_SOURCE_DIR}/lib/krb5/keytab/ktremove.c - ${KRB5_SOURCE_DIR}/lib/krb5/keytab/read_servi.c - ${KRB5_SOURCE_DIR}/lib/krb5/keytab/kt_file.c - ${KRB5_SOURCE_DIR}/lib/krb5/keytab/read_servi.c - ${KRB5_SOURCE_DIR}/lib/krb5/keytab/ktfr_entry.c + "${KRB5_SOURCE_DIR}/lib/krb5/ccache/ccdefops.c" + "${KRB5_SOURCE_DIR}/lib/krb5/ccache/cc_retr.c" + "${KRB5_SOURCE_DIR}/lib/krb5/ccache/ccselect_k5identity.c" + "${KRB5_SOURCE_DIR}/lib/krb5/ccache/cccopy.c" + "${KRB5_SOURCE_DIR}/lib/krb5/ccache/ccfns.c" + 
"${KRB5_SOURCE_DIR}/lib/krb5/ccache/cc_file.c" + "${KRB5_SOURCE_DIR}/lib/krb5/ccache/ccbase.c" + "${KRB5_SOURCE_DIR}/lib/krb5/ccache/cccursor.c" + "${KRB5_SOURCE_DIR}/lib/krb5/ccache/ccdefault.c" + "${KRB5_SOURCE_DIR}/lib/krb5/ccache/cc_memory.c" + "${KRB5_SOURCE_DIR}/lib/krb5/ccache/ccmarshal.c" + "${KRB5_SOURCE_DIR}/lib/krb5/ccache/ccselect_hostname.c" + "${KRB5_SOURCE_DIR}/lib/krb5/ccache/cc_dir.c" + "${KRB5_SOURCE_DIR}/lib/krb5/ccache/cc_keyring.c" + "${KRB5_SOURCE_DIR}/lib/krb5/ccache/cc_kcm.c" + "${KRB5_SOURCE_DIR}/lib/krb5/keytab/ktadd.c" + "${KRB5_SOURCE_DIR}/lib/krb5/keytab/ktbase.c" + "${KRB5_SOURCE_DIR}/lib/krb5/keytab/ktdefault.c" + "${KRB5_SOURCE_DIR}/lib/krb5/keytab/kt_memory.c" + "${KRB5_SOURCE_DIR}/lib/krb5/keytab/ktfns.c" + "${KRB5_SOURCE_DIR}/lib/krb5/keytab/ktremove.c" + "${KRB5_SOURCE_DIR}/lib/krb5/keytab/read_servi.c" + "${KRB5_SOURCE_DIR}/lib/krb5/keytab/kt_file.c" + "${KRB5_SOURCE_DIR}/lib/krb5/keytab/read_servi.c" + "${KRB5_SOURCE_DIR}/lib/krb5/keytab/ktfr_entry.c" - ${KRB5_SOURCE_DIR}/lib/krb5/error_tables/k5e1_err.c - ${KRB5_SOURCE_DIR}/lib/krb5/error_tables/kdb5_err.c - ${KRB5_SOURCE_DIR}/lib/krb5/error_tables/asn1_err.c - ${KRB5_SOURCE_DIR}/lib/krb5/error_tables/krb5_err.c - ${KRB5_SOURCE_DIR}/lib/krb5/error_tables/krb524_err.c - ${KRB5_SOURCE_DIR}/lib/krb5/error_tables/kv5m_err.c + "${KRB5_SOURCE_DIR}/lib/krb5/error_tables/k5e1_err.c" + "${KRB5_SOURCE_DIR}/lib/krb5/error_tables/kdb5_err.c" + "${KRB5_SOURCE_DIR}/lib/krb5/error_tables/asn1_err.c" + "${KRB5_SOURCE_DIR}/lib/krb5/error_tables/krb5_err.c" + "${KRB5_SOURCE_DIR}/lib/krb5/error_tables/krb524_err.c" + "${KRB5_SOURCE_DIR}/lib/krb5/error_tables/kv5m_err.c" - ${KRB5_SOURCE_DIR}/lib/krb5/rcache/rc_base.c - ${KRB5_SOURCE_DIR}/lib/krb5/rcache/rc_dfl.c - ${KRB5_SOURCE_DIR}/lib/krb5/rcache/rc_file2.c - ${KRB5_SOURCE_DIR}/lib/krb5/rcache/rc_none.c - ${KRB5_SOURCE_DIR}/lib/krb5/rcache/memrcache.c - ${KRB5_SOURCE_DIR}/lib/krb5/unicode/ucdata/ucdata.c - ${KRB5_SOURCE_DIR}/lib/krb5/unicode/ucstr.c - ${KRB5_SOURCE_DIR}/lib/krb5/asn.1/asn1_encode.c - ${KRB5_SOURCE_DIR}/lib/krb5/asn.1/asn1_k_encode.c - ${KRB5_SOURCE_DIR}/lib/krb5/asn.1/ldap_key_seq.c - ${KRB5_SOURCE_DIR}/lib/krb5/krb5_libinit.c + "${KRB5_SOURCE_DIR}/lib/krb5/rcache/rc_base.c" + "${KRB5_SOURCE_DIR}/lib/krb5/rcache/rc_dfl.c" + "${KRB5_SOURCE_DIR}/lib/krb5/rcache/rc_file2.c" + "${KRB5_SOURCE_DIR}/lib/krb5/rcache/rc_none.c" + "${KRB5_SOURCE_DIR}/lib/krb5/rcache/memrcache.c" + "${KRB5_SOURCE_DIR}/lib/krb5/unicode/ucdata/ucdata.c" + "${KRB5_SOURCE_DIR}/lib/krb5/unicode/ucstr.c" + "${KRB5_SOURCE_DIR}/lib/krb5/asn.1/asn1_encode.c" + "${KRB5_SOURCE_DIR}/lib/krb5/asn.1/asn1_k_encode.c" + "${KRB5_SOURCE_DIR}/lib/krb5/asn.1/ldap_key_seq.c" + "${KRB5_SOURCE_DIR}/lib/krb5/krb5_libinit.c" ) add_custom_command( - OUTPUT ${KRB5_SOURCE_DIR}/util/et/compile_et + OUTPUT "${KRB5_SOURCE_DIR}/util/et/compile_et" COMMAND /bin/sh ./config_script ./compile_et.sh @@ -470,7 +470,7 @@ add_custom_command( sed > compile_et - DEPENDS ${KRB5_SOURCE_DIR}/util/et/compile_et.sh ${KRB5_SOURCE_DIR}/util/et/config_script + DEPENDS "${KRB5_SOURCE_DIR}/util/et/compile_et.sh" "${KRB5_SOURCE_DIR}/util/et/config_script" WORKING_DIRECTORY "${KRB5_SOURCE_DIR}/util/et" ) @@ -497,8 +497,8 @@ function(preprocess_et out_var) get_filename_component(ET_PATH ${in_f} DIRECTORY) add_custom_command(OUTPUT ${F_C} ${F_H} - COMMAND perl ${KRB5_SOURCE_DIR}/util/et/compile_et -d "${KRB5_SOURCE_DIR}/util/et" ${in_f} - DEPENDS ${in_f} ${KRB5_SOURCE_DIR}/util/et/compile_et + COMMAND perl 
"${KRB5_SOURCE_DIR}/util/et/compile_et" -d "${KRB5_SOURCE_DIR}/util/et" ${in_f} + DEPENDS ${in_f} "${KRB5_SOURCE_DIR}/util/et/compile_et" WORKING_DIRECTORY ${ET_PATH} COMMENT "Creating preprocessed file ${F_C}" VERBATIM @@ -509,7 +509,7 @@ function(preprocess_et out_var) endfunction() add_custom_command( - OUTPUT ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/error_map.h + OUTPUT "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/error_map.h" COMMAND perl -I../../../util ../../../util/gen-map.pl @@ -525,27 +525,27 @@ add_custom_command( add_custom_target( ERROR_MAP_H - DEPENDS ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/error_map.h + DEPENDS "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/error_map.h" COMMENT "generating error_map.h" VERBATIM ) add_custom_command( - OUTPUT ${KRB5_SOURCE_DIR}/lib/gssapi/generic/errmap.h + OUTPUT "${KRB5_SOURCE_DIR}/lib/gssapi/generic/errmap.h" COMMAND perl -w -I../../../util ../../../util/gen.pl bimap errmap.h NAME=mecherrmap LEFT=OM_uint32 RIGHT=struct\ mecherror LEFTPRINT=print_OM_uint32 RIGHTPRINT=mecherror_print LEFTCMP=cmp_OM_uint32 RIGHTCMP=mecherror_cmp WORKING_DIRECTORY "${KRB5_SOURCE_DIR}/lib/gssapi/generic" ) add_custom_target( ERRMAP_H - DEPENDS ${KRB5_SOURCE_DIR}/lib/gssapi/generic/errmap.h + DEPENDS "${KRB5_SOURCE_DIR}/lib/gssapi/generic/errmap.h" COMMENT "generating errmap.h" VERBATIM ) add_custom_target( KRB_5_H - DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/include/krb5/krb5.h + DEPENDS "${CMAKE_CURRENT_BINARY_DIR}/include/krb5/krb5.h" COMMENT "generating krb5.h" VERBATIM ) @@ -563,12 +563,12 @@ preprocess_et(processed_et_files ${ET_FILES}) if(CMAKE_SYSTEM_NAME MATCHES "Darwin") add_custom_command( - OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/include_private/kcmrpc.h ${CMAKE_CURRENT_BINARY_DIR}/include_private/kcmrpc.c - COMMAND mig -header kcmrpc.h -user kcmrpc.c -sheader /dev/null -server /dev/null -I${KRB5_SOURCE_DIR}/lib/krb5/ccache ${KRB5_SOURCE_DIR}/lib/krb5/ccache/kcmrpc.defs + OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/include_private/kcmrpc.h" "${CMAKE_CURRENT_BINARY_DIR}/include_private/kcmrpc.c" + COMMAND mig -header kcmrpc.h -user kcmrpc.c -sheader /dev/null -server /dev/null -I"${KRB5_SOURCE_DIR}/lib/krb5/ccache" "${KRB5_SOURCE_DIR}/lib/krb5/ccache/kcmrpc.defs" WORKING_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/include_private" ) - list(APPEND ALL_SRCS ${CMAKE_CURRENT_BINARY_DIR}/include_private/kcmrpc.c) + list(APPEND ALL_SRCS "${CMAKE_CURRENT_BINARY_DIR}/include_private/kcmrpc.c") endif() target_sources(${KRB5_LIBRARY} PRIVATE @@ -576,98 +576,98 @@ target_sources(${KRB5_LIBRARY} PRIVATE ) file(MAKE_DIRECTORY - ${CMAKE_CURRENT_BINARY_DIR}/include/gssapi + "${CMAKE_CURRENT_BINARY_DIR}/include/gssapi" ) file(GLOB GSSAPI_GENERIC_HEADERS - ${KRB5_SOURCE_DIR}/lib/gssapi/generic/*.h - ${KRB5_SOURCE_DIR}/lib/gssapi/generic/gssapi.hin + "${KRB5_SOURCE_DIR}/lib/gssapi/generic/*.h" + "${KRB5_SOURCE_DIR}/lib/gssapi/generic/gssapi.hin" ) file(COPY ${GSSAPI_GENERIC_HEADERS} - DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/include/gssapi/ + DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/include/gssapi/" ) file(RENAME - ${CMAKE_CURRENT_BINARY_DIR}/include/gssapi/gssapi.hin - ${CMAKE_CURRENT_BINARY_DIR}/include/gssapi/gssapi.h + "${CMAKE_CURRENT_BINARY_DIR}/include/gssapi/gssapi.hin" + "${CMAKE_CURRENT_BINARY_DIR}/include/gssapi/gssapi.h" ) -file(COPY ${KRB5_SOURCE_DIR}/lib/gssapi/krb5/gssapi_krb5.h - DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/include/gssapi/ +file(COPY "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/gssapi_krb5.h" + DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/include/gssapi/" ) -file(COPY ${KRB5_SOURCE_DIR}/util/et/com_err.h - 
DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/include/ +file(COPY "${KRB5_SOURCE_DIR}/util/et/com_err.h" + DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/include/" ) -file(COPY ${CMAKE_CURRENT_SOURCE_DIR}/osconf.h - DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/include_private/ +file(COPY "${CMAKE_CURRENT_SOURCE_DIR}/osconf.h" + DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/include_private/" ) -file(COPY ${CMAKE_CURRENT_SOURCE_DIR}/profile.h - DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/include_private/ +file(COPY "${CMAKE_CURRENT_SOURCE_DIR}/profile.h" + DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/include_private/" ) string(TOLOWER "${CMAKE_SYSTEM_NAME}" _system_name) -file(COPY ${CMAKE_CURRENT_SOURCE_DIR}/autoconf_${_system_name}.h - DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/include_private/ +file(COPY "${CMAKE_CURRENT_SOURCE_DIR}/autoconf_${_system_name}.h" + DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/include_private/" ) file(RENAME - ${CMAKE_CURRENT_BINARY_DIR}/include_private/autoconf_${_system_name}.h - ${CMAKE_CURRENT_BINARY_DIR}/include_private/autoconf.h + "${CMAKE_CURRENT_BINARY_DIR}/include_private/autoconf_${_system_name}.h" + "${CMAKE_CURRENT_BINARY_DIR}/include_private/autoconf.h" ) file(MAKE_DIRECTORY - ${CMAKE_CURRENT_BINARY_DIR}/include/krb5 + "${CMAKE_CURRENT_BINARY_DIR}/include/krb5" ) SET(KRBHDEP - ${KRB5_SOURCE_DIR}/include/krb5/krb5.hin - ${KRB5_SOURCE_DIR}/lib/krb5/error_tables/krb5_err.h - ${KRB5_SOURCE_DIR}/lib/krb5/error_tables/k5e1_err.h - ${KRB5_SOURCE_DIR}/lib/krb5/error_tables/kdb5_err.h - ${KRB5_SOURCE_DIR}/lib/krb5/error_tables/kv5m_err.h - ${KRB5_SOURCE_DIR}/lib/krb5/error_tables/krb524_err.h - ${KRB5_SOURCE_DIR}/lib/krb5/error_tables/asn1_err.h + "${KRB5_SOURCE_DIR}/include/krb5/krb5.hin" + "${KRB5_SOURCE_DIR}/lib/krb5/error_tables/krb5_err.h" + "${KRB5_SOURCE_DIR}/lib/krb5/error_tables/k5e1_err.h" + "${KRB5_SOURCE_DIR}/lib/krb5/error_tables/kdb5_err.h" + "${KRB5_SOURCE_DIR}/lib/krb5/error_tables/kv5m_err.h" + "${KRB5_SOURCE_DIR}/lib/krb5/error_tables/krb524_err.h" + "${KRB5_SOURCE_DIR}/lib/krb5/error_tables/asn1_err.h" ) # cmake < 3.18 does not have 'cat' command add_custom_command( - OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/include/krb5/krb5.h - COMMAND cat ${KRBHDEP} > ${CMAKE_CURRENT_BINARY_DIR}/include/krb5/krb5.h + OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/include/krb5/krb5.h" + COMMAND cat ${KRBHDEP} > "${CMAKE_CURRENT_BINARY_DIR}/include/krb5/krb5.h" DEPENDS ${KRBHDEP} ) target_include_directories(${KRB5_LIBRARY} PUBLIC - ${KRB5_SOURCE_DIR}/include - ${CMAKE_CURRENT_BINARY_DIR}/include + "${KRB5_SOURCE_DIR}/include" + "${CMAKE_CURRENT_BINARY_DIR}/include" ) target_include_directories(${KRB5_LIBRARY} PRIVATE - ${CMAKE_CURRENT_BINARY_DIR}/include_private # For autoconf.h and other generated headers. + "${CMAKE_CURRENT_BINARY_DIR}/include_private" # For autoconf.h and other generated headers. 
${KRB5_SOURCE_DIR} - ${KRB5_SOURCE_DIR}/include - ${KRB5_SOURCE_DIR}/lib/gssapi/mechglue - ${KRB5_SOURCE_DIR}/lib/ - ${KRB5_SOURCE_DIR}/lib/gssapi - ${KRB5_SOURCE_DIR}/lib/gssapi/generic - ${KRB5_SOURCE_DIR}/lib/gssapi/krb5 - ${KRB5_SOURCE_DIR}/lib/gssapi/spnego - ${KRB5_SOURCE_DIR}/util/et - ${KRB5_SOURCE_DIR}/lib/crypto/openssl - ${KRB5_SOURCE_DIR}/lib/crypto/krb - ${KRB5_SOURCE_DIR}/util/profile - ${KRB5_SOURCE_DIR}/lib/krb5/ccache/ccapi - ${KRB5_SOURCE_DIR}/lib/krb5/ccache - ${KRB5_SOURCE_DIR}/lib/krb5/keytab - ${KRB5_SOURCE_DIR}/lib/krb5/rcache - ${KRB5_SOURCE_DIR}/lib/krb5/unicode - ${KRB5_SOURCE_DIR}/lib/krb5/os + "${KRB5_SOURCE_DIR}/include" + "${KRB5_SOURCE_DIR}/lib/gssapi/mechglue" + "${KRB5_SOURCE_DIR}/lib/" + "${KRB5_SOURCE_DIR}/lib/gssapi" + "${KRB5_SOURCE_DIR}/lib/gssapi/generic" + "${KRB5_SOURCE_DIR}/lib/gssapi/krb5" + "${KRB5_SOURCE_DIR}/lib/gssapi/spnego" + "${KRB5_SOURCE_DIR}/util/et" + "${KRB5_SOURCE_DIR}/lib/crypto/openssl" + "${KRB5_SOURCE_DIR}/lib/crypto/krb" + "${KRB5_SOURCE_DIR}/util/profile" + "${KRB5_SOURCE_DIR}/lib/krb5/ccache/ccapi" + "${KRB5_SOURCE_DIR}/lib/krb5/ccache" + "${KRB5_SOURCE_DIR}/lib/krb5/keytab" + "${KRB5_SOURCE_DIR}/lib/krb5/rcache" + "${KRB5_SOURCE_DIR}/lib/krb5/unicode" + "${KRB5_SOURCE_DIR}/lib/krb5/os" # ${OPENSSL_INCLUDE_DIR} ) diff --git a/contrib/libcxx b/contrib/libcxx index 8b80a151d12..2fa892f69ac 160000 --- a/contrib/libcxx +++ b/contrib/libcxx @@ -1 +1 @@ -Subproject commit 8b80a151d12b98ffe2d0c22f7cec12c3b9ff88d7 +Subproject commit 2fa892f69acbaa40f8a18c6484854a6183a34482 diff --git a/contrib/libcxx-cmake/CMakeLists.txt b/contrib/libcxx-cmake/CMakeLists.txt index 3b5d53cd1c0..0cfb4191619 100644 --- a/contrib/libcxx-cmake/CMakeLists.txt +++ b/contrib/libcxx-cmake/CMakeLists.txt @@ -1,49 +1,49 @@ include(CheckCXXCompilerFlag) -set(LIBCXX_SOURCE_DIR ${ClickHouse_SOURCE_DIR}/contrib/libcxx) +set(LIBCXX_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/libcxx") set(SRCS -${LIBCXX_SOURCE_DIR}/src/algorithm.cpp -${LIBCXX_SOURCE_DIR}/src/any.cpp -${LIBCXX_SOURCE_DIR}/src/atomic.cpp -${LIBCXX_SOURCE_DIR}/src/barrier.cpp -${LIBCXX_SOURCE_DIR}/src/bind.cpp -${LIBCXX_SOURCE_DIR}/src/charconv.cpp -${LIBCXX_SOURCE_DIR}/src/chrono.cpp -${LIBCXX_SOURCE_DIR}/src/condition_variable.cpp -${LIBCXX_SOURCE_DIR}/src/condition_variable_destructor.cpp -${LIBCXX_SOURCE_DIR}/src/debug.cpp -${LIBCXX_SOURCE_DIR}/src/exception.cpp -${LIBCXX_SOURCE_DIR}/src/experimental/memory_resource.cpp -${LIBCXX_SOURCE_DIR}/src/filesystem/directory_iterator.cpp -${LIBCXX_SOURCE_DIR}/src/filesystem/int128_builtins.cpp -${LIBCXX_SOURCE_DIR}/src/filesystem/operations.cpp -${LIBCXX_SOURCE_DIR}/src/functional.cpp -${LIBCXX_SOURCE_DIR}/src/future.cpp -${LIBCXX_SOURCE_DIR}/src/hash.cpp -${LIBCXX_SOURCE_DIR}/src/ios.cpp -${LIBCXX_SOURCE_DIR}/src/ios.instantiations.cpp -${LIBCXX_SOURCE_DIR}/src/iostream.cpp -${LIBCXX_SOURCE_DIR}/src/locale.cpp -${LIBCXX_SOURCE_DIR}/src/memory.cpp -${LIBCXX_SOURCE_DIR}/src/mutex.cpp -${LIBCXX_SOURCE_DIR}/src/mutex_destructor.cpp -${LIBCXX_SOURCE_DIR}/src/new.cpp -${LIBCXX_SOURCE_DIR}/src/optional.cpp -${LIBCXX_SOURCE_DIR}/src/random.cpp -${LIBCXX_SOURCE_DIR}/src/random_shuffle.cpp -${LIBCXX_SOURCE_DIR}/src/regex.cpp -${LIBCXX_SOURCE_DIR}/src/shared_mutex.cpp -${LIBCXX_SOURCE_DIR}/src/stdexcept.cpp -${LIBCXX_SOURCE_DIR}/src/string.cpp -${LIBCXX_SOURCE_DIR}/src/strstream.cpp -${LIBCXX_SOURCE_DIR}/src/system_error.cpp -${LIBCXX_SOURCE_DIR}/src/thread.cpp -${LIBCXX_SOURCE_DIR}/src/typeinfo.cpp -${LIBCXX_SOURCE_DIR}/src/utility.cpp 
-${LIBCXX_SOURCE_DIR}/src/valarray.cpp
-${LIBCXX_SOURCE_DIR}/src/variant.cpp
-${LIBCXX_SOURCE_DIR}/src/vector.cpp
+"${LIBCXX_SOURCE_DIR}/src/algorithm.cpp"
+"${LIBCXX_SOURCE_DIR}/src/any.cpp"
+"${LIBCXX_SOURCE_DIR}/src/atomic.cpp"
+"${LIBCXX_SOURCE_DIR}/src/barrier.cpp"
+"${LIBCXX_SOURCE_DIR}/src/bind.cpp"
+"${LIBCXX_SOURCE_DIR}/src/charconv.cpp"
+"${LIBCXX_SOURCE_DIR}/src/chrono.cpp"
+"${LIBCXX_SOURCE_DIR}/src/condition_variable.cpp"
+"${LIBCXX_SOURCE_DIR}/src/condition_variable_destructor.cpp"
+"${LIBCXX_SOURCE_DIR}/src/debug.cpp"
+"${LIBCXX_SOURCE_DIR}/src/exception.cpp"
+"${LIBCXX_SOURCE_DIR}/src/experimental/memory_resource.cpp"
+"${LIBCXX_SOURCE_DIR}/src/filesystem/directory_iterator.cpp"
+"${LIBCXX_SOURCE_DIR}/src/filesystem/int128_builtins.cpp"
+"${LIBCXX_SOURCE_DIR}/src/filesystem/operations.cpp"
+"${LIBCXX_SOURCE_DIR}/src/functional.cpp"
+"${LIBCXX_SOURCE_DIR}/src/future.cpp"
+"${LIBCXX_SOURCE_DIR}/src/hash.cpp"
+"${LIBCXX_SOURCE_DIR}/src/ios.cpp"
+"${LIBCXX_SOURCE_DIR}/src/ios.instantiations.cpp"
+"${LIBCXX_SOURCE_DIR}/src/iostream.cpp"
+"${LIBCXX_SOURCE_DIR}/src/locale.cpp"
+"${LIBCXX_SOURCE_DIR}/src/memory.cpp"
+"${LIBCXX_SOURCE_DIR}/src/mutex.cpp"
+"${LIBCXX_SOURCE_DIR}/src/mutex_destructor.cpp"
+"${LIBCXX_SOURCE_DIR}/src/new.cpp"
+"${LIBCXX_SOURCE_DIR}/src/optional.cpp"
+"${LIBCXX_SOURCE_DIR}/src/random.cpp"
+"${LIBCXX_SOURCE_DIR}/src/random_shuffle.cpp"
+"${LIBCXX_SOURCE_DIR}/src/regex.cpp"
+"${LIBCXX_SOURCE_DIR}/src/shared_mutex.cpp"
+"${LIBCXX_SOURCE_DIR}/src/stdexcept.cpp"
+"${LIBCXX_SOURCE_DIR}/src/string.cpp"
+"${LIBCXX_SOURCE_DIR}/src/strstream.cpp"
+"${LIBCXX_SOURCE_DIR}/src/system_error.cpp"
+"${LIBCXX_SOURCE_DIR}/src/thread.cpp"
+"${LIBCXX_SOURCE_DIR}/src/typeinfo.cpp"
+"${LIBCXX_SOURCE_DIR}/src/utility.cpp"
+"${LIBCXX_SOURCE_DIR}/src/valarray.cpp"
+"${LIBCXX_SOURCE_DIR}/src/variant.cpp"
+"${LIBCXX_SOURCE_DIR}/src/vector.cpp"
)

add_library(cxx ${SRCS})
@@ -56,6 +56,11 @@ if (USE_UNWIND)
 target_compile_definitions(cxx PUBLIC -DSTD_EXCEPTION_HAS_STACK_TRACE=1)
 endif ()

+# Override the deduced attribute support that causes error.
+if (OS_DARWIN AND COMPILER_GCC)
+  add_compile_definitions(_LIBCPP_INIT_PRIORITY_MAX)
+endif ()
+
 target_compile_options(cxx PUBLIC $<$<COMPILE_LANGUAGE:CXX>:-nostdinc++>)

 # Third party library may have substandard code.
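The change repeated throughout these CMakeLists.txt diffs is the same one: every ${VAR} path expansion is wrapped in double quotes. CMake splits an unquoted expansion on embedded `;` characters and drops an empty expansion entirely, so the number of arguments a command receives depends on the variable's content; quoting guarantees exactly one intact argument, which also keeps paths whole when they are later pasted into shell command lines or flag strings. A minimal sketch of the splitting rules (hypothetical values; runnable with `cmake -P`):

function(count_args)
    # ARGC is the number of arguments the function actually received.
    message(STATUS "received ${ARGC} argument(s)")
endfunction()

set(LIST_LIKE "a;b")        # hypothetical value containing the CMake list separator
count_args(${LIST_LIKE})    # received 2 argument(s): unquoted expansion splits on ';'
count_args("${LIST_LIKE}")  # received 1 argument(s): quoted expansion stays one token

set(EMPTY "")               # hypothetical empty value
count_args(${EMPTY})        # received 0 argument(s): the argument vanishes entirely
count_args("${EMPTY}")      # received 1 argument(s): an explicit empty string

With quoting, each entry in a list such as SRCS is exactly one element no matter what characters the configured path contains.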
diff --git a/contrib/libcxxabi-cmake/CMakeLists.txt b/contrib/libcxxabi-cmake/CMakeLists.txt index 9d8b94dabf0..0bb5d663633 100644 --- a/contrib/libcxxabi-cmake/CMakeLists.txt +++ b/contrib/libcxxabi-cmake/CMakeLists.txt @@ -1,24 +1,24 @@ -set(LIBCXXABI_SOURCE_DIR ${ClickHouse_SOURCE_DIR}/contrib/libcxxabi) +set(LIBCXXABI_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/libcxxabi") set(SRCS -${LIBCXXABI_SOURCE_DIR}/src/stdlib_stdexcept.cpp -${LIBCXXABI_SOURCE_DIR}/src/cxa_virtual.cpp -${LIBCXXABI_SOURCE_DIR}/src/cxa_thread_atexit.cpp -${LIBCXXABI_SOURCE_DIR}/src/fallback_malloc.cpp -${LIBCXXABI_SOURCE_DIR}/src/cxa_guard.cpp -${LIBCXXABI_SOURCE_DIR}/src/cxa_default_handlers.cpp -${LIBCXXABI_SOURCE_DIR}/src/cxa_personality.cpp -${LIBCXXABI_SOURCE_DIR}/src/stdlib_exception.cpp -${LIBCXXABI_SOURCE_DIR}/src/abort_message.cpp -${LIBCXXABI_SOURCE_DIR}/src/cxa_demangle.cpp -${LIBCXXABI_SOURCE_DIR}/src/cxa_exception.cpp -${LIBCXXABI_SOURCE_DIR}/src/cxa_handlers.cpp -${LIBCXXABI_SOURCE_DIR}/src/cxa_exception_storage.cpp -${LIBCXXABI_SOURCE_DIR}/src/private_typeinfo.cpp -${LIBCXXABI_SOURCE_DIR}/src/stdlib_typeinfo.cpp -${LIBCXXABI_SOURCE_DIR}/src/cxa_aux_runtime.cpp -${LIBCXXABI_SOURCE_DIR}/src/cxa_vector.cpp -${LIBCXXABI_SOURCE_DIR}/src/stdlib_new_delete.cpp +"${LIBCXXABI_SOURCE_DIR}/src/stdlib_stdexcept.cpp" +"${LIBCXXABI_SOURCE_DIR}/src/cxa_virtual.cpp" +"${LIBCXXABI_SOURCE_DIR}/src/cxa_thread_atexit.cpp" +"${LIBCXXABI_SOURCE_DIR}/src/fallback_malloc.cpp" +"${LIBCXXABI_SOURCE_DIR}/src/cxa_guard.cpp" +"${LIBCXXABI_SOURCE_DIR}/src/cxa_default_handlers.cpp" +"${LIBCXXABI_SOURCE_DIR}/src/cxa_personality.cpp" +"${LIBCXXABI_SOURCE_DIR}/src/stdlib_exception.cpp" +"${LIBCXXABI_SOURCE_DIR}/src/abort_message.cpp" +"${LIBCXXABI_SOURCE_DIR}/src/cxa_demangle.cpp" +"${LIBCXXABI_SOURCE_DIR}/src/cxa_exception.cpp" +"${LIBCXXABI_SOURCE_DIR}/src/cxa_handlers.cpp" +"${LIBCXXABI_SOURCE_DIR}/src/cxa_exception_storage.cpp" +"${LIBCXXABI_SOURCE_DIR}/src/private_typeinfo.cpp" +"${LIBCXXABI_SOURCE_DIR}/src/stdlib_typeinfo.cpp" +"${LIBCXXABI_SOURCE_DIR}/src/cxa_aux_runtime.cpp" +"${LIBCXXABI_SOURCE_DIR}/src/cxa_vector.cpp" +"${LIBCXXABI_SOURCE_DIR}/src/stdlib_new_delete.cpp" ) add_library(cxxabi ${SRCS}) diff --git a/contrib/libhdfs3-cmake/CMake/Options.cmake b/contrib/libhdfs3-cmake/CMake/Options.cmake index d7ccc8b6475..04ab823eedc 100644 --- a/contrib/libhdfs3-cmake/CMake/Options.cmake +++ b/contrib/libhdfs3-cmake/CMake/Options.cmake @@ -22,7 +22,7 @@ ADD_DEFINITIONS(-D_GLIBCXX_USE_NANOSLEEP) TRY_COMPILE(STRERROR_R_RETURN_INT ${CMAKE_CURRENT_BINARY_DIR} - ${HDFS3_ROOT_DIR}/CMake/CMakeTestCompileStrerror.cpp + "${HDFS3_ROOT_DIR}/CMake/CMakeTestCompileStrerror.cpp" CMAKE_FLAGS "-DCMAKE_CXX_LINK_EXECUTABLE='echo not linking now...'" OUTPUT_VARIABLE OUTPUT) @@ -36,13 +36,13 @@ ENDIF(STRERROR_R_RETURN_INT) TRY_COMPILE(HAVE_STEADY_CLOCK ${CMAKE_CURRENT_BINARY_DIR} - ${HDFS3_ROOT_DIR}/CMake/CMakeTestCompileSteadyClock.cpp + "${HDFS3_ROOT_DIR}/CMake/CMakeTestCompileSteadyClock.cpp" CMAKE_FLAGS "-DCMAKE_CXX_LINK_EXECUTABLE='echo not linking now...'" OUTPUT_VARIABLE OUTPUT) TRY_COMPILE(HAVE_NESTED_EXCEPTION ${CMAKE_CURRENT_BINARY_DIR} - ${HDFS3_ROOT_DIR}/CMake/CMakeTestCompileNestedException.cpp + "${HDFS3_ROOT_DIR}/CMake/CMakeTestCompileNestedException.cpp" CMAKE_FLAGS "-DCMAKE_CXX_LINK_EXECUTABLE='echo not linking now...'" OUTPUT_VARIABLE OUTPUT) diff --git a/contrib/libhdfs3-cmake/CMakeLists.txt b/contrib/libhdfs3-cmake/CMakeLists.txt index 60f4376bdea..c9b9179d5e6 100644 --- a/contrib/libhdfs3-cmake/CMakeLists.txt +++ 
b/contrib/libhdfs3-cmake/CMakeLists.txt @@ -24,9 +24,9 @@ else() endif() # project and source dir -set(HDFS3_ROOT_DIR ${ClickHouse_SOURCE_DIR}/contrib/libhdfs3) -set(HDFS3_SOURCE_DIR ${HDFS3_ROOT_DIR}/src) -set(HDFS3_COMMON_DIR ${HDFS3_SOURCE_DIR}/common) +set(HDFS3_ROOT_DIR "${ClickHouse_SOURCE_DIR}/contrib/libhdfs3") +set(HDFS3_SOURCE_DIR "${HDFS3_ROOT_DIR}/src") +set(HDFS3_COMMON_DIR "${HDFS3_SOURCE_DIR}/common") # module set(CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/CMake" ${CMAKE_MODULE_PATH}) @@ -35,165 +35,165 @@ include(Options) # source set(PROTO_FILES - #${HDFS3_SOURCE_DIR}/proto/encryption.proto - ${HDFS3_SOURCE_DIR}/proto/ClientDatanodeProtocol.proto - ${HDFS3_SOURCE_DIR}/proto/hdfs.proto - ${HDFS3_SOURCE_DIR}/proto/Security.proto - ${HDFS3_SOURCE_DIR}/proto/ProtobufRpcEngine.proto - ${HDFS3_SOURCE_DIR}/proto/ClientNamenodeProtocol.proto - ${HDFS3_SOURCE_DIR}/proto/IpcConnectionContext.proto - ${HDFS3_SOURCE_DIR}/proto/RpcHeader.proto - ${HDFS3_SOURCE_DIR}/proto/datatransfer.proto + #"${HDFS3_SOURCE_DIR}/proto/encryption.proto" + "${HDFS3_SOURCE_DIR}/proto/ClientDatanodeProtocol.proto" + "${HDFS3_SOURCE_DIR}/proto/hdfs.proto" + "${HDFS3_SOURCE_DIR}/proto/Security.proto" + "${HDFS3_SOURCE_DIR}/proto/ProtobufRpcEngine.proto" + "${HDFS3_SOURCE_DIR}/proto/ClientNamenodeProtocol.proto" + "${HDFS3_SOURCE_DIR}/proto/IpcConnectionContext.proto" + "${HDFS3_SOURCE_DIR}/proto/RpcHeader.proto" + "${HDFS3_SOURCE_DIR}/proto/datatransfer.proto" ) if(USE_PROTOBUF) PROTOBUF_GENERATE_CPP(PROTO_SOURCES PROTO_HEADERS ${PROTO_FILES}) endif() -configure_file(${HDFS3_SOURCE_DIR}/platform.h.in ${CMAKE_CURRENT_BINARY_DIR}/platform.h) +configure_file("${HDFS3_SOURCE_DIR}/platform.h.in" "${CMAKE_CURRENT_BINARY_DIR}/platform.h") set(SRCS - ${HDFS3_SOURCE_DIR}/network/TcpSocket.cpp - ${HDFS3_SOURCE_DIR}/network/DomainSocket.cpp - ${HDFS3_SOURCE_DIR}/network/BufferedSocketReader.cpp - ${HDFS3_SOURCE_DIR}/client/ReadShortCircuitInfo.cpp - ${HDFS3_SOURCE_DIR}/client/Pipeline.cpp - ${HDFS3_SOURCE_DIR}/client/Hdfs.cpp - ${HDFS3_SOURCE_DIR}/client/Packet.cpp - ${HDFS3_SOURCE_DIR}/client/OutputStreamImpl.cpp - ${HDFS3_SOURCE_DIR}/client/KerberosName.cpp - ${HDFS3_SOURCE_DIR}/client/PacketHeader.cpp - ${HDFS3_SOURCE_DIR}/client/LocalBlockReader.cpp - ${HDFS3_SOURCE_DIR}/client/UserInfo.cpp - ${HDFS3_SOURCE_DIR}/client/RemoteBlockReader.cpp - ${HDFS3_SOURCE_DIR}/client/Permission.cpp - ${HDFS3_SOURCE_DIR}/client/FileSystemImpl.cpp - ${HDFS3_SOURCE_DIR}/client/DirectoryIterator.cpp - ${HDFS3_SOURCE_DIR}/client/FileSystemKey.cpp - ${HDFS3_SOURCE_DIR}/client/DataTransferProtocolSender.cpp - ${HDFS3_SOURCE_DIR}/client/LeaseRenewer.cpp - ${HDFS3_SOURCE_DIR}/client/PeerCache.cpp - ${HDFS3_SOURCE_DIR}/client/InputStream.cpp - ${HDFS3_SOURCE_DIR}/client/FileSystem.cpp - ${HDFS3_SOURCE_DIR}/client/InputStreamImpl.cpp - ${HDFS3_SOURCE_DIR}/client/Token.cpp - ${HDFS3_SOURCE_DIR}/client/PacketPool.cpp - ${HDFS3_SOURCE_DIR}/client/OutputStream.cpp - ${HDFS3_SOURCE_DIR}/rpc/RpcChannelKey.cpp - ${HDFS3_SOURCE_DIR}/rpc/RpcProtocolInfo.cpp - ${HDFS3_SOURCE_DIR}/rpc/RpcClient.cpp - ${HDFS3_SOURCE_DIR}/rpc/RpcRemoteCall.cpp - ${HDFS3_SOURCE_DIR}/rpc/RpcChannel.cpp - ${HDFS3_SOURCE_DIR}/rpc/RpcAuth.cpp - ${HDFS3_SOURCE_DIR}/rpc/RpcContentWrapper.cpp - ${HDFS3_SOURCE_DIR}/rpc/RpcConfig.cpp - ${HDFS3_SOURCE_DIR}/rpc/RpcServerInfo.cpp - ${HDFS3_SOURCE_DIR}/rpc/SaslClient.cpp - ${HDFS3_SOURCE_DIR}/server/Datanode.cpp - ${HDFS3_SOURCE_DIR}/server/LocatedBlocks.cpp - ${HDFS3_SOURCE_DIR}/server/NamenodeProxy.cpp - 
${HDFS3_SOURCE_DIR}/server/NamenodeImpl.cpp - ${HDFS3_SOURCE_DIR}/server/NamenodeInfo.cpp - ${HDFS3_SOURCE_DIR}/common/WritableUtils.cpp - ${HDFS3_SOURCE_DIR}/common/ExceptionInternal.cpp - ${HDFS3_SOURCE_DIR}/common/SessionConfig.cpp - ${HDFS3_SOURCE_DIR}/common/StackPrinter.cpp - ${HDFS3_SOURCE_DIR}/common/Exception.cpp - ${HDFS3_SOURCE_DIR}/common/Logger.cpp - ${HDFS3_SOURCE_DIR}/common/CFileWrapper.cpp - ${HDFS3_SOURCE_DIR}/common/XmlConfig.cpp - ${HDFS3_SOURCE_DIR}/common/WriteBuffer.cpp - ${HDFS3_SOURCE_DIR}/common/HWCrc32c.cpp - ${HDFS3_SOURCE_DIR}/common/MappedFileWrapper.cpp - ${HDFS3_SOURCE_DIR}/common/Hash.cpp - ${HDFS3_SOURCE_DIR}/common/SWCrc32c.cpp - ${HDFS3_SOURCE_DIR}/common/Thread.cpp + "${HDFS3_SOURCE_DIR}/network/TcpSocket.cpp" + "${HDFS3_SOURCE_DIR}/network/DomainSocket.cpp" + "${HDFS3_SOURCE_DIR}/network/BufferedSocketReader.cpp" + "${HDFS3_SOURCE_DIR}/client/ReadShortCircuitInfo.cpp" + "${HDFS3_SOURCE_DIR}/client/Pipeline.cpp" + "${HDFS3_SOURCE_DIR}/client/Hdfs.cpp" + "${HDFS3_SOURCE_DIR}/client/Packet.cpp" + "${HDFS3_SOURCE_DIR}/client/OutputStreamImpl.cpp" + "${HDFS3_SOURCE_DIR}/client/KerberosName.cpp" + "${HDFS3_SOURCE_DIR}/client/PacketHeader.cpp" + "${HDFS3_SOURCE_DIR}/client/LocalBlockReader.cpp" + "${HDFS3_SOURCE_DIR}/client/UserInfo.cpp" + "${HDFS3_SOURCE_DIR}/client/RemoteBlockReader.cpp" + "${HDFS3_SOURCE_DIR}/client/Permission.cpp" + "${HDFS3_SOURCE_DIR}/client/FileSystemImpl.cpp" + "${HDFS3_SOURCE_DIR}/client/DirectoryIterator.cpp" + "${HDFS3_SOURCE_DIR}/client/FileSystemKey.cpp" + "${HDFS3_SOURCE_DIR}/client/DataTransferProtocolSender.cpp" + "${HDFS3_SOURCE_DIR}/client/LeaseRenewer.cpp" + "${HDFS3_SOURCE_DIR}/client/PeerCache.cpp" + "${HDFS3_SOURCE_DIR}/client/InputStream.cpp" + "${HDFS3_SOURCE_DIR}/client/FileSystem.cpp" + "${HDFS3_SOURCE_DIR}/client/InputStreamImpl.cpp" + "${HDFS3_SOURCE_DIR}/client/Token.cpp" + "${HDFS3_SOURCE_DIR}/client/PacketPool.cpp" + "${HDFS3_SOURCE_DIR}/client/OutputStream.cpp" + "${HDFS3_SOURCE_DIR}/rpc/RpcChannelKey.cpp" + "${HDFS3_SOURCE_DIR}/rpc/RpcProtocolInfo.cpp" + "${HDFS3_SOURCE_DIR}/rpc/RpcClient.cpp" + "${HDFS3_SOURCE_DIR}/rpc/RpcRemoteCall.cpp" + "${HDFS3_SOURCE_DIR}/rpc/RpcChannel.cpp" + "${HDFS3_SOURCE_DIR}/rpc/RpcAuth.cpp" + "${HDFS3_SOURCE_DIR}/rpc/RpcContentWrapper.cpp" + "${HDFS3_SOURCE_DIR}/rpc/RpcConfig.cpp" + "${HDFS3_SOURCE_DIR}/rpc/RpcServerInfo.cpp" + "${HDFS3_SOURCE_DIR}/rpc/SaslClient.cpp" + "${HDFS3_SOURCE_DIR}/server/Datanode.cpp" + "${HDFS3_SOURCE_DIR}/server/LocatedBlocks.cpp" + "${HDFS3_SOURCE_DIR}/server/NamenodeProxy.cpp" + "${HDFS3_SOURCE_DIR}/server/NamenodeImpl.cpp" + "${HDFS3_SOURCE_DIR}/server/NamenodeInfo.cpp" + "${HDFS3_SOURCE_DIR}/common/WritableUtils.cpp" + "${HDFS3_SOURCE_DIR}/common/ExceptionInternal.cpp" + "${HDFS3_SOURCE_DIR}/common/SessionConfig.cpp" + "${HDFS3_SOURCE_DIR}/common/StackPrinter.cpp" + "${HDFS3_SOURCE_DIR}/common/Exception.cpp" + "${HDFS3_SOURCE_DIR}/common/Logger.cpp" + "${HDFS3_SOURCE_DIR}/common/CFileWrapper.cpp" + "${HDFS3_SOURCE_DIR}/common/XmlConfig.cpp" + "${HDFS3_SOURCE_DIR}/common/WriteBuffer.cpp" + "${HDFS3_SOURCE_DIR}/common/HWCrc32c.cpp" + "${HDFS3_SOURCE_DIR}/common/MappedFileWrapper.cpp" + "${HDFS3_SOURCE_DIR}/common/Hash.cpp" + "${HDFS3_SOURCE_DIR}/common/SWCrc32c.cpp" + "${HDFS3_SOURCE_DIR}/common/Thread.cpp" - ${HDFS3_SOURCE_DIR}/network/TcpSocket.h - ${HDFS3_SOURCE_DIR}/network/BufferedSocketReader.h - ${HDFS3_SOURCE_DIR}/network/Socket.h - ${HDFS3_SOURCE_DIR}/network/DomainSocket.h - ${HDFS3_SOURCE_DIR}/network/Syscall.h - 
${HDFS3_SOURCE_DIR}/client/InputStreamImpl.h - ${HDFS3_SOURCE_DIR}/client/FileSystem.h - ${HDFS3_SOURCE_DIR}/client/ReadShortCircuitInfo.h - ${HDFS3_SOURCE_DIR}/client/InputStreamInter.h - ${HDFS3_SOURCE_DIR}/client/FileSystemImpl.h - ${HDFS3_SOURCE_DIR}/client/PacketPool.h - ${HDFS3_SOURCE_DIR}/client/Pipeline.h - ${HDFS3_SOURCE_DIR}/client/OutputStreamInter.h - ${HDFS3_SOURCE_DIR}/client/RemoteBlockReader.h - ${HDFS3_SOURCE_DIR}/client/Token.h - ${HDFS3_SOURCE_DIR}/client/KerberosName.h - ${HDFS3_SOURCE_DIR}/client/DirectoryIterator.h - ${HDFS3_SOURCE_DIR}/client/hdfs.h - ${HDFS3_SOURCE_DIR}/client/FileSystemStats.h - ${HDFS3_SOURCE_DIR}/client/FileSystemKey.h - ${HDFS3_SOURCE_DIR}/client/DataTransferProtocolSender.h - ${HDFS3_SOURCE_DIR}/client/Packet.h - ${HDFS3_SOURCE_DIR}/client/PacketHeader.h - ${HDFS3_SOURCE_DIR}/client/FileSystemInter.h - ${HDFS3_SOURCE_DIR}/client/LocalBlockReader.h - ${HDFS3_SOURCE_DIR}/client/TokenInternal.h - ${HDFS3_SOURCE_DIR}/client/InputStream.h - ${HDFS3_SOURCE_DIR}/client/PipelineAck.h - ${HDFS3_SOURCE_DIR}/client/BlockReader.h - ${HDFS3_SOURCE_DIR}/client/Permission.h - ${HDFS3_SOURCE_DIR}/client/OutputStreamImpl.h - ${HDFS3_SOURCE_DIR}/client/LeaseRenewer.h - ${HDFS3_SOURCE_DIR}/client/UserInfo.h - ${HDFS3_SOURCE_DIR}/client/PeerCache.h - ${HDFS3_SOURCE_DIR}/client/OutputStream.h - ${HDFS3_SOURCE_DIR}/client/FileStatus.h - ${HDFS3_SOURCE_DIR}/client/DataTransferProtocol.h - ${HDFS3_SOURCE_DIR}/client/BlockLocation.h - ${HDFS3_SOURCE_DIR}/rpc/RpcConfig.h - ${HDFS3_SOURCE_DIR}/rpc/SaslClient.h - ${HDFS3_SOURCE_DIR}/rpc/RpcAuth.h - ${HDFS3_SOURCE_DIR}/rpc/RpcClient.h - ${HDFS3_SOURCE_DIR}/rpc/RpcCall.h - ${HDFS3_SOURCE_DIR}/rpc/RpcContentWrapper.h - ${HDFS3_SOURCE_DIR}/rpc/RpcProtocolInfo.h - ${HDFS3_SOURCE_DIR}/rpc/RpcRemoteCall.h - ${HDFS3_SOURCE_DIR}/rpc/RpcServerInfo.h - ${HDFS3_SOURCE_DIR}/rpc/RpcChannel.h - ${HDFS3_SOURCE_DIR}/rpc/RpcChannelKey.h - ${HDFS3_SOURCE_DIR}/server/BlockLocalPathInfo.h - ${HDFS3_SOURCE_DIR}/server/LocatedBlocks.h - ${HDFS3_SOURCE_DIR}/server/DatanodeInfo.h - ${HDFS3_SOURCE_DIR}/server/RpcHelper.h - ${HDFS3_SOURCE_DIR}/server/ExtendedBlock.h - ${HDFS3_SOURCE_DIR}/server/NamenodeInfo.h - ${HDFS3_SOURCE_DIR}/server/NamenodeImpl.h - ${HDFS3_SOURCE_DIR}/server/LocatedBlock.h - ${HDFS3_SOURCE_DIR}/server/NamenodeProxy.h - ${HDFS3_SOURCE_DIR}/server/Datanode.h - ${HDFS3_SOURCE_DIR}/server/Namenode.h - ${HDFS3_SOURCE_DIR}/common/XmlConfig.h - ${HDFS3_SOURCE_DIR}/common/Logger.h - ${HDFS3_SOURCE_DIR}/common/WriteBuffer.h - ${HDFS3_SOURCE_DIR}/common/HWCrc32c.h - ${HDFS3_SOURCE_DIR}/common/Checksum.h - ${HDFS3_SOURCE_DIR}/common/SessionConfig.h - ${HDFS3_SOURCE_DIR}/common/Unordered.h - ${HDFS3_SOURCE_DIR}/common/BigEndian.h - ${HDFS3_SOURCE_DIR}/common/Thread.h - ${HDFS3_SOURCE_DIR}/common/StackPrinter.h - ${HDFS3_SOURCE_DIR}/common/Exception.h - ${HDFS3_SOURCE_DIR}/common/WritableUtils.h - ${HDFS3_SOURCE_DIR}/common/StringUtil.h - ${HDFS3_SOURCE_DIR}/common/LruMap.h - ${HDFS3_SOURCE_DIR}/common/Function.h - ${HDFS3_SOURCE_DIR}/common/DateTime.h - ${HDFS3_SOURCE_DIR}/common/Hash.h - ${HDFS3_SOURCE_DIR}/common/SWCrc32c.h - ${HDFS3_SOURCE_DIR}/common/ExceptionInternal.h - ${HDFS3_SOURCE_DIR}/common/Memory.h - ${HDFS3_SOURCE_DIR}/common/FileWrapper.h + "${HDFS3_SOURCE_DIR}/network/TcpSocket.h" + "${HDFS3_SOURCE_DIR}/network/BufferedSocketReader.h" + "${HDFS3_SOURCE_DIR}/network/Socket.h" + "${HDFS3_SOURCE_DIR}/network/DomainSocket.h" + "${HDFS3_SOURCE_DIR}/network/Syscall.h" + "${HDFS3_SOURCE_DIR}/client/InputStreamImpl.h" + 
"${HDFS3_SOURCE_DIR}/client/FileSystem.h" + "${HDFS3_SOURCE_DIR}/client/ReadShortCircuitInfo.h" + "${HDFS3_SOURCE_DIR}/client/InputStreamInter.h" + "${HDFS3_SOURCE_DIR}/client/FileSystemImpl.h" + "${HDFS3_SOURCE_DIR}/client/PacketPool.h" + "${HDFS3_SOURCE_DIR}/client/Pipeline.h" + "${HDFS3_SOURCE_DIR}/client/OutputStreamInter.h" + "${HDFS3_SOURCE_DIR}/client/RemoteBlockReader.h" + "${HDFS3_SOURCE_DIR}/client/Token.h" + "${HDFS3_SOURCE_DIR}/client/KerberosName.h" + "${HDFS3_SOURCE_DIR}/client/DirectoryIterator.h" + "${HDFS3_SOURCE_DIR}/client/hdfs.h" + "${HDFS3_SOURCE_DIR}/client/FileSystemStats.h" + "${HDFS3_SOURCE_DIR}/client/FileSystemKey.h" + "${HDFS3_SOURCE_DIR}/client/DataTransferProtocolSender.h" + "${HDFS3_SOURCE_DIR}/client/Packet.h" + "${HDFS3_SOURCE_DIR}/client/PacketHeader.h" + "${HDFS3_SOURCE_DIR}/client/FileSystemInter.h" + "${HDFS3_SOURCE_DIR}/client/LocalBlockReader.h" + "${HDFS3_SOURCE_DIR}/client/TokenInternal.h" + "${HDFS3_SOURCE_DIR}/client/InputStream.h" + "${HDFS3_SOURCE_DIR}/client/PipelineAck.h" + "${HDFS3_SOURCE_DIR}/client/BlockReader.h" + "${HDFS3_SOURCE_DIR}/client/Permission.h" + "${HDFS3_SOURCE_DIR}/client/OutputStreamImpl.h" + "${HDFS3_SOURCE_DIR}/client/LeaseRenewer.h" + "${HDFS3_SOURCE_DIR}/client/UserInfo.h" + "${HDFS3_SOURCE_DIR}/client/PeerCache.h" + "${HDFS3_SOURCE_DIR}/client/OutputStream.h" + "${HDFS3_SOURCE_DIR}/client/FileStatus.h" + "${HDFS3_SOURCE_DIR}/client/DataTransferProtocol.h" + "${HDFS3_SOURCE_DIR}/client/BlockLocation.h" + "${HDFS3_SOURCE_DIR}/rpc/RpcConfig.h" + "${HDFS3_SOURCE_DIR}/rpc/SaslClient.h" + "${HDFS3_SOURCE_DIR}/rpc/RpcAuth.h" + "${HDFS3_SOURCE_DIR}/rpc/RpcClient.h" + "${HDFS3_SOURCE_DIR}/rpc/RpcCall.h" + "${HDFS3_SOURCE_DIR}/rpc/RpcContentWrapper.h" + "${HDFS3_SOURCE_DIR}/rpc/RpcProtocolInfo.h" + "${HDFS3_SOURCE_DIR}/rpc/RpcRemoteCall.h" + "${HDFS3_SOURCE_DIR}/rpc/RpcServerInfo.h" + "${HDFS3_SOURCE_DIR}/rpc/RpcChannel.h" + "${HDFS3_SOURCE_DIR}/rpc/RpcChannelKey.h" + "${HDFS3_SOURCE_DIR}/server/BlockLocalPathInfo.h" + "${HDFS3_SOURCE_DIR}/server/LocatedBlocks.h" + "${HDFS3_SOURCE_DIR}/server/DatanodeInfo.h" + "${HDFS3_SOURCE_DIR}/server/RpcHelper.h" + "${HDFS3_SOURCE_DIR}/server/ExtendedBlock.h" + "${HDFS3_SOURCE_DIR}/server/NamenodeInfo.h" + "${HDFS3_SOURCE_DIR}/server/NamenodeImpl.h" + "${HDFS3_SOURCE_DIR}/server/LocatedBlock.h" + "${HDFS3_SOURCE_DIR}/server/NamenodeProxy.h" + "${HDFS3_SOURCE_DIR}/server/Datanode.h" + "${HDFS3_SOURCE_DIR}/server/Namenode.h" + "${HDFS3_SOURCE_DIR}/common/XmlConfig.h" + "${HDFS3_SOURCE_DIR}/common/Logger.h" + "${HDFS3_SOURCE_DIR}/common/WriteBuffer.h" + "${HDFS3_SOURCE_DIR}/common/HWCrc32c.h" + "${HDFS3_SOURCE_DIR}/common/Checksum.h" + "${HDFS3_SOURCE_DIR}/common/SessionConfig.h" + "${HDFS3_SOURCE_DIR}/common/Unordered.h" + "${HDFS3_SOURCE_DIR}/common/BigEndian.h" + "${HDFS3_SOURCE_DIR}/common/Thread.h" + "${HDFS3_SOURCE_DIR}/common/StackPrinter.h" + "${HDFS3_SOURCE_DIR}/common/Exception.h" + "${HDFS3_SOURCE_DIR}/common/WritableUtils.h" + "${HDFS3_SOURCE_DIR}/common/StringUtil.h" + "${HDFS3_SOURCE_DIR}/common/LruMap.h" + "${HDFS3_SOURCE_DIR}/common/Function.h" + "${HDFS3_SOURCE_DIR}/common/DateTime.h" + "${HDFS3_SOURCE_DIR}/common/Hash.h" + "${HDFS3_SOURCE_DIR}/common/SWCrc32c.h" + "${HDFS3_SOURCE_DIR}/common/ExceptionInternal.h" + "${HDFS3_SOURCE_DIR}/common/Memory.h" + "${HDFS3_SOURCE_DIR}/common/FileWrapper.h" ) # old kernels (< 3.17) doesn't have SYS_getrandom. 
Always use POSIX implementation to have better compatibility -set_source_files_properties(${HDFS3_SOURCE_DIR}/rpc/RpcClient.cpp PROPERTIES COMPILE_FLAGS "-DBOOST_UUID_RANDOM_PROVIDER_FORCE_POSIX=1") +set_source_files_properties("${HDFS3_SOURCE_DIR}/rpc/RpcClient.cpp" PROPERTIES COMPILE_FLAGS "-DBOOST_UUID_RANDOM_PROVIDER_FORCE_POSIX=1") # target add_library(hdfs3 ${SRCS} ${PROTO_SOURCES} ${PROTO_HEADERS}) diff --git a/contrib/libpq-cmake/CMakeLists.txt b/contrib/libpq-cmake/CMakeLists.txt index 34c57799a8a..028fabe52b8 100644 --- a/contrib/libpq-cmake/CMakeLists.txt +++ b/contrib/libpq-cmake/CMakeLists.txt @@ -1,58 +1,58 @@ -set(LIBPQ_SOURCE_DIR ${ClickHouse_SOURCE_DIR}/contrib/libpq) +set(LIBPQ_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/libpq") set(SRCS - ${LIBPQ_SOURCE_DIR}/fe-auth.c - ${LIBPQ_SOURCE_DIR}/fe-auth-scram.c - ${LIBPQ_SOURCE_DIR}/fe-connect.c - ${LIBPQ_SOURCE_DIR}/fe-exec.c - ${LIBPQ_SOURCE_DIR}/fe-lobj.c - ${LIBPQ_SOURCE_DIR}/fe-misc.c - ${LIBPQ_SOURCE_DIR}/fe-print.c - ${LIBPQ_SOURCE_DIR}/fe-protocol2.c - ${LIBPQ_SOURCE_DIR}/fe-protocol3.c - ${LIBPQ_SOURCE_DIR}/fe-secure.c - ${LIBPQ_SOURCE_DIR}/fe-secure-common.c - ${LIBPQ_SOURCE_DIR}/fe-secure-openssl.c - ${LIBPQ_SOURCE_DIR}/legacy-pqsignal.c - ${LIBPQ_SOURCE_DIR}/libpq-events.c - ${LIBPQ_SOURCE_DIR}/pqexpbuffer.c + "${LIBPQ_SOURCE_DIR}/fe-auth.c" + "${LIBPQ_SOURCE_DIR}/fe-auth-scram.c" + "${LIBPQ_SOURCE_DIR}/fe-connect.c" + "${LIBPQ_SOURCE_DIR}/fe-exec.c" + "${LIBPQ_SOURCE_DIR}/fe-lobj.c" + "${LIBPQ_SOURCE_DIR}/fe-misc.c" + "${LIBPQ_SOURCE_DIR}/fe-print.c" + "${LIBPQ_SOURCE_DIR}/fe-protocol2.c" + "${LIBPQ_SOURCE_DIR}/fe-protocol3.c" + "${LIBPQ_SOURCE_DIR}/fe-secure.c" + "${LIBPQ_SOURCE_DIR}/fe-secure-common.c" + "${LIBPQ_SOURCE_DIR}/fe-secure-openssl.c" + "${LIBPQ_SOURCE_DIR}/legacy-pqsignal.c" + "${LIBPQ_SOURCE_DIR}/libpq-events.c" + "${LIBPQ_SOURCE_DIR}/pqexpbuffer.c" - ${LIBPQ_SOURCE_DIR}/common/scram-common.c - ${LIBPQ_SOURCE_DIR}/common/sha2_openssl.c - ${LIBPQ_SOURCE_DIR}/common/md5.c - ${LIBPQ_SOURCE_DIR}/common/saslprep.c - ${LIBPQ_SOURCE_DIR}/common/unicode_norm.c - ${LIBPQ_SOURCE_DIR}/common/ip.c - ${LIBPQ_SOURCE_DIR}/common/jsonapi.c - ${LIBPQ_SOURCE_DIR}/common/wchar.c - ${LIBPQ_SOURCE_DIR}/common/base64.c - ${LIBPQ_SOURCE_DIR}/common/link-canary.c - ${LIBPQ_SOURCE_DIR}/common/fe_memutils.c - ${LIBPQ_SOURCE_DIR}/common/string.c - ${LIBPQ_SOURCE_DIR}/common/pg_get_line.c - ${LIBPQ_SOURCE_DIR}/common/stringinfo.c - ${LIBPQ_SOURCE_DIR}/common/psprintf.c - ${LIBPQ_SOURCE_DIR}/common/encnames.c - ${LIBPQ_SOURCE_DIR}/common/logging.c + "${LIBPQ_SOURCE_DIR}/common/scram-common.c" + "${LIBPQ_SOURCE_DIR}/common/sha2_openssl.c" + "${LIBPQ_SOURCE_DIR}/common/md5.c" + "${LIBPQ_SOURCE_DIR}/common/saslprep.c" + "${LIBPQ_SOURCE_DIR}/common/unicode_norm.c" + "${LIBPQ_SOURCE_DIR}/common/ip.c" + "${LIBPQ_SOURCE_DIR}/common/jsonapi.c" + "${LIBPQ_SOURCE_DIR}/common/wchar.c" + "${LIBPQ_SOURCE_DIR}/common/base64.c" + "${LIBPQ_SOURCE_DIR}/common/link-canary.c" + "${LIBPQ_SOURCE_DIR}/common/fe_memutils.c" + "${LIBPQ_SOURCE_DIR}/common/string.c" + "${LIBPQ_SOURCE_DIR}/common/pg_get_line.c" + "${LIBPQ_SOURCE_DIR}/common/stringinfo.c" + "${LIBPQ_SOURCE_DIR}/common/psprintf.c" + "${LIBPQ_SOURCE_DIR}/common/encnames.c" + "${LIBPQ_SOURCE_DIR}/common/logging.c" - ${LIBPQ_SOURCE_DIR}/port/snprintf.c - ${LIBPQ_SOURCE_DIR}/port/strlcpy.c - ${LIBPQ_SOURCE_DIR}/port/strerror.c - ${LIBPQ_SOURCE_DIR}/port/inet_net_ntop.c - ${LIBPQ_SOURCE_DIR}/port/getpeereid.c - ${LIBPQ_SOURCE_DIR}/port/chklocale.c - ${LIBPQ_SOURCE_DIR}/port/noblock.c - 
${LIBPQ_SOURCE_DIR}/port/pg_strong_random.c
- ${LIBPQ_SOURCE_DIR}/port/pgstrcasecmp.c
- ${LIBPQ_SOURCE_DIR}/port/thread.c
- ${LIBPQ_SOURCE_DIR}/port/path.c
- ${LIBPQ_SOURCE_DIR}/port/explicit_bzero.c
+ "${LIBPQ_SOURCE_DIR}/port/snprintf.c"
+ "${LIBPQ_SOURCE_DIR}/port/strlcpy.c"
+ "${LIBPQ_SOURCE_DIR}/port/strerror.c"
+ "${LIBPQ_SOURCE_DIR}/port/inet_net_ntop.c"
+ "${LIBPQ_SOURCE_DIR}/port/getpeereid.c"
+ "${LIBPQ_SOURCE_DIR}/port/chklocale.c"
+ "${LIBPQ_SOURCE_DIR}/port/noblock.c"
+ "${LIBPQ_SOURCE_DIR}/port/pg_strong_random.c"
+ "${LIBPQ_SOURCE_DIR}/port/pgstrcasecmp.c"
+ "${LIBPQ_SOURCE_DIR}/port/thread.c"
+ "${LIBPQ_SOURCE_DIR}/port/path.c"
+ "${LIBPQ_SOURCE_DIR}/port/explicit_bzero.c"
)

add_library(libpq ${SRCS})

target_include_directories (libpq PUBLIC ${LIBPQ_SOURCE_DIR})
-target_include_directories (libpq PUBLIC ${LIBPQ_SOURCE_DIR}/include)
-target_include_directories (libpq PRIVATE ${LIBPQ_SOURCE_DIR}/configs)
+target_include_directories (libpq PUBLIC "${LIBPQ_SOURCE_DIR}/include")
+target_include_directories (libpq PRIVATE "${LIBPQ_SOURCE_DIR}/configs")

target_link_libraries (libpq PRIVATE ssl)
diff --git a/contrib/libpqxx-cmake/CMakeLists.txt b/contrib/libpqxx-cmake/CMakeLists.txt
index ed372951f82..4edef7bdd82 100644
--- a/contrib/libpqxx-cmake/CMakeLists.txt
+++ b/contrib/libpqxx-cmake/CMakeLists.txt
@@ -1,70 +1,70 @@
-set (LIBRARY_DIR ${ClickHouse_SOURCE_DIR}/contrib/libpqxx)
+set (LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/libpqxx")

set (SRCS
- ${LIBRARY_DIR}/src/strconv.cxx
- ${LIBRARY_DIR}/src/array.cxx
- ${LIBRARY_DIR}/src/binarystring.cxx
- ${LIBRARY_DIR}/src/connection.cxx
- ${LIBRARY_DIR}/src/cursor.cxx
- ${LIBRARY_DIR}/src/encodings.cxx
- ${LIBRARY_DIR}/src/errorhandler.cxx
- ${LIBRARY_DIR}/src/except.cxx
- ${LIBRARY_DIR}/src/field.cxx
- ${LIBRARY_DIR}/src/largeobject.cxx
- ${LIBRARY_DIR}/src/notification.cxx
- ${LIBRARY_DIR}/src/pipeline.cxx
- ${LIBRARY_DIR}/src/result.cxx
- ${LIBRARY_DIR}/src/robusttransaction.cxx
- ${LIBRARY_DIR}/src/sql_cursor.cxx
- ${LIBRARY_DIR}/src/stream_from.cxx
- ${LIBRARY_DIR}/src/stream_to.cxx
- ${LIBRARY_DIR}/src/subtransaction.cxx
- ${LIBRARY_DIR}/src/transaction.cxx
- ${LIBRARY_DIR}/src/transaction_base.cxx
- ${LIBRARY_DIR}/src/row.cxx
- ${LIBRARY_DIR}/src/util.cxx
- ${LIBRARY_DIR}/src/version.cxx
+ "${LIBRARY_DIR}/src/strconv.cxx"
+ "${LIBRARY_DIR}/src/array.cxx"
+ "${LIBRARY_DIR}/src/binarystring.cxx"
+ "${LIBRARY_DIR}/src/connection.cxx"
+ "${LIBRARY_DIR}/src/cursor.cxx"
+ "${LIBRARY_DIR}/src/encodings.cxx"
+ "${LIBRARY_DIR}/src/errorhandler.cxx"
+ "${LIBRARY_DIR}/src/except.cxx"
+ "${LIBRARY_DIR}/src/field.cxx"
+ "${LIBRARY_DIR}/src/largeobject.cxx"
+ "${LIBRARY_DIR}/src/notification.cxx"
+ "${LIBRARY_DIR}/src/pipeline.cxx"
+ "${LIBRARY_DIR}/src/result.cxx"
+ "${LIBRARY_DIR}/src/robusttransaction.cxx"
+ "${LIBRARY_DIR}/src/sql_cursor.cxx"
+ "${LIBRARY_DIR}/src/stream_from.cxx"
+ "${LIBRARY_DIR}/src/stream_to.cxx"
+ "${LIBRARY_DIR}/src/subtransaction.cxx"
+ "${LIBRARY_DIR}/src/transaction.cxx"
+ "${LIBRARY_DIR}/src/transaction_base.cxx"
+ "${LIBRARY_DIR}/src/row.cxx"
+ "${LIBRARY_DIR}/src/util.cxx"
+ "${LIBRARY_DIR}/src/version.cxx"
)

# Need to explicitly include each header file, because in the directory include/pqxx there are also files
# like just 'array'. So if including the whole directory with `target_include_directories`, it will make
# conflicts with all includes of <array>.
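A sketch of the hazard that comment describes, under a hypothetical target name: libpqxx ships extensionless headers such as include/pqxx/array, and compilers search -I directories for angle-bracket includes before the system directories, so exposing include/pqxx itself would let `#include <array>` resolve to pqxx's file. Exposing only the parent directory keeps the standard header unambiguous:

# Hypothetical illustration, not part of the patch; assumes LIBRARY_DIR as set above.
add_library(demo STATIC demo.cpp)  # 'demo' and 'demo.cpp' are placeholders
# Wrong: puts the extensionless file "include/pqxx/array" on the search path,
# where it shadows the standard C++ header <array>:
#   target_include_directories(demo PRIVATE "${LIBRARY_DIR}/include/pqxx")
# Right: expose only the parent, so pqxx headers are spelled <pqxx/array.hxx>
# and a bare <array> still means the standard library header:
target_include_directories(demo PRIVATE "${LIBRARY_DIR}/include")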
set (HDRS - ${LIBRARY_DIR}/include/pqxx/array.hxx - ${LIBRARY_DIR}/include/pqxx/binarystring.hxx - ${LIBRARY_DIR}/include/pqxx/composite.hxx - ${LIBRARY_DIR}/include/pqxx/connection.hxx - ${LIBRARY_DIR}/include/pqxx/cursor.hxx - ${LIBRARY_DIR}/include/pqxx/dbtransaction.hxx - ${LIBRARY_DIR}/include/pqxx/errorhandler.hxx - ${LIBRARY_DIR}/include/pqxx/except.hxx - ${LIBRARY_DIR}/include/pqxx/field.hxx - ${LIBRARY_DIR}/include/pqxx/isolation.hxx - ${LIBRARY_DIR}/include/pqxx/largeobject.hxx - ${LIBRARY_DIR}/include/pqxx/nontransaction.hxx - ${LIBRARY_DIR}/include/pqxx/notification.hxx - ${LIBRARY_DIR}/include/pqxx/pipeline.hxx - ${LIBRARY_DIR}/include/pqxx/prepared_statement.hxx - ${LIBRARY_DIR}/include/pqxx/result.hxx - ${LIBRARY_DIR}/include/pqxx/robusttransaction.hxx - ${LIBRARY_DIR}/include/pqxx/row.hxx - ${LIBRARY_DIR}/include/pqxx/separated_list.hxx - ${LIBRARY_DIR}/include/pqxx/strconv.hxx - ${LIBRARY_DIR}/include/pqxx/stream_from.hxx - ${LIBRARY_DIR}/include/pqxx/stream_to.hxx - ${LIBRARY_DIR}/include/pqxx/subtransaction.hxx - ${LIBRARY_DIR}/include/pqxx/transaction.hxx - ${LIBRARY_DIR}/include/pqxx/transaction_base.hxx - ${LIBRARY_DIR}/include/pqxx/types.hxx - ${LIBRARY_DIR}/include/pqxx/util.hxx - ${LIBRARY_DIR}/include/pqxx/version.hxx - ${LIBRARY_DIR}/include/pqxx/zview.hxx + "${LIBRARY_DIR}/include/pqxx/array.hxx" + "${LIBRARY_DIR}/include/pqxx/binarystring.hxx" + "${LIBRARY_DIR}/include/pqxx/composite.hxx" + "${LIBRARY_DIR}/include/pqxx/connection.hxx" + "${LIBRARY_DIR}/include/pqxx/cursor.hxx" + "${LIBRARY_DIR}/include/pqxx/dbtransaction.hxx" + "${LIBRARY_DIR}/include/pqxx/errorhandler.hxx" + "${LIBRARY_DIR}/include/pqxx/except.hxx" + "${LIBRARY_DIR}/include/pqxx/field.hxx" + "${LIBRARY_DIR}/include/pqxx/isolation.hxx" + "${LIBRARY_DIR}/include/pqxx/largeobject.hxx" + "${LIBRARY_DIR}/include/pqxx/nontransaction.hxx" + "${LIBRARY_DIR}/include/pqxx/notification.hxx" + "${LIBRARY_DIR}/include/pqxx/pipeline.hxx" + "${LIBRARY_DIR}/include/pqxx/prepared_statement.hxx" + "${LIBRARY_DIR}/include/pqxx/result.hxx" + "${LIBRARY_DIR}/include/pqxx/robusttransaction.hxx" + "${LIBRARY_DIR}/include/pqxx/row.hxx" + "${LIBRARY_DIR}/include/pqxx/separated_list.hxx" + "${LIBRARY_DIR}/include/pqxx/strconv.hxx" + "${LIBRARY_DIR}/include/pqxx/stream_from.hxx" + "${LIBRARY_DIR}/include/pqxx/stream_to.hxx" + "${LIBRARY_DIR}/include/pqxx/subtransaction.hxx" + "${LIBRARY_DIR}/include/pqxx/transaction.hxx" + "${LIBRARY_DIR}/include/pqxx/transaction_base.hxx" + "${LIBRARY_DIR}/include/pqxx/types.hxx" + "${LIBRARY_DIR}/include/pqxx/util.hxx" + "${LIBRARY_DIR}/include/pqxx/version.hxx" + "${LIBRARY_DIR}/include/pqxx/zview.hxx" ) add_library(libpqxx ${SRCS} ${HDRS}) target_link_libraries(libpqxx PUBLIC ${LIBPQ_LIBRARY}) -target_include_directories (libpqxx PRIVATE ${LIBRARY_DIR}/include) +target_include_directories (libpqxx PRIVATE "${LIBRARY_DIR}/include") # crutch set(CM_CONFIG_H_IN "${LIBRARY_DIR}/include/pqxx/config.h.in") diff --git a/contrib/librdkafka b/contrib/librdkafka index cf11d0aa36d..43491d33ca2 160000 --- a/contrib/librdkafka +++ b/contrib/librdkafka @@ -1 +1 @@ -Subproject commit cf11d0aa36d4738f2c9bf4377807661660f1be76 +Subproject commit 43491d33ca2826531d1e3cae70d4bf1e5249e3c9 diff --git a/contrib/librdkafka-cmake/CMakeLists.txt b/contrib/librdkafka-cmake/CMakeLists.txt index 2b55b22cd2b..97b6a7e1ec5 100644 --- a/contrib/librdkafka-cmake/CMakeLists.txt +++ b/contrib/librdkafka-cmake/CMakeLists.txt @@ -1,83 +1,83 @@ -set(RDKAFKA_SOURCE_DIR ${ClickHouse_SOURCE_DIR}/contrib/librdkafka/src) 
+set(RDKAFKA_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/librdkafka/src") set(SRCS - ${RDKAFKA_SOURCE_DIR}/crc32c.c -# ${RDKAFKA_SOURCE_DIR}/lz4.c -# ${RDKAFKA_SOURCE_DIR}/lz4frame.c -# ${RDKAFKA_SOURCE_DIR}/lz4hc.c - ${RDKAFKA_SOURCE_DIR}/rdaddr.c - ${RDKAFKA_SOURCE_DIR}/rdavl.c - ${RDKAFKA_SOURCE_DIR}/rdbuf.c - ${RDKAFKA_SOURCE_DIR}/rdcrc32.c - ${RDKAFKA_SOURCE_DIR}/rddl.c - ${RDKAFKA_SOURCE_DIR}/rdfnv1a.c - ${RDKAFKA_SOURCE_DIR}/rdgz.c - ${RDKAFKA_SOURCE_DIR}/rdhdrhistogram.c - ${RDKAFKA_SOURCE_DIR}/rdkafka_admin.c # looks optional - ${RDKAFKA_SOURCE_DIR}/rdkafka_assignment.c - ${RDKAFKA_SOURCE_DIR}/rdkafka_assignor.c - ${RDKAFKA_SOURCE_DIR}/rdkafka_aux.c # looks optional - ${RDKAFKA_SOURCE_DIR}/rdkafka_background.c - ${RDKAFKA_SOURCE_DIR}/rdkafka_broker.c - ${RDKAFKA_SOURCE_DIR}/rdkafka_buf.c - ${RDKAFKA_SOURCE_DIR}/rdkafka.c - ${RDKAFKA_SOURCE_DIR}/rdkafka_cert.c - ${RDKAFKA_SOURCE_DIR}/rdkafka_cgrp.c - ${RDKAFKA_SOURCE_DIR}/rdkafka_conf.c - ${RDKAFKA_SOURCE_DIR}/rdkafka_coord.c - ${RDKAFKA_SOURCE_DIR}/rdkafka_error.c - ${RDKAFKA_SOURCE_DIR}/rdkafka_event.c - ${RDKAFKA_SOURCE_DIR}/rdkafka_feature.c - ${RDKAFKA_SOURCE_DIR}/rdkafka_header.c - ${RDKAFKA_SOURCE_DIR}/rdkafka_idempotence.c - ${RDKAFKA_SOURCE_DIR}/rdkafka_interceptor.c - ${RDKAFKA_SOURCE_DIR}/rdkafka_lz4.c - ${RDKAFKA_SOURCE_DIR}/rdkafka_metadata.c - ${RDKAFKA_SOURCE_DIR}/rdkafka_metadata_cache.c - ${RDKAFKA_SOURCE_DIR}/rdkafka_mock.c - ${RDKAFKA_SOURCE_DIR}/rdkafka_mock_cgrp.c - ${RDKAFKA_SOURCE_DIR}/rdkafka_mock_handlers.c - ${RDKAFKA_SOURCE_DIR}/rdkafka_msg.c - ${RDKAFKA_SOURCE_DIR}/rdkafka_msgset_reader.c - ${RDKAFKA_SOURCE_DIR}/rdkafka_msgset_writer.c - ${RDKAFKA_SOURCE_DIR}/rdkafka_offset.c - ${RDKAFKA_SOURCE_DIR}/rdkafka_op.c - ${RDKAFKA_SOURCE_DIR}/rdkafka_partition.c - ${RDKAFKA_SOURCE_DIR}/rdkafka_pattern.c - ${RDKAFKA_SOURCE_DIR}/rdkafka_plugin.c - ${RDKAFKA_SOURCE_DIR}/rdkafka_queue.c - ${RDKAFKA_SOURCE_DIR}/rdkafka_range_assignor.c - ${RDKAFKA_SOURCE_DIR}/rdkafka_request.c - ${RDKAFKA_SOURCE_DIR}/rdkafka_roundrobin_assignor.c - ${RDKAFKA_SOURCE_DIR}/rdkafka_sasl.c -# ${RDKAFKA_SOURCE_DIR}/rdkafka_sasl_cyrus.c # optionally included below -# ${RDKAFKA_SOURCE_DIR}/rdkafka_sasl_oauthbearer.c # optionally included below - ${RDKAFKA_SOURCE_DIR}/rdkafka_sasl_plain.c -# ${RDKAFKA_SOURCE_DIR}/rdkafka_sasl_scram.c # optionally included below -# ${RDKAFKA_SOURCE_DIR}/rdkafka_sasl_win32.c -# ${RDKAFKA_SOURCE_DIR}/rdkafka_ssl.c # optionally included below - ${RDKAFKA_SOURCE_DIR}/rdkafka_sticky_assignor.c - ${RDKAFKA_SOURCE_DIR}/rdkafka_subscription.c - ${RDKAFKA_SOURCE_DIR}/rdkafka_timer.c - ${RDKAFKA_SOURCE_DIR}/rdkafka_topic.c - ${RDKAFKA_SOURCE_DIR}/rdkafka_transport.c - ${RDKAFKA_SOURCE_DIR}/rdkafka_txnmgr.c - ${RDKAFKA_SOURCE_DIR}/rdkafka_zstd.c - ${RDKAFKA_SOURCE_DIR}/rdlist.c - ${RDKAFKA_SOURCE_DIR}/rdlog.c - ${RDKAFKA_SOURCE_DIR}/rdmap.c - ${RDKAFKA_SOURCE_DIR}/rdmurmur2.c - ${RDKAFKA_SOURCE_DIR}/rdports.c - ${RDKAFKA_SOURCE_DIR}/rdrand.c - ${RDKAFKA_SOURCE_DIR}/rdregex.c - ${RDKAFKA_SOURCE_DIR}/rdstring.c - ${RDKAFKA_SOURCE_DIR}/rdunittest.c - ${RDKAFKA_SOURCE_DIR}/rdvarint.c - ${RDKAFKA_SOURCE_DIR}/rdxxhash.c - # ${RDKAFKA_SOURCE_DIR}/regexp.c - ${RDKAFKA_SOURCE_DIR}/snappy.c - ${RDKAFKA_SOURCE_DIR}/tinycthread.c - ${RDKAFKA_SOURCE_DIR}/tinycthread_extra.c + "${RDKAFKA_SOURCE_DIR}/crc32c.c" +# "${RDKAFKA_SOURCE_DIR}/lz4.c" +# "${RDKAFKA_SOURCE_DIR}/lz4frame.c" +# "${RDKAFKA_SOURCE_DIR}/lz4hc.c" + "${RDKAFKA_SOURCE_DIR}/rdaddr.c" + "${RDKAFKA_SOURCE_DIR}/rdavl.c" + "${RDKAFKA_SOURCE_DIR}/rdbuf.c" + 
"${RDKAFKA_SOURCE_DIR}/rdcrc32.c" + "${RDKAFKA_SOURCE_DIR}/rddl.c" + "${RDKAFKA_SOURCE_DIR}/rdfnv1a.c" + "${RDKAFKA_SOURCE_DIR}/rdgz.c" + "${RDKAFKA_SOURCE_DIR}/rdhdrhistogram.c" + "${RDKAFKA_SOURCE_DIR}/rdkafka_admin.c" # looks optional + "${RDKAFKA_SOURCE_DIR}/rdkafka_assignment.c" + "${RDKAFKA_SOURCE_DIR}/rdkafka_assignor.c" + "${RDKAFKA_SOURCE_DIR}/rdkafka_aux.c" # looks optional + "${RDKAFKA_SOURCE_DIR}/rdkafka_background.c" + "${RDKAFKA_SOURCE_DIR}/rdkafka_broker.c" + "${RDKAFKA_SOURCE_DIR}/rdkafka_buf.c" + "${RDKAFKA_SOURCE_DIR}/rdkafka.c" + "${RDKAFKA_SOURCE_DIR}/rdkafka_cert.c" + "${RDKAFKA_SOURCE_DIR}/rdkafka_cgrp.c" + "${RDKAFKA_SOURCE_DIR}/rdkafka_conf.c" + "${RDKAFKA_SOURCE_DIR}/rdkafka_coord.c" + "${RDKAFKA_SOURCE_DIR}/rdkafka_error.c" + "${RDKAFKA_SOURCE_DIR}/rdkafka_event.c" + "${RDKAFKA_SOURCE_DIR}/rdkafka_feature.c" + "${RDKAFKA_SOURCE_DIR}/rdkafka_header.c" + "${RDKAFKA_SOURCE_DIR}/rdkafka_idempotence.c" + "${RDKAFKA_SOURCE_DIR}/rdkafka_interceptor.c" + "${RDKAFKA_SOURCE_DIR}/rdkafka_lz4.c" + "${RDKAFKA_SOURCE_DIR}/rdkafka_metadata.c" + "${RDKAFKA_SOURCE_DIR}/rdkafka_metadata_cache.c" + "${RDKAFKA_SOURCE_DIR}/rdkafka_mock.c" + "${RDKAFKA_SOURCE_DIR}/rdkafka_mock_cgrp.c" + "${RDKAFKA_SOURCE_DIR}/rdkafka_mock_handlers.c" + "${RDKAFKA_SOURCE_DIR}/rdkafka_msg.c" + "${RDKAFKA_SOURCE_DIR}/rdkafka_msgset_reader.c" + "${RDKAFKA_SOURCE_DIR}/rdkafka_msgset_writer.c" + "${RDKAFKA_SOURCE_DIR}/rdkafka_offset.c" + "${RDKAFKA_SOURCE_DIR}/rdkafka_op.c" + "${RDKAFKA_SOURCE_DIR}/rdkafka_partition.c" + "${RDKAFKA_SOURCE_DIR}/rdkafka_pattern.c" + "${RDKAFKA_SOURCE_DIR}/rdkafka_plugin.c" + "${RDKAFKA_SOURCE_DIR}/rdkafka_queue.c" + "${RDKAFKA_SOURCE_DIR}/rdkafka_range_assignor.c" + "${RDKAFKA_SOURCE_DIR}/rdkafka_request.c" + "${RDKAFKA_SOURCE_DIR}/rdkafka_roundrobin_assignor.c" + "${RDKAFKA_SOURCE_DIR}/rdkafka_sasl.c" +# "${RDKAFKA_SOURCE_DIR}/rdkafka_sasl_cyrus.c" # optionally included below +# "${RDKAFKA_SOURCE_DIR}/rdkafka_sasl_oauthbearer.c" # optionally included below + "${RDKAFKA_SOURCE_DIR}/rdkafka_sasl_plain.c" +# "${RDKAFKA_SOURCE_DIR}/rdkafka_sasl_scram.c" # optionally included below +# "${RDKAFKA_SOURCE_DIR}/rdkafka_sasl_win32.c" +# "${RDKAFKA_SOURCE_DIR}/rdkafka_ssl.c" # optionally included below + "${RDKAFKA_SOURCE_DIR}/rdkafka_sticky_assignor.c" + "${RDKAFKA_SOURCE_DIR}/rdkafka_subscription.c" + "${RDKAFKA_SOURCE_DIR}/rdkafka_timer.c" + "${RDKAFKA_SOURCE_DIR}/rdkafka_topic.c" + "${RDKAFKA_SOURCE_DIR}/rdkafka_transport.c" + "${RDKAFKA_SOURCE_DIR}/rdkafka_txnmgr.c" + "${RDKAFKA_SOURCE_DIR}/rdkafka_zstd.c" + "${RDKAFKA_SOURCE_DIR}/rdlist.c" + "${RDKAFKA_SOURCE_DIR}/rdlog.c" + "${RDKAFKA_SOURCE_DIR}/rdmap.c" + "${RDKAFKA_SOURCE_DIR}/rdmurmur2.c" + "${RDKAFKA_SOURCE_DIR}/rdports.c" + "${RDKAFKA_SOURCE_DIR}/rdrand.c" + "${RDKAFKA_SOURCE_DIR}/rdregex.c" + "${RDKAFKA_SOURCE_DIR}/rdstring.c" + "${RDKAFKA_SOURCE_DIR}/rdunittest.c" + "${RDKAFKA_SOURCE_DIR}/rdvarint.c" + "${RDKAFKA_SOURCE_DIR}/rdxxhash.c" + # "${RDKAFKA_SOURCE_DIR}/regexp.c" + "${RDKAFKA_SOURCE_DIR}/snappy.c" + "${RDKAFKA_SOURCE_DIR}/tinycthread.c" + "${RDKAFKA_SOURCE_DIR}/tinycthread_extra.c" ) if(${ENABLE_CYRUS_SASL}) @@ -96,28 +96,28 @@ if(OPENSSL_FOUND) endif() if(WITH_SSL) - list(APPEND SRCS ${RDKAFKA_SOURCE_DIR}/rdkafka_ssl.c) + list(APPEND SRCS "${RDKAFKA_SOURCE_DIR}/rdkafka_ssl.c") endif() if(WITH_SASL_CYRUS) - list(APPEND SRCS ${RDKAFKA_SOURCE_DIR}/rdkafka_sasl_cyrus.c) # needed to support Kerberos, requires cyrus-sasl + list(APPEND SRCS "${RDKAFKA_SOURCE_DIR}/rdkafka_sasl_cyrus.c") # needed to support Kerberos, 
requires cyrus-sasl endif() if(WITH_SASL_SCRAM) - list(APPEND SRCS ${RDKAFKA_SOURCE_DIR}/rdkafka_sasl_scram.c) + list(APPEND SRCS "${RDKAFKA_SOURCE_DIR}/rdkafka_sasl_scram.c") endif() if(WITH_SASL_OAUTHBEARER) - list(APPEND SRCS ${RDKAFKA_SOURCE_DIR}/rdkafka_sasl_oauthbearer.c) + list(APPEND SRCS "${RDKAFKA_SOURCE_DIR}/rdkafka_sasl_oauthbearer.c") endif() add_library(rdkafka ${SRCS}) target_compile_options(rdkafka PRIVATE -fno-sanitize=undefined) # target_include_directories(rdkafka SYSTEM PUBLIC include) -target_include_directories(rdkafka SYSTEM PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/include) # for "librdkafka/rdkafka.h" +target_include_directories(rdkafka SYSTEM PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include") # for "librdkafka/rdkafka.h" target_include_directories(rdkafka SYSTEM PUBLIC ${RDKAFKA_SOURCE_DIR}) # Because weird logic with "include_next" is used. -target_include_directories(rdkafka SYSTEM PUBLIC ${CMAKE_CURRENT_BINARY_DIR}/auxdir) # for "../config.h" -target_include_directories(rdkafka SYSTEM PRIVATE ${ZSTD_INCLUDE_DIR}/common) # Because wrong path to "zstd_errors.h" is used. +target_include_directories(rdkafka SYSTEM PUBLIC "${CMAKE_CURRENT_BINARY_DIR}/auxdir") # for "../config.h" +target_include_directories(rdkafka SYSTEM PRIVATE "${ZSTD_INCLUDE_DIR}/common") # Because wrong path to "zstd_errors.h" is used. target_link_libraries(rdkafka PRIVATE lz4 ${ZLIB_LIBRARIES} ${ZSTD_LIBRARY}) if(OPENSSL_SSL_LIBRARY AND OPENSSL_CRYPTO_LIBRARY) target_link_libraries(rdkafka PRIVATE ${OPENSSL_SSL_LIBRARY} ${OPENSSL_CRYPTO_LIBRARY}) @@ -126,7 +126,7 @@ if(${ENABLE_CYRUS_SASL}) target_link_libraries(rdkafka PRIVATE ${CYRUS_SASL_LIBRARY}) endif() -file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/auxdir) +file(MAKE_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/auxdir") configure_file( "${CMAKE_CURRENT_SOURCE_DIR}/config.h.in" diff --git a/contrib/librdkafka-cmake/config.h.in b/contrib/librdkafka-cmake/config.h.in index 80b6ea61b6e..9fecb45e42d 100644 --- a/contrib/librdkafka-cmake/config.h.in +++ b/contrib/librdkafka-cmake/config.h.in @@ -66,7 +66,7 @@ #cmakedefine WITH_SASL_OAUTHBEARER 1 #cmakedefine WITH_SASL_CYRUS 1 // crc32chw -#if !defined(__PPC__) && (!defined(__aarch64__) || defined(__ARM_FEATURE_CRC32)) +#if !defined(__PPC__) && (!defined(__aarch64__) || defined(__ARM_FEATURE_CRC32)) && !(defined(__aarch64__) && defined(__APPLE__)) #define WITH_CRC32C_HW 1 #endif // regex @@ -75,6 +75,8 @@ #define HAVE_STRNDUP 1 // strerror_r #define HAVE_STRERROR_R 1 +// rand_r +#define HAVE_RAND_R 1 #ifdef __APPLE__ // pthread_setname_np diff --git a/contrib/libunwind-cmake/CMakeLists.txt b/contrib/libunwind-cmake/CMakeLists.txt index 3afff30eee7..1a9f5e50abd 100644 --- a/contrib/libunwind-cmake/CMakeLists.txt +++ b/contrib/libunwind-cmake/CMakeLists.txt @@ -1,27 +1,27 @@ include(CheckCCompilerFlag) include(CheckCXXCompilerFlag) -set(LIBUNWIND_SOURCE_DIR ${ClickHouse_SOURCE_DIR}/contrib/libunwind) +set(LIBUNWIND_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/libunwind") set(LIBUNWIND_CXX_SOURCES - ${LIBUNWIND_SOURCE_DIR}/src/libunwind.cpp - ${LIBUNWIND_SOURCE_DIR}/src/Unwind-EHABI.cpp - ${LIBUNWIND_SOURCE_DIR}/src/Unwind-seh.cpp) + "${LIBUNWIND_SOURCE_DIR}/src/libunwind.cpp" + "${LIBUNWIND_SOURCE_DIR}/src/Unwind-EHABI.cpp" + "${LIBUNWIND_SOURCE_DIR}/src/Unwind-seh.cpp") if (APPLE) - set(LIBUNWIND_CXX_SOURCES ${LIBUNWIND_CXX_SOURCES} ${LIBUNWIND_SOURCE_DIR}/src/Unwind_AppleExtras.cpp) + set(LIBUNWIND_CXX_SOURCES ${LIBUNWIND_CXX_SOURCES} "${LIBUNWIND_SOURCE_DIR}/src/Unwind_AppleExtras.cpp") endif () 
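The `if (APPLE)` hunk just above grows a source list by re-setting it to itself plus one platform-only file; `list(APPEND)` is the equivalent idiom. A minimal sketch with hypothetical paths (runnable with `cmake -P`):

set(UNWIND_SRCS "src/UnwindLevel1.c" "src/Unwind-sjlj.c")  # hypothetical base list
if (APPLE)
    # the style used in the diff: re-set the variable to itself plus the new element
    set(UNWIND_SRCS ${UNWIND_SRCS} "src/Unwind_AppleExtras.cpp")
    # equivalent: list(APPEND UNWIND_SRCS "src/Unwind_AppleExtras.cpp")
endif ()
message(STATUS "sources: ${UNWIND_SRCS}")  # prints a ';'-separated CMake list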
set(LIBUNWIND_C_SOURCES - ${LIBUNWIND_SOURCE_DIR}/src/UnwindLevel1.c - ${LIBUNWIND_SOURCE_DIR}/src/UnwindLevel1-gcc-ext.c - ${LIBUNWIND_SOURCE_DIR}/src/Unwind-sjlj.c + "${LIBUNWIND_SOURCE_DIR}/src/UnwindLevel1.c" + "${LIBUNWIND_SOURCE_DIR}/src/UnwindLevel1-gcc-ext.c" + "${LIBUNWIND_SOURCE_DIR}/src/Unwind-sjlj.c" # Use unw_backtrace to override libgcc's backtrace symbol for better ABI compatibility unwind-override.c) set_source_files_properties(${LIBUNWIND_C_SOURCES} PROPERTIES COMPILE_FLAGS "-std=c99") set(LIBUNWIND_ASM_SOURCES - ${LIBUNWIND_SOURCE_DIR}/src/UnwindRegistersRestore.S - ${LIBUNWIND_SOURCE_DIR}/src/UnwindRegistersSave.S) + "${LIBUNWIND_SOURCE_DIR}/src/UnwindRegistersRestore.S" + "${LIBUNWIND_SOURCE_DIR}/src/UnwindRegistersSave.S") # CMake doesn't pass the correct architecture for Apple prior to CMake 3.19 [1] # Workaround these two issues by compiling as C. diff --git a/contrib/libxml2-cmake/CMakeLists.txt b/contrib/libxml2-cmake/CMakeLists.txt index 068662c7213..8fda0399ea3 100644 --- a/contrib/libxml2-cmake/CMakeLists.txt +++ b/contrib/libxml2-cmake/CMakeLists.txt @@ -1,54 +1,54 @@ -set(LIBXML2_SOURCE_DIR ${ClickHouse_SOURCE_DIR}/contrib/libxml2) -set(LIBXML2_BINARY_DIR ${ClickHouse_BINARY_DIR}/contrib/libxml2) +set(LIBXML2_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/libxml2") +set(LIBXML2_BINARY_DIR "${ClickHouse_BINARY_DIR}/contrib/libxml2") set(SRCS - ${LIBXML2_SOURCE_DIR}/SAX.c - ${LIBXML2_SOURCE_DIR}/entities.c - ${LIBXML2_SOURCE_DIR}/encoding.c - ${LIBXML2_SOURCE_DIR}/error.c - ${LIBXML2_SOURCE_DIR}/parserInternals.c - ${LIBXML2_SOURCE_DIR}/parser.c - ${LIBXML2_SOURCE_DIR}/tree.c - ${LIBXML2_SOURCE_DIR}/hash.c - ${LIBXML2_SOURCE_DIR}/list.c - ${LIBXML2_SOURCE_DIR}/xmlIO.c - ${LIBXML2_SOURCE_DIR}/xmlmemory.c - ${LIBXML2_SOURCE_DIR}/uri.c - ${LIBXML2_SOURCE_DIR}/valid.c - ${LIBXML2_SOURCE_DIR}/xlink.c - ${LIBXML2_SOURCE_DIR}/HTMLparser.c - ${LIBXML2_SOURCE_DIR}/HTMLtree.c - ${LIBXML2_SOURCE_DIR}/debugXML.c - ${LIBXML2_SOURCE_DIR}/xpath.c - ${LIBXML2_SOURCE_DIR}/xpointer.c - ${LIBXML2_SOURCE_DIR}/xinclude.c - ${LIBXML2_SOURCE_DIR}/nanohttp.c - ${LIBXML2_SOURCE_DIR}/nanoftp.c - ${LIBXML2_SOURCE_DIR}/DOCBparser.c - ${LIBXML2_SOURCE_DIR}/catalog.c - ${LIBXML2_SOURCE_DIR}/globals.c - ${LIBXML2_SOURCE_DIR}/threads.c - ${LIBXML2_SOURCE_DIR}/c14n.c - ${LIBXML2_SOURCE_DIR}/xmlstring.c - ${LIBXML2_SOURCE_DIR}/buf.c - ${LIBXML2_SOURCE_DIR}/xmlregexp.c - ${LIBXML2_SOURCE_DIR}/xmlschemas.c - ${LIBXML2_SOURCE_DIR}/xmlschemastypes.c - ${LIBXML2_SOURCE_DIR}/xmlunicode.c - ${LIBXML2_SOURCE_DIR}/triostr.c - #${LIBXML2_SOURCE_DIR}/trio.c - ${LIBXML2_SOURCE_DIR}/xmlreader.c - ${LIBXML2_SOURCE_DIR}/relaxng.c - ${LIBXML2_SOURCE_DIR}/dict.c - ${LIBXML2_SOURCE_DIR}/SAX2.c - ${LIBXML2_SOURCE_DIR}/xmlwriter.c - ${LIBXML2_SOURCE_DIR}/legacy.c - ${LIBXML2_SOURCE_DIR}/chvalid.c - ${LIBXML2_SOURCE_DIR}/pattern.c - ${LIBXML2_SOURCE_DIR}/xmlsave.c - ${LIBXML2_SOURCE_DIR}/xmlmodule.c - ${LIBXML2_SOURCE_DIR}/schematron.c - ${LIBXML2_SOURCE_DIR}/xzlib.c + "${LIBXML2_SOURCE_DIR}/SAX.c" + "${LIBXML2_SOURCE_DIR}/entities.c" + "${LIBXML2_SOURCE_DIR}/encoding.c" + "${LIBXML2_SOURCE_DIR}/error.c" + "${LIBXML2_SOURCE_DIR}/parserInternals.c" + "${LIBXML2_SOURCE_DIR}/parser.c" + "${LIBXML2_SOURCE_DIR}/tree.c" + "${LIBXML2_SOURCE_DIR}/hash.c" + "${LIBXML2_SOURCE_DIR}/list.c" + "${LIBXML2_SOURCE_DIR}/xmlIO.c" + "${LIBXML2_SOURCE_DIR}/xmlmemory.c" + "${LIBXML2_SOURCE_DIR}/uri.c" + "${LIBXML2_SOURCE_DIR}/valid.c" + "${LIBXML2_SOURCE_DIR}/xlink.c" + "${LIBXML2_SOURCE_DIR}/HTMLparser.c" + 
"${LIBXML2_SOURCE_DIR}/HTMLtree.c" + "${LIBXML2_SOURCE_DIR}/debugXML.c" + "${LIBXML2_SOURCE_DIR}/xpath.c" + "${LIBXML2_SOURCE_DIR}/xpointer.c" + "${LIBXML2_SOURCE_DIR}/xinclude.c" + "${LIBXML2_SOURCE_DIR}/nanohttp.c" + "${LIBXML2_SOURCE_DIR}/nanoftp.c" + "${LIBXML2_SOURCE_DIR}/DOCBparser.c" + "${LIBXML2_SOURCE_DIR}/catalog.c" + "${LIBXML2_SOURCE_DIR}/globals.c" + "${LIBXML2_SOURCE_DIR}/threads.c" + "${LIBXML2_SOURCE_DIR}/c14n.c" + "${LIBXML2_SOURCE_DIR}/xmlstring.c" + "${LIBXML2_SOURCE_DIR}/buf.c" + "${LIBXML2_SOURCE_DIR}/xmlregexp.c" + "${LIBXML2_SOURCE_DIR}/xmlschemas.c" + "${LIBXML2_SOURCE_DIR}/xmlschemastypes.c" + "${LIBXML2_SOURCE_DIR}/xmlunicode.c" + "${LIBXML2_SOURCE_DIR}/triostr.c" + #"${LIBXML2_SOURCE_DIR}/trio.c" + "${LIBXML2_SOURCE_DIR}/xmlreader.c" + "${LIBXML2_SOURCE_DIR}/relaxng.c" + "${LIBXML2_SOURCE_DIR}/dict.c" + "${LIBXML2_SOURCE_DIR}/SAX2.c" + "${LIBXML2_SOURCE_DIR}/xmlwriter.c" + "${LIBXML2_SOURCE_DIR}/legacy.c" + "${LIBXML2_SOURCE_DIR}/chvalid.c" + "${LIBXML2_SOURCE_DIR}/pattern.c" + "${LIBXML2_SOURCE_DIR}/xmlsave.c" + "${LIBXML2_SOURCE_DIR}/xmlmodule.c" + "${LIBXML2_SOURCE_DIR}/schematron.c" + "${LIBXML2_SOURCE_DIR}/xzlib.c" ) add_library(libxml2 ${SRCS}) @@ -57,6 +57,6 @@ if(M_LIBRARY) target_link_libraries(libxml2 PRIVATE ${M_LIBRARY}) endif() -target_include_directories(libxml2 PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/linux_x86_64/include) -target_include_directories(libxml2 PUBLIC ${LIBXML2_SOURCE_DIR}/include) +target_include_directories(libxml2 PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/linux_x86_64/include") +target_include_directories(libxml2 PUBLIC "${LIBXML2_SOURCE_DIR}/include") target_include_directories(libxml2 SYSTEM BEFORE PRIVATE ${ZLIB_INCLUDE_DIR}) diff --git a/contrib/llvm b/contrib/llvm index 8f24d507c1c..cfaf365cf96 160000 --- a/contrib/llvm +++ b/contrib/llvm @@ -1 +1 @@ -Subproject commit 8f24d507c1cfeec66d27f48fe74518fd278e2d25 +Subproject commit cfaf365cf96918999d09d976ec736b4518cf5d02 diff --git a/contrib/lz4-cmake/CMakeLists.txt b/contrib/lz4-cmake/CMakeLists.txt index 72510d72534..77e00d4295b 100644 --- a/contrib/lz4-cmake/CMakeLists.txt +++ b/contrib/lz4-cmake/CMakeLists.txt @@ -33,5 +33,5 @@ if (NOT EXTERNAL_LZ4_LIBRARY_FOUND) if (SANITIZE STREQUAL "undefined") target_compile_options (lz4 PRIVATE -fno-sanitize=undefined) endif () - target_include_directories(lz4 PUBLIC ${LIBRARY_DIR}/lib) + target_include_directories(lz4 PUBLIC "${LIBRARY_DIR}/lib") endif () diff --git a/contrib/mariadb-connector-c b/contrib/mariadb-connector-c index f4476ee7311..5f4034a3a63 160000 --- a/contrib/mariadb-connector-c +++ b/contrib/mariadb-connector-c @@ -1 +1 @@ -Subproject commit f4476ee7311b35b593750f6ae2cbdb62a4006374 +Subproject commit 5f4034a3a6376416504f17186c55fe401c6d8e5e diff --git a/contrib/nanodbc b/contrib/nanodbc new file mode 160000 index 00000000000..9fc45967551 --- /dev/null +++ b/contrib/nanodbc @@ -0,0 +1 @@ +Subproject commit 9fc459675515d491401727ec67fca38db721f28c diff --git a/contrib/nanodbc-cmake/CMakeLists.txt b/contrib/nanodbc-cmake/CMakeLists.txt new file mode 100644 index 00000000000..26a030c3995 --- /dev/null +++ b/contrib/nanodbc-cmake/CMakeLists.txt @@ -0,0 +1,18 @@ +if (NOT USE_INTERNAL_NANODBC_LIBRARY) + return () +endif () + +set (LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/nanodbc") + +if (NOT TARGET unixodbc) + message(FATAL_ERROR "Configuration error: unixodbc is not a target") +endif() + +set (SRCS + "${LIBRARY_DIR}/nanodbc/nanodbc.cpp" +) + +add_library(nanodbc ${SRCS}) + +target_link_libraries (nanodbc PUBLIC unixodbc) 
+target_include_directories (nanodbc SYSTEM PUBLIC "${LIBRARY_DIR}/")
diff --git a/contrib/nuraft-cmake/CMakeLists.txt b/contrib/nuraft-cmake/CMakeLists.txt
index 83137fe73bf..725e86195e1 100644
--- a/contrib/nuraft-cmake/CMakeLists.txt
+++ b/contrib/nuraft-cmake/CMakeLists.txt
@@ -1,30 +1,30 @@
-set(LIBRARY_DIR ${ClickHouse_SOURCE_DIR}/contrib/NuRaft)
+set(LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/NuRaft")
set(SRCS
-    ${LIBRARY_DIR}/src/handle_priority.cxx
-    ${LIBRARY_DIR}/src/buffer_serializer.cxx
-    ${LIBRARY_DIR}/src/peer.cxx
-    ${LIBRARY_DIR}/src/global_mgr.cxx
-    ${LIBRARY_DIR}/src/buffer.cxx
-    ${LIBRARY_DIR}/src/asio_service.cxx
-    ${LIBRARY_DIR}/src/handle_client_request.cxx
-    ${LIBRARY_DIR}/src/raft_server.cxx
-    ${LIBRARY_DIR}/src/snapshot.cxx
-    ${LIBRARY_DIR}/src/handle_commit.cxx
-    ${LIBRARY_DIR}/src/error_code.cxx
-    ${LIBRARY_DIR}/src/crc32.cxx
-    ${LIBRARY_DIR}/src/handle_snapshot_sync.cxx
-    ${LIBRARY_DIR}/src/stat_mgr.cxx
-    ${LIBRARY_DIR}/src/handle_join_leave.cxx
-    ${LIBRARY_DIR}/src/handle_user_cmd.cxx
-    ${LIBRARY_DIR}/src/handle_custom_notification.cxx
-    ${LIBRARY_DIR}/src/handle_vote.cxx
-    ${LIBRARY_DIR}/src/launcher.cxx
-    ${LIBRARY_DIR}/src/srv_config.cxx
-    ${LIBRARY_DIR}/src/snapshot_sync_req.cxx
-    ${LIBRARY_DIR}/src/handle_timeout.cxx
-    ${LIBRARY_DIR}/src/handle_append_entries.cxx
-    ${LIBRARY_DIR}/src/cluster_config.cxx
+    "${LIBRARY_DIR}/src/handle_priority.cxx"
+    "${LIBRARY_DIR}/src/buffer_serializer.cxx"
+    "${LIBRARY_DIR}/src/peer.cxx"
+    "${LIBRARY_DIR}/src/global_mgr.cxx"
+    "${LIBRARY_DIR}/src/buffer.cxx"
+    "${LIBRARY_DIR}/src/asio_service.cxx"
+    "${LIBRARY_DIR}/src/handle_client_request.cxx"
+    "${LIBRARY_DIR}/src/raft_server.cxx"
+    "${LIBRARY_DIR}/src/snapshot.cxx"
+    "${LIBRARY_DIR}/src/handle_commit.cxx"
+    "${LIBRARY_DIR}/src/error_code.cxx"
+    "${LIBRARY_DIR}/src/crc32.cxx"
+    "${LIBRARY_DIR}/src/handle_snapshot_sync.cxx"
+    "${LIBRARY_DIR}/src/stat_mgr.cxx"
+    "${LIBRARY_DIR}/src/handle_join_leave.cxx"
+    "${LIBRARY_DIR}/src/handle_user_cmd.cxx"
+    "${LIBRARY_DIR}/src/handle_custom_notification.cxx"
+    "${LIBRARY_DIR}/src/handle_vote.cxx"
+    "${LIBRARY_DIR}/src/launcher.cxx"
+    "${LIBRARY_DIR}/src/srv_config.cxx"
+    "${LIBRARY_DIR}/src/snapshot_sync_req.cxx"
+    "${LIBRARY_DIR}/src/handle_timeout.cxx"
+    "${LIBRARY_DIR}/src/handle_append_entries.cxx"
+    "${LIBRARY_DIR}/src/cluster_config.cxx"
)
@@ -37,9 +37,9 @@ else()
    target_compile_definitions(nuraft PRIVATE USE_BOOST_ASIO=1 BOOST_ASIO_STANDALONE=1)
endif()
-target_include_directories (nuraft SYSTEM PRIVATE ${LIBRARY_DIR}/include/libnuraft)
+target_include_directories (nuraft SYSTEM PRIVATE "${LIBRARY_DIR}/include/libnuraft")
# For some reason NuRaft includes "asio.h" directly, without the "boost/" prefix.
-target_include_directories (nuraft SYSTEM PRIVATE ${ClickHouse_SOURCE_DIR}/contrib/boost/boost)
+target_include_directories (nuraft SYSTEM PRIVATE "${ClickHouse_SOURCE_DIR}/contrib/boost/boost")
target_link_libraries (nuraft PRIVATE boost::headers_only boost::coroutine)
@@ -47,4 +47,4 @@ if(OPENSSL_SSL_LIBRARY AND OPENSSL_CRYPTO_LIBRARY)
    target_link_libraries (nuraft PRIVATE ${OPENSSL_SSL_LIBRARY} ${OPENSSL_CRYPTO_LIBRARY})
endif()
-target_include_directories (nuraft SYSTEM PUBLIC ${LIBRARY_DIR}/include)
+target_include_directories (nuraft SYSTEM PUBLIC "${LIBRARY_DIR}/include")
diff --git a/contrib/openldap-cmake/CMakeLists.txt b/contrib/openldap-cmake/CMakeLists.txt
index b0a5f4048ff..0892403bb62 100644
--- a/contrib/openldap-cmake/CMakeLists.txt
+++ b/contrib/openldap-cmake/CMakeLists.txt
@@ -1,4 +1,4 @@
-set(OPENLDAP_SOURCE_DIR ${ClickHouse_SOURCE_DIR}/contrib/openldap)
+set(OPENLDAP_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/openldap")
# How were these lists generated?
# I compiled the original OpenLDAP with its original build system and copied the list of source files from the build commands.
@@ -12,9 +12,9 @@ set(OPENLDAP_VERSION_STRING "2.5.X")
macro(mkversion _lib_name)
    add_custom_command(
-        OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/${_lib_name}-version.c
-        COMMAND ${CMAKE_COMMAND} -E env bash -c "${OPENLDAP_SOURCE_DIR}/build/mkversion -v '${OPENLDAP_VERSION_STRING}' liblber.la > ${CMAKE_CURRENT_BINARY_DIR}/${_lib_name}-version.c"
-        MAIN_DEPENDENCY ${OPENLDAP_SOURCE_DIR}/build/mkversion
+        OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/${_lib_name}-version.c"
+        COMMAND ${CMAKE_COMMAND} -E env bash -c "${OPENLDAP_SOURCE_DIR}/build/mkversion -v '${OPENLDAP_VERSION_STRING}' liblber.la > \"${CMAKE_CURRENT_BINARY_DIR}/${_lib_name}-version.c\""
+        MAIN_DEPENDENCY "${OPENLDAP_SOURCE_DIR}/build/mkversion"
        WORKING_DIRECTORY ${OPENLDAP_SOURCE_DIR}
        VERBATIM
    )
@@ -37,23 +37,23 @@ endif()
set(_extra_build_dir "${CMAKE_CURRENT_SOURCE_DIR}/${_system_name}_${_system_processor}")
set(_lber_srcs
-    ${OPENLDAP_SOURCE_DIR}/libraries/liblber/assert.c
-    ${OPENLDAP_SOURCE_DIR}/libraries/liblber/decode.c
-    ${OPENLDAP_SOURCE_DIR}/libraries/liblber/encode.c
-    ${OPENLDAP_SOURCE_DIR}/libraries/liblber/io.c
-    ${OPENLDAP_SOURCE_DIR}/libraries/liblber/bprint.c
-    ${OPENLDAP_SOURCE_DIR}/libraries/liblber/debug.c
-    ${OPENLDAP_SOURCE_DIR}/libraries/liblber/memory.c
-    ${OPENLDAP_SOURCE_DIR}/libraries/liblber/options.c
-    ${OPENLDAP_SOURCE_DIR}/libraries/liblber/sockbuf.c
-    ${OPENLDAP_SOURCE_DIR}/libraries/liblber/stdio.c
+    "${OPENLDAP_SOURCE_DIR}/libraries/liblber/assert.c"
+    "${OPENLDAP_SOURCE_DIR}/libraries/liblber/decode.c"
+    "${OPENLDAP_SOURCE_DIR}/libraries/liblber/encode.c"
+    "${OPENLDAP_SOURCE_DIR}/libraries/liblber/io.c"
+    "${OPENLDAP_SOURCE_DIR}/libraries/liblber/bprint.c"
+    "${OPENLDAP_SOURCE_DIR}/libraries/liblber/debug.c"
+    "${OPENLDAP_SOURCE_DIR}/libraries/liblber/memory.c"
+    "${OPENLDAP_SOURCE_DIR}/libraries/liblber/options.c"
+    "${OPENLDAP_SOURCE_DIR}/libraries/liblber/sockbuf.c"
+    "${OPENLDAP_SOURCE_DIR}/libraries/liblber/stdio.c"
)
mkversion(lber)
add_library(lber ${_libs_type}
    ${_lber_srcs}
-    ${CMAKE_CURRENT_BINARY_DIR}/lber-version.c
+    "${CMAKE_CURRENT_BINARY_DIR}/lber-version.c"
)
target_link_libraries(lber
@@ -62,8 +62,8 @@ target_link_libraries(lber
target_include_directories(lber
    PRIVATE ${_extra_build_dir}/include
-    PRIVATE ${OPENLDAP_SOURCE_DIR}/include
-    PRIVATE ${OPENLDAP_SOURCE_DIR}/libraries/liblber
+    PRIVATE "${OPENLDAP_SOURCE_DIR}/include"
+    PRIVATE "${OPENLDAP_SOURCE_DIR}/libraries/liblber"
    PRIVATE
${OPENSSL_INCLUDE_DIR} ) @@ -72,78 +72,78 @@ target_compile_definitions(lber ) set(_ldap_srcs - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/bind.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/open.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/result.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/error.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/compare.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/search.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/controls.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/messages.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/references.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/extended.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/cyrus.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/modify.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/add.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/modrdn.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/delete.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/abandon.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/sasl.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/sbind.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/unbind.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/cancel.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/filter.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/free.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/sort.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/passwd.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/whoami.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/vc.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/getdn.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/getentry.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/getattr.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/getvalues.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/addentry.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/request.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/os-ip.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/url.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/pagectrl.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/sortctrl.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/vlvctrl.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/init.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/options.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/print.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/string.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/util-int.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/schema.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/charray.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/os-local.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/dnssrv.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/utf-8.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/utf-8-conv.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/tls2.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/tls_o.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/tls_g.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/turn.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/ppolicy.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/dds.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/txn.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/ldap_sync.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/stctrl.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/assertion.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/deref.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/ldifutil.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/ldif.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/fetch.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/lbase64.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/msctrl.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap/psearchctrl.c + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/bind.c" + 
"${OPENLDAP_SOURCE_DIR}/libraries/libldap/open.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/result.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/error.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/compare.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/search.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/controls.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/messages.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/references.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/extended.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/cyrus.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/modify.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/add.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/modrdn.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/delete.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/abandon.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/sasl.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/sbind.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/unbind.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/cancel.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/filter.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/free.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/sort.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/passwd.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/whoami.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/vc.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/getdn.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/getentry.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/getattr.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/getvalues.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/addentry.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/request.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/os-ip.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/url.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/pagectrl.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/sortctrl.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/vlvctrl.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/init.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/options.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/print.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/string.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/util-int.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/schema.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/charray.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/os-local.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/dnssrv.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/utf-8.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/utf-8-conv.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/tls2.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/tls_o.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/tls_g.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/turn.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/ppolicy.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/dds.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/txn.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/ldap_sync.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/stctrl.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/assertion.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/deref.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/ldifutil.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/ldif.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/fetch.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/lbase64.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/msctrl.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap/psearchctrl.c" ) mkversion(ldap) add_library(ldap ${_libs_type} ${_ldap_srcs} - 
${CMAKE_CURRENT_BINARY_DIR}/ldap-version.c + "${CMAKE_CURRENT_BINARY_DIR}/ldap-version.c" ) target_link_libraries(ldap @@ -153,8 +153,8 @@ target_link_libraries(ldap target_include_directories(ldap PRIVATE ${_extra_build_dir}/include - PRIVATE ${OPENLDAP_SOURCE_DIR}/include - PRIVATE ${OPENLDAP_SOURCE_DIR}/libraries/libldap + PRIVATE "${OPENLDAP_SOURCE_DIR}/include" + PRIVATE "${OPENLDAP_SOURCE_DIR}/libraries/libldap" PRIVATE ${OPENSSL_INCLUDE_DIR} ) @@ -163,16 +163,16 @@ target_compile_definitions(ldap ) set(_ldap_r_specific_srcs - ${OPENLDAP_SOURCE_DIR}/libraries/libldap_r/threads.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap_r/rdwr.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap_r/tpool.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap_r/rq.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap_r/thr_posix.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap_r/thr_thr.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap_r/thr_nt.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap_r/thr_pth.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap_r/thr_stub.c - ${OPENLDAP_SOURCE_DIR}/libraries/libldap_r/thr_debug.c + "${OPENLDAP_SOURCE_DIR}/libraries/libldap_r/threads.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap_r/rdwr.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap_r/tpool.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap_r/rq.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap_r/thr_posix.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap_r/thr_thr.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap_r/thr_nt.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap_r/thr_pth.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap_r/thr_stub.c" + "${OPENLDAP_SOURCE_DIR}/libraries/libldap_r/thr_debug.c" ) mkversion(ldap_r) @@ -180,7 +180,7 @@ mkversion(ldap_r) add_library(ldap_r ${_libs_type} ${_ldap_r_specific_srcs} ${_ldap_srcs} - ${CMAKE_CURRENT_BINARY_DIR}/ldap_r-version.c + "${CMAKE_CURRENT_BINARY_DIR}/ldap_r-version.c" ) target_link_libraries(ldap_r @@ -190,9 +190,9 @@ target_link_libraries(ldap_r target_include_directories(ldap_r PRIVATE ${_extra_build_dir}/include - PRIVATE ${OPENLDAP_SOURCE_DIR}/include - PRIVATE ${OPENLDAP_SOURCE_DIR}/libraries/libldap_r - PRIVATE ${OPENLDAP_SOURCE_DIR}/libraries/libldap + PRIVATE "${OPENLDAP_SOURCE_DIR}/include" + PRIVATE "${OPENLDAP_SOURCE_DIR}/libraries/libldap_r" + PRIVATE "${OPENLDAP_SOURCE_DIR}/libraries/libldap" PRIVATE ${OPENSSL_INCLUDE_DIR} ) diff --git a/contrib/openldap-cmake/darwin_aarch64/include/lber_types.h b/contrib/openldap-cmake/darwin_aarch64/include/lber_types.h new file mode 100644 index 00000000000..dbd59430527 --- /dev/null +++ b/contrib/openldap-cmake/darwin_aarch64/include/lber_types.h @@ -0,0 +1,63 @@ +/* include/lber_types.h. Generated from lber_types.hin by configure. */ +/* $OpenLDAP$ */ +/* This work is part of OpenLDAP Software . + * + * Copyright 1998-2020 The OpenLDAP Foundation. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted only as authorized by the OpenLDAP + * Public License. + * + * A copy of this license is available in file LICENSE in the + * top-level directory of the distribution or, alternatively, at + * . 
+ */
+
+/*
+ * LBER types
+ */
+
+#ifndef _LBER_TYPES_H
+#define _LBER_TYPES_H
+
+#include <ldap_cdefs.h>
+
+LDAP_BEGIN_DECL
+
+/* LBER boolean, enum, integers (32 bits or larger) */
+#define LBER_INT_T int
+
+/* LBER tags (32 bits or larger) */
+#define LBER_TAG_T long
+
+/* LBER socket descriptor */
+#define LBER_SOCKET_T int
+
+/* LBER lengths (32 bits or larger) */
+#define LBER_LEN_T long
+
+/* ------------------------------------------------------------ */
+
+/* booleans, enumerations, and integers */
+typedef LBER_INT_T ber_int_t;
+
+/* signed and unsigned versions */
+typedef signed LBER_INT_T ber_sint_t;
+typedef unsigned LBER_INT_T ber_uint_t;
+
+/* tags */
+typedef unsigned LBER_TAG_T ber_tag_t;
+
+/* "socket" descriptors */
+typedef LBER_SOCKET_T ber_socket_t;
+
+/* lengths */
+typedef unsigned LBER_LEN_T ber_len_t;
+
+/* signed lengths */
+typedef signed LBER_LEN_T ber_slen_t;
+
+LDAP_END_DECL
+
+#endif /* _LBER_TYPES_H */
diff --git a/contrib/openldap-cmake/darwin_aarch64/include/ldap_config.h b/contrib/openldap-cmake/darwin_aarch64/include/ldap_config.h
new file mode 100644
index 00000000000..89f7b40b884
--- /dev/null
+++ b/contrib/openldap-cmake/darwin_aarch64/include/ldap_config.h
@@ -0,0 +1,74 @@
+/* include/ldap_config.h. Generated from ldap_config.hin by configure. */
+/* $OpenLDAP$ */
+/* This work is part of OpenLDAP Software <http://www.OpenLDAP.org/>.
+ *
+ * Copyright 1998-2020 The OpenLDAP Foundation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted only as authorized by the OpenLDAP
+ * Public License.
+ *
+ * A copy of this license is available in file LICENSE in the
+ * top-level directory of the distribution or, alternatively, at
+ * <http://www.OpenLDAP.org/license.html>.
+ */
+
+/*
+ * This file works in conjunction with OpenLDAP configure system.
+ * If you do not like the values below, adjust your configure options.
+ */
+
+#ifndef _LDAP_CONFIG_H
+#define _LDAP_CONFIG_H
+
+/* directory separator */
+#ifndef LDAP_DIRSEP
+#ifndef _WIN32
+#define LDAP_DIRSEP "/"
+#else
+#define LDAP_DIRSEP "\\"
+#endif
+#endif
+
+/* directory for temporary files */
+#if defined(_WIN32)
+# define LDAP_TMPDIR "C:\\."    /* we don't have much of a choice */
+#elif defined( _P_tmpdir )
+# define LDAP_TMPDIR _P_tmpdir
+#elif defined( P_tmpdir )
+# define LDAP_TMPDIR P_tmpdir
+#elif defined( _PATH_TMPDIR )
+# define LDAP_TMPDIR _PATH_TMPDIR
+#else
+# define LDAP_TMPDIR LDAP_DIRSEP "tmp"
+#endif
+
+/* directories */
+#ifndef LDAP_BINDIR
+#define LDAP_BINDIR "/tmp/ldap-prefix/bin"
+#endif
+#ifndef LDAP_SBINDIR
+#define LDAP_SBINDIR "/tmp/ldap-prefix/sbin"
+#endif
+#ifndef LDAP_DATADIR
+#define LDAP_DATADIR "/tmp/ldap-prefix/share/openldap"
+#endif
+#ifndef LDAP_SYSCONFDIR
+#define LDAP_SYSCONFDIR "/tmp/ldap-prefix/etc/openldap"
+#endif
+#ifndef LDAP_LIBEXECDIR
+#define LDAP_LIBEXECDIR "/tmp/ldap-prefix/libexec"
+#endif
+#ifndef LDAP_MODULEDIR
+#define LDAP_MODULEDIR "/tmp/ldap-prefix/libexec/openldap"
+#endif
+#ifndef LDAP_RUNDIR
+#define LDAP_RUNDIR "/tmp/ldap-prefix/var"
+#endif
+#ifndef LDAP_LOCALEDIR
+#define LDAP_LOCALEDIR ""
+#endif
+
+
+#endif /* _LDAP_CONFIG_H */
diff --git a/contrib/openldap-cmake/darwin_aarch64/include/ldap_features.h b/contrib/openldap-cmake/darwin_aarch64/include/ldap_features.h
new file mode 100644
index 00000000000..f0cc7c3626f
--- /dev/null
+++ b/contrib/openldap-cmake/darwin_aarch64/include/ldap_features.h
@@ -0,0 +1,61 @@
+/* include/ldap_features.h. Generated from ldap_features.hin by configure.
*/ +/* $OpenLDAP$ */ +/* This work is part of OpenLDAP Software . + * + * Copyright 1998-2020 The OpenLDAP Foundation. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted only as authorized by the OpenLDAP + * Public License. + * + * A copy of this license is available in file LICENSE in the + * top-level directory of the distribution or, alternatively, at + * . + */ + +/* + * LDAP Features + */ + +#ifndef _LDAP_FEATURES_H +#define _LDAP_FEATURES_H 1 + +/* OpenLDAP API version macros */ +#define LDAP_VENDOR_VERSION 20501 +#define LDAP_VENDOR_VERSION_MAJOR 2 +#define LDAP_VENDOR_VERSION_MINOR 5 +#define LDAP_VENDOR_VERSION_PATCH X + +/* +** WORK IN PROGRESS! +** +** OpenLDAP reentrancy/thread-safeness should be dynamically +** checked using ldap_get_option(). +** +** The -lldap implementation is not thread-safe. +** +** The -lldap_r implementation is: +** LDAP_API_FEATURE_THREAD_SAFE (basic thread safety) +** but also be: +** LDAP_API_FEATURE_SESSION_THREAD_SAFE +** LDAP_API_FEATURE_OPERATION_THREAD_SAFE +** +** The preprocessor flag LDAP_API_FEATURE_X_OPENLDAP_THREAD_SAFE +** can be used to determine if -lldap_r is available at compile +** time. You must define LDAP_THREAD_SAFE if and only if you +** link with -lldap_r. +** +** If you fail to define LDAP_THREAD_SAFE when linking with +** -lldap_r or define LDAP_THREAD_SAFE when linking with -lldap, +** provided header definitions and declarations may be incorrect. +** +*/ + +/* is -lldap_r available or not */ +#define LDAP_API_FEATURE_X_OPENLDAP_THREAD_SAFE 1 + +/* LDAP v2 Referrals */ +/* #undef LDAP_API_FEATURE_X_OPENLDAP_V2_REFERRALS */ + +#endif /* LDAP_FEATURES */ diff --git a/contrib/openldap-cmake/darwin_aarch64/include/portable.h b/contrib/openldap-cmake/darwin_aarch64/include/portable.h new file mode 100644 index 00000000000..fdf4e89017e --- /dev/null +++ b/contrib/openldap-cmake/darwin_aarch64/include/portable.h @@ -0,0 +1,1169 @@ +/* include/portable.h. Generated from portable.hin by configure. */ +/* include/portable.hin. Generated from configure.in by autoheader. */ + + +/* begin of portable.h.pre */ +/* This work is part of OpenLDAP Software . + * + * Copyright 1998-2020 The OpenLDAP Foundation + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted only as authorized by the OpenLDAP + * Public License. + * + * A copy of this license is available in the file LICENSE in the + * top-level directory of the distribution or, alternatively, at + * . 
+ */ + +#ifndef _LDAP_PORTABLE_H +#define _LDAP_PORTABLE_H + +/* define this if needed to get reentrant functions */ +#ifndef REENTRANT +#define REENTRANT 1 +#endif +#ifndef _REENTRANT +#define _REENTRANT 1 +#endif + +/* define this if needed to get threadsafe functions */ +#ifndef THREADSAFE +#define THREADSAFE 1 +#endif +#ifndef _THREADSAFE +#define _THREADSAFE 1 +#endif +#ifndef THREAD_SAFE +#define THREAD_SAFE 1 +#endif +#ifndef _THREAD_SAFE +#define _THREAD_SAFE 1 +#endif + +#ifndef _SGI_MP_SOURCE +#define _SGI_MP_SOURCE 1 +#endif + +/* end of portable.h.pre */ + + +/* Define if building universal (internal helper macro) */ +/* #undef AC_APPLE_UNIVERSAL_BUILD */ + +/* define to use both and */ +/* #undef BOTH_STRINGS_H */ + +/* define if cross compiling */ +/* #undef CROSS_COMPILING */ + +/* set to the number of arguments ctime_r() expects */ +#define CTIME_R_NARGS 2 + +/* define if toupper() requires islower() */ +/* #undef C_UPPER_LOWER */ + +/* define if sys_errlist is not declared in stdio.h or errno.h */ +/* #undef DECL_SYS_ERRLIST */ + +/* define to enable slapi library */ +/* #undef ENABLE_SLAPI */ + +/* defined to be the EXE extension */ +#define EXEEXT "" + +/* set to the number of arguments gethostbyaddr_r() expects */ +/* #undef GETHOSTBYADDR_R_NARGS */ + +/* set to the number of arguments gethostbyname_r() expects */ +/* #undef GETHOSTBYNAME_R_NARGS */ + +/* Define to 1 if `TIOCGWINSZ' requires . */ +/* #undef GWINSZ_IN_SYS_IOCTL */ + +/* define if you have AIX security lib */ +/* #undef HAVE_AIX_SECURITY */ + +/* Define to 1 if you have the header file. */ +#define HAVE_ARPA_INET_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_ARPA_NAMESER_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_ASSERT_H 1 + +/* Define to 1 if you have the `bcopy' function. */ +#define HAVE_BCOPY 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_BITS_TYPES_H */ + +/* Define to 1 if you have the `chroot' function. */ +#define HAVE_CHROOT 1 + +/* Define to 1 if you have the `closesocket' function. */ +/* #undef HAVE_CLOSESOCKET */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_CONIO_H */ + +/* define if crypt(3) is available */ +/* #undef HAVE_CRYPT */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_CRYPT_H */ + +/* define if crypt_r() is also available */ +/* #undef HAVE_CRYPT_R */ + +/* Define to 1 if you have the `ctime_r' function. */ +#define HAVE_CTIME_R 1 + +/* define if you have Cyrus SASL */ +/* #undef HAVE_CYRUS_SASL */ + +/* define if your system supports /dev/poll */ +/* #undef HAVE_DEVPOLL */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_DIRECT_H */ + +/* Define to 1 if you have the header file, and it defines `DIR'. + */ +#define HAVE_DIRENT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_DLFCN_H 1 + +/* Define to 1 if you don't have `vprintf' but do have `_doprnt.' */ +/* #undef HAVE_DOPRNT */ + +/* define if system uses EBCDIC instead of ASCII */ +/* #undef HAVE_EBCDIC */ + +/* Define to 1 if you have the `endgrent' function. */ +#define HAVE_ENDGRENT 1 + +/* Define to 1 if you have the `endpwent' function. */ +#define HAVE_ENDPWENT 1 + +/* define if your system supports epoll */ +/* #undef HAVE_EPOLL */ + +/* Define to 1 if you have the header file. */ +#define HAVE_ERRNO_H 1 + +/* Define to 1 if you have the `fcntl' function. */ +#define HAVE_FCNTL 1 + +/* Define to 1 if you have the header file. 
*/ +#define HAVE_FCNTL_H 1 + +/* define if you actually have FreeBSD fetch(3) */ +/* #undef HAVE_FETCH */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_FILIO_H */ + +/* Define to 1 if you have the `flock' function. */ +#define HAVE_FLOCK 1 + +/* Define to 1 if you have the `fstat' function. */ +#define HAVE_FSTAT 1 + +/* Define to 1 if you have the `gai_strerror' function. */ +#define HAVE_GAI_STRERROR 1 + +/* Define to 1 if you have the `getaddrinfo' function. */ +#define HAVE_GETADDRINFO 1 + +/* Define to 1 if you have the `getdtablesize' function. */ +#define HAVE_GETDTABLESIZE 1 + +/* Define to 1 if you have the `geteuid' function. */ +#define HAVE_GETEUID 1 + +/* Define to 1 if you have the `getgrgid' function. */ +#define HAVE_GETGRGID 1 + +/* Define to 1 if you have the `gethostbyaddr_r' function. */ +/* #undef HAVE_GETHOSTBYADDR_R */ + +/* Define to 1 if you have the `gethostbyname_r' function. */ +/* #undef HAVE_GETHOSTBYNAME_R */ + +/* Define to 1 if you have the `gethostname' function. */ +#define HAVE_GETHOSTNAME 1 + +/* Define to 1 if you have the `getnameinfo' function. */ +#define HAVE_GETNAMEINFO 1 + +/* Define to 1 if you have the `getopt' function. */ +#define HAVE_GETOPT 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_GETOPT_H 1 + +/* Define to 1 if you have the `getpassphrase' function. */ +/* #undef HAVE_GETPASSPHRASE */ + +/* Define to 1 if you have the `getpeereid' function. */ +#define HAVE_GETPEEREID 1 + +/* Define to 1 if you have the `getpeerucred' function. */ +/* #undef HAVE_GETPEERUCRED */ + +/* Define to 1 if you have the `getpwnam' function. */ +#define HAVE_GETPWNAM 1 + +/* Define to 1 if you have the `getpwuid' function. */ +#define HAVE_GETPWUID 1 + +/* Define to 1 if you have the `getspnam' function. */ +/* #undef HAVE_GETSPNAM */ + +/* Define to 1 if you have the `gettimeofday' function. */ +#define HAVE_GETTIMEOFDAY 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_GMP_H */ + +/* Define to 1 if you have the `gmtime_r' function. */ +#define HAVE_GMTIME_R 1 + +/* define if you have GNUtls */ +/* #undef HAVE_GNUTLS */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_GNUTLS_GNUTLS_H */ + +/* if you have GNU Pth */ +/* #undef HAVE_GNU_PTH */ + +/* Define to 1 if you have the header file. */ +#define HAVE_GRP_H 1 + +/* Define to 1 if you have the `hstrerror' function. */ +#define HAVE_HSTRERROR 1 + +/* define to you inet_aton(3) is available */ +#define HAVE_INET_ATON 1 + +/* Define to 1 if you have the `inet_ntoa_b' function. */ +/* #undef HAVE_INET_NTOA_B */ + +/* Define to 1 if you have the `inet_ntop' function. */ +#define HAVE_INET_NTOP 1 + +/* Define to 1 if you have the `initgroups' function. */ +#define HAVE_INITGROUPS 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_INTTYPES_H 1 + +/* Define to 1 if you have the `ioctl' function. */ +#define HAVE_IOCTL 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_IO_H */ + +/* define if your system supports kqueue */ +#define HAVE_KQUEUE 1 + +/* Define to 1 if you have the `gen' library (-lgen). */ +/* #undef HAVE_LIBGEN */ + +/* Define to 1 if you have the `gmp' library (-lgmp). */ +/* #undef HAVE_LIBGMP */ + +/* Define to 1 if you have the `inet' library (-linet). */ +/* #undef HAVE_LIBINET */ + +/* define if you have libtool -ltdl */ +/* #undef HAVE_LIBLTDL */ + +/* Define to 1 if you have the `net' library (-lnet). */ +/* #undef HAVE_LIBNET */ + +/* Define to 1 if you have the `nsl' library (-lnsl). 
*/ +/* #undef HAVE_LIBNSL */ + +/* Define to 1 if you have the `nsl_s' library (-lnsl_s). */ +/* #undef HAVE_LIBNSL_S */ + +/* Define to 1 if you have the `socket' library (-lsocket). */ +/* #undef HAVE_LIBSOCKET */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_LIBUTIL_H */ + +/* Define to 1 if you have the `V3' library (-lV3). */ +/* #undef HAVE_LIBV3 */ + +/* Define to 1 if you have the header file. */ +#define HAVE_LIMITS_H 1 + +/* if you have LinuxThreads */ +/* #undef HAVE_LINUX_THREADS */ + +/* Define to 1 if you have the header file. */ +#define HAVE_LOCALE_H 1 + +/* Define to 1 if you have the `localtime_r' function. */ +#define HAVE_LOCALTIME_R 1 + +/* Define to 1 if you have the `lockf' function. */ +#define HAVE_LOCKF 1 + +/* Define to 1 if the system has the type `long long'. */ +#define HAVE_LONG_LONG 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_LTDL_H */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_MALLOC_H */ + +/* Define to 1 if you have the `memcpy' function. */ +#define HAVE_MEMCPY 1 + +/* Define to 1 if you have the `memmove' function. */ +#define HAVE_MEMMOVE 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_MEMORY_H 1 + +/* Define to 1 if you have the `memrchr' function. */ +/* #undef HAVE_MEMRCHR */ + +/* Define to 1 if you have the `mkstemp' function. */ +#define HAVE_MKSTEMP 1 + +/* Define to 1 if you have the `mktemp' function. */ +#define HAVE_MKTEMP 1 + +/* define this if you have mkversion */ +#define HAVE_MKVERSION 1 + +/* Define to 1 if you have the header file, and it defines `DIR'. */ +/* #undef HAVE_NDIR_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_NETINET_TCP_H 1 + +/* define if strerror_r returns char* instead of int */ +/* #undef HAVE_NONPOSIX_STRERROR_R */ + +/* if you have NT Event Log */ +/* #undef HAVE_NT_EVENT_LOG */ + +/* if you have NT Service Manager */ +/* #undef HAVE_NT_SERVICE_MANAGER */ + +/* if you have NT Threads */ +/* #undef HAVE_NT_THREADS */ + +/* define if you have OpenSSL */ +#define HAVE_OPENSSL 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_OPENSSL_BN_H 1 + +/* define if you have OpenSSL with CRL checking capability */ +#define HAVE_OPENSSL_CRL 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_OPENSSL_CRYPTO_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_OPENSSL_SSL_H 1 + +/* Define to 1 if you have the `pipe' function. */ +#define HAVE_PIPE 1 + +/* Define to 1 if you have the `poll' function. */ +#define HAVE_POLL 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_POLL_H 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_PROCESS_H */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_PSAP_H */ + +/* define to pthreads API spec revision */ +#define HAVE_PTHREADS 10 + +/* define if you have pthread_detach function */ +#define HAVE_PTHREAD_DETACH 1 + +/* Define to 1 if you have the `pthread_getconcurrency' function. */ +#define HAVE_PTHREAD_GETCONCURRENCY 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_PTHREAD_H 1 + +/* Define to 1 if you have the `pthread_kill' function. */ +#define HAVE_PTHREAD_KILL 1 + +/* Define to 1 if you have the `pthread_kill_other_threads_np' function. */ +/* #undef HAVE_PTHREAD_KILL_OTHER_THREADS_NP */ + +/* define if you have pthread_rwlock_destroy function */ +#define HAVE_PTHREAD_RWLOCK_DESTROY 1 + +/* Define to 1 if you have the `pthread_setconcurrency' function. 
*/ +#define HAVE_PTHREAD_SETCONCURRENCY 1 + +/* Define to 1 if you have the `pthread_yield' function. */ +/* #undef HAVE_PTHREAD_YIELD */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_PTH_H */ + +/* Define to 1 if the system has the type `ptrdiff_t'. */ +#define HAVE_PTRDIFF_T 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_PWD_H 1 + +/* Define to 1 if you have the `read' function. */ +#define HAVE_READ 1 + +/* Define to 1 if you have the `recv' function. */ +#define HAVE_RECV 1 + +/* Define to 1 if you have the `recvfrom' function. */ +#define HAVE_RECVFROM 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_REGEX_H 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_RESOLV_H */ + +/* define if you have res_query() */ +/* #undef HAVE_RES_QUERY */ + +/* define if OpenSSL needs RSAref */ +/* #undef HAVE_RSAREF */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SASL_H */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SASL_SASL_H */ + +/* define if your SASL library has sasl_version() */ +/* #undef HAVE_SASL_VERSION */ + +/* Define to 1 if you have the header file. */ +#define HAVE_SCHED_H 1 + +/* Define to 1 if you have the `sched_yield' function. */ +#define HAVE_SCHED_YIELD 1 + +/* Define to 1 if you have the `send' function. */ +#define HAVE_SEND 1 + +/* Define to 1 if you have the `sendmsg' function. */ +#define HAVE_SENDMSG 1 + +/* Define to 1 if you have the `sendto' function. */ +#define HAVE_SENDTO 1 + +/* Define to 1 if you have the `setegid' function. */ +#define HAVE_SETEGID 1 + +/* Define to 1 if you have the `seteuid' function. */ +#define HAVE_SETEUID 1 + +/* Define to 1 if you have the `setgid' function. */ +#define HAVE_SETGID 1 + +/* Define to 1 if you have the `setpwfile' function. */ +/* #undef HAVE_SETPWFILE */ + +/* Define to 1 if you have the `setsid' function. */ +#define HAVE_SETSID 1 + +/* Define to 1 if you have the `setuid' function. */ +#define HAVE_SETUID 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SGTTY_H 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SHADOW_H */ + +/* Define to 1 if you have the `sigaction' function. */ +#define HAVE_SIGACTION 1 + +/* Define to 1 if you have the `signal' function. */ +#define HAVE_SIGNAL 1 + +/* Define to 1 if you have the `sigset' function. */ +#define HAVE_SIGSET 1 + +/* define if you have -lslp */ +/* #undef HAVE_SLP */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SLP_H */ + +/* Define to 1 if you have the `snprintf' function. */ +#define HAVE_SNPRINTF 1 + +/* if you have spawnlp() */ +/* #undef HAVE_SPAWNLP */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SQLEXT_H */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SQL_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_STDDEF_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STDINT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STDLIB_H 1 + +/* Define to 1 if you have the `strdup' function. */ +#define HAVE_STRDUP 1 + +/* Define to 1 if you have the `strerror' function. */ +#define HAVE_STRERROR 1 + +/* Define to 1 if you have the `strerror_r' function. */ +#define HAVE_STRERROR_R 1 + +/* Define to 1 if you have the `strftime' function. */ +#define HAVE_STRFTIME 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STRINGS_H 1 + +/* Define to 1 if you have the header file. 
*/ +#define HAVE_STRING_H 1 + +/* Define to 1 if you have the `strpbrk' function. */ +#define HAVE_STRPBRK 1 + +/* Define to 1 if you have the `strrchr' function. */ +#define HAVE_STRRCHR 1 + +/* Define to 1 if you have the `strsep' function. */ +#define HAVE_STRSEP 1 + +/* Define to 1 if you have the `strspn' function. */ +#define HAVE_STRSPN 1 + +/* Define to 1 if you have the `strstr' function. */ +#define HAVE_STRSTR 1 + +/* Define to 1 if you have the `strtol' function. */ +#define HAVE_STRTOL 1 + +/* Define to 1 if you have the `strtoll' function. */ +#define HAVE_STRTOLL 1 + +/* Define to 1 if you have the `strtoq' function. */ +#define HAVE_STRTOQ 1 + +/* Define to 1 if you have the `strtoul' function. */ +#define HAVE_STRTOUL 1 + +/* Define to 1 if you have the `strtoull' function. */ +#define HAVE_STRTOULL 1 + +/* Define to 1 if you have the `strtouq' function. */ +#define HAVE_STRTOUQ 1 + +/* Define to 1 if `msg_accrightslen' is a member of `struct msghdr'. */ +/* #undef HAVE_STRUCT_MSGHDR_MSG_ACCRIGHTSLEN */ + +/* Define to 1 if `msg_control' is a member of `struct msghdr'. */ +/* #undef HAVE_STRUCT_MSGHDR_MSG_CONTROL */ + +/* Define to 1 if `pw_gecos' is a member of `struct passwd'. */ +#define HAVE_STRUCT_PASSWD_PW_GECOS 1 + +/* Define to 1 if `pw_passwd' is a member of `struct passwd'. */ +#define HAVE_STRUCT_PASSWD_PW_PASSWD 1 + +/* Define to 1 if `st_blksize' is a member of `struct stat'. */ +#define HAVE_STRUCT_STAT_ST_BLKSIZE 1 + +/* Define to 1 if `st_fstype' is a member of `struct stat'. */ +/* #undef HAVE_STRUCT_STAT_ST_FSTYPE */ + +/* define to 1 if st_fstype is char * */ +/* #undef HAVE_STRUCT_STAT_ST_FSTYPE_CHAR */ + +/* define to 1 if st_fstype is int */ +/* #undef HAVE_STRUCT_STAT_ST_FSTYPE_INT */ + +/* Define to 1 if `st_vfstype' is a member of `struct stat'. */ +/* #undef HAVE_STRUCT_STAT_ST_VFSTYPE */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYNCH_H */ + +/* Define to 1 if you have the `sysconf' function. */ +#define HAVE_SYSCONF 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYSEXITS_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYSLOG_H 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYS_DEVPOLL_H */ + +/* Define to 1 if you have the header file, and it defines `DIR'. + */ +/* #undef HAVE_SYS_DIR_H */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYS_EPOLL_H */ + +/* define if you actually have sys_errlist in your libs */ +#define HAVE_SYS_ERRLIST 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_ERRNO_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_EVENT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_FILE_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_FILIO_H 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYS_FSTYP_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_IOCTL_H 1 + +/* Define to 1 if you have the header file, and it defines `DIR'. + */ +/* #undef HAVE_SYS_NDIR_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_PARAM_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_POLL_H 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYS_PRIVGRP_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_RESOURCE_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_SELECT_H 1 + +/* Define to 1 if you have the header file. 
*/ +#define HAVE_SYS_SOCKET_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_STAT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_SYSLOG_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_TIME_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_TYPES_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_UCRED_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_UIO_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_UN_H 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYS_UUID_H */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYS_VMOUNT_H */ + +/* Define to 1 if you have that is POSIX.1 compatible. */ +#define HAVE_SYS_WAIT_H 1 + +/* define if you have -lwrap */ +/* #undef HAVE_TCPD */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_TCPD_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_TERMIOS_H 1 + +/* if you have Solaris LWP (thr) package */ +/* #undef HAVE_THR */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_THREAD_H */ + +/* Define to 1 if you have the `thr_getconcurrency' function. */ +/* #undef HAVE_THR_GETCONCURRENCY */ + +/* Define to 1 if you have the `thr_setconcurrency' function. */ +/* #undef HAVE_THR_SETCONCURRENCY */ + +/* Define to 1 if you have the `thr_yield' function. */ +/* #undef HAVE_THR_YIELD */ + +/* define if you have TLS */ +#define HAVE_TLS 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_UNISTD_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_UTIME_H 1 + +/* define if you have uuid_generate() */ +/* #undef HAVE_UUID_GENERATE */ + +/* define if you have uuid_to_str() */ +/* #undef HAVE_UUID_TO_STR */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_UUID_UUID_H */ + +/* Define to 1 if you have the `vprintf' function. */ +#define HAVE_VPRINTF 1 + +/* Define to 1 if you have the `vsnprintf' function. */ +#define HAVE_VSNPRINTF 1 + +/* Define to 1 if you have the `wait4' function. */ +#define HAVE_WAIT4 1 + +/* Define to 1 if you have the `waitpid' function. */ +#define HAVE_WAITPID 1 + +/* define if you have winsock */ +/* #undef HAVE_WINSOCK */ + +/* define if you have winsock2 */ +/* #undef HAVE_WINSOCK2 */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_WINSOCK2_H */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_WINSOCK_H */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_WIREDTIGER_H */ + +/* Define to 1 if you have the `write' function. */ +#define HAVE_WRITE 1 + +/* define if select implicitly yields */ +#define HAVE_YIELDING_SELECT 1 + +/* Define to 1 if you have the `_vsnprintf' function. 
*/ +/* #undef HAVE__VSNPRINTF */ + +/* define to 32-bit or greater integer type */ +#define LBER_INT_T int + +/* define to large integer type */ +#define LBER_LEN_T long + +/* define to socket descriptor type */ +#define LBER_SOCKET_T int + +/* define to large integer type */ +#define LBER_TAG_T long + +/* define to 1 if library is thread safe */ +#define LDAP_API_FEATURE_X_OPENLDAP_THREAD_SAFE 1 + +/* define to LDAP VENDOR VERSION */ +/* #undef LDAP_API_FEATURE_X_OPENLDAP_V2_REFERRALS */ + +/* define this to add debugging code */ +/* #undef LDAP_DEBUG */ + +/* define if LDAP libs are dynamic */ +/* #undef LDAP_LIBS_DYNAMIC */ + +/* define to support PF_INET6 */ +#define LDAP_PF_INET6 1 + +/* define to support PF_LOCAL */ +#define LDAP_PF_LOCAL 1 + +/* define this to add SLAPI code */ +/* #undef LDAP_SLAPI */ + +/* define this to add syslog code */ +/* #undef LDAP_SYSLOG */ + +/* Version */ +#define LDAP_VENDOR_VERSION 20501 + +/* Major */ +#define LDAP_VENDOR_VERSION_MAJOR 2 + +/* Minor */ +#define LDAP_VENDOR_VERSION_MINOR 5 + +/* Patch */ +#define LDAP_VENDOR_VERSION_PATCH X + +/* Define to the sub-directory where libtool stores uninstalled libraries. */ +#define LT_OBJDIR ".libs/" + +/* define if memcmp is not 8-bit clean or is otherwise broken */ +/* #undef NEED_MEMCMP_REPLACEMENT */ + +/* define if you have (or want) no threads */ +/* #undef NO_THREADS */ + +/* define to use the original debug style */ +/* #undef OLD_DEBUG */ + +/* Package */ +#define OPENLDAP_PACKAGE "OpenLDAP" + +/* Version */ +#define OPENLDAP_VERSION "2.5.X" + +/* Define to the address where bug reports for this package should be sent. */ +#define PACKAGE_BUGREPORT "" + +/* Define to the full name of this package. */ +#define PACKAGE_NAME "" + +/* Define to the full name and version of this package. */ +#define PACKAGE_STRING "" + +/* Define to the one symbol short name of this package. */ +#define PACKAGE_TARNAME "" + +/* Define to the home page for this package. */ +#define PACKAGE_URL "" + +/* Define to the version of this package. */ +#define PACKAGE_VERSION "" + +/* define if sched_yield yields the entire process */ +/* #undef REPLACE_BROKEN_YIELD */ + +/* Define as the return type of signal handlers (`int' or `void'). */ +#define RETSIGTYPE void + +/* Define to the type of arg 1 for `select'. */ +#define SELECT_TYPE_ARG1 int + +/* Define to the type of args 2, 3 and 4 for `select'. */ +#define SELECT_TYPE_ARG234 (fd_set *) + +/* Define to the type of arg 5 for `select'. */ +#define SELECT_TYPE_ARG5 (struct timeval *) + +/* The size of `int', as computed by sizeof. */ +#define SIZEOF_INT 4 + +/* The size of `long', as computed by sizeof. */ +#define SIZEOF_LONG 8 + +/* The size of `long long', as computed by sizeof. */ +#define SIZEOF_LONG_LONG 8 + +/* The size of `short', as computed by sizeof. */ +#define SIZEOF_SHORT 2 + +/* The size of `wchar_t', as computed by sizeof. 
*/ +#define SIZEOF_WCHAR_T 4 + +/* define to support per-object ACIs */ +/* #undef SLAPD_ACI_ENABLED */ + +/* define to support LDAP Async Metadirectory backend */ +/* #undef SLAPD_ASYNCMETA */ + +/* define to support cleartext passwords */ +/* #undef SLAPD_CLEARTEXT */ + +/* define to support crypt(3) passwords */ +/* #undef SLAPD_CRYPT */ + +/* define to support DNS SRV backend */ +/* #undef SLAPD_DNSSRV */ + +/* define to support LDAP backend */ +/* #undef SLAPD_LDAP */ + +/* define to support MDB backend */ +/* #undef SLAPD_MDB */ + +/* define to support LDAP Metadirectory backend */ +/* #undef SLAPD_META */ + +/* define to support modules */ +/* #undef SLAPD_MODULES */ + +/* dynamically linked module */ +#define SLAPD_MOD_DYNAMIC 2 + +/* statically linked module */ +#define SLAPD_MOD_STATIC 1 + +/* define to support cn=Monitor backend */ +/* #undef SLAPD_MONITOR */ + +/* define to support NDB backend */ +/* #undef SLAPD_NDB */ + +/* define to support NULL backend */ +/* #undef SLAPD_NULL */ + +/* define for In-Directory Access Logging overlay */ +/* #undef SLAPD_OVER_ACCESSLOG */ + +/* define for Audit Logging overlay */ +/* #undef SLAPD_OVER_AUDITLOG */ + +/* define for Automatic Certificate Authority overlay */ +/* #undef SLAPD_OVER_AUTOCA */ + +/* define for Collect overlay */ +/* #undef SLAPD_OVER_COLLECT */ + +/* define for Attribute Constraint overlay */ +/* #undef SLAPD_OVER_CONSTRAINT */ + +/* define for Dynamic Directory Services overlay */ +/* #undef SLAPD_OVER_DDS */ + +/* define for Dynamic Directory Services overlay */ +/* #undef SLAPD_OVER_DEREF */ + +/* define for Dynamic Group overlay */ +/* #undef SLAPD_OVER_DYNGROUP */ + +/* define for Dynamic List overlay */ +/* #undef SLAPD_OVER_DYNLIST */ + +/* define for Reverse Group Membership overlay */ +/* #undef SLAPD_OVER_MEMBEROF */ + +/* define for Password Policy overlay */ +/* #undef SLAPD_OVER_PPOLICY */ + +/* define for Proxy Cache overlay */ +/* #undef SLAPD_OVER_PROXYCACHE */ + +/* define for Referential Integrity overlay */ +/* #undef SLAPD_OVER_REFINT */ + +/* define for Return Code overlay */ +/* #undef SLAPD_OVER_RETCODE */ + +/* define for Rewrite/Remap overlay */ +/* #undef SLAPD_OVER_RWM */ + +/* define for Sequential Modify overlay */ +/* #undef SLAPD_OVER_SEQMOD */ + +/* define for ServerSideSort/VLV overlay */ +/* #undef SLAPD_OVER_SSSVLV */ + +/* define for Syncrepl Provider overlay */ +/* #undef SLAPD_OVER_SYNCPROV */ + +/* define for Translucent Proxy overlay */ +/* #undef SLAPD_OVER_TRANSLUCENT */ + +/* define for Attribute Uniqueness overlay */ +/* #undef SLAPD_OVER_UNIQUE */ + +/* define for Value Sorting overlay */ +/* #undef SLAPD_OVER_VALSORT */ + +/* define to support PASSWD backend */ +/* #undef SLAPD_PASSWD */ + +/* define to support PERL backend */ +/* #undef SLAPD_PERL */ + +/* define to support relay backend */ +/* #undef SLAPD_RELAY */ + +/* define to support reverse lookups */ +/* #undef SLAPD_RLOOKUPS */ + +/* define to support SHELL backend */ +/* #undef SLAPD_SHELL */ + +/* define to support SOCK backend */ +/* #undef SLAPD_SOCK */ + +/* define to support SASL passwords */ +/* #undef SLAPD_SPASSWD */ + +/* define to support SQL backend */ +/* #undef SLAPD_SQL */ + +/* define to support WiredTiger backend */ +/* #undef SLAPD_WT */ + +/* define to support run-time loadable ACL */ +/* #undef SLAP_DYNACL */ + +/* Define to 1 if you have the ANSI C header files. */ +#define STDC_HEADERS 1 + +/* Define to 1 if you can safely include both and . 
*/ +#define TIME_WITH_SYS_TIME 1 + +/* Define to 1 if your declares `struct tm'. */ +/* #undef TM_IN_SYS_TIME */ + +/* set to urandom device */ +#define URANDOM_DEVICE "/dev/urandom" + +/* define to use OpenSSL BIGNUM for MP */ +/* #undef USE_MP_BIGNUM */ + +/* define to use GMP for MP */ +/* #undef USE_MP_GMP */ + +/* define to use 'long' for MP */ +/* #undef USE_MP_LONG */ + +/* define to use 'long long' for MP */ +/* #undef USE_MP_LONG_LONG */ + +/* Define WORDS_BIGENDIAN to 1 if your processor stores words with the most + significant byte first (like Motorola and SPARC, unlike Intel). */ +#if defined AC_APPLE_UNIVERSAL_BUILD +# if defined __BIG_ENDIAN__ +# define WORDS_BIGENDIAN 1 +# endif +#else +# ifndef WORDS_BIGENDIAN +/* # undef WORDS_BIGENDIAN */ +# endif +#endif + +/* Define to the type of arg 3 for `accept'. */ +#define ber_socklen_t socklen_t + +/* Define to `char *' if does not define. */ +/* #undef caddr_t */ + +/* Define to empty if `const' does not conform to ANSI C. */ +/* #undef const */ + +/* Define to `int' if doesn't define. */ +/* #undef gid_t */ + +/* Define to `int' if does not define. */ +/* #undef mode_t */ + +/* Define to `long' if does not define. */ +/* #undef off_t */ + +/* Define to `int' if does not define. */ +/* #undef pid_t */ + +/* Define to `int' if does not define. */ +/* #undef sig_atomic_t */ + +/* Define to `unsigned' if does not define. */ +/* #undef size_t */ + +/* define to snprintf routine */ +/* #undef snprintf */ + +/* Define like ber_socklen_t if does not define. */ +/* #undef socklen_t */ + +/* Define to `signed int' if does not define. */ +/* #undef ssize_t */ + +/* Define to `int' if doesn't define. */ +/* #undef uid_t */ + +/* define as empty if volatile is not supported */ +/* #undef volatile */ + +/* define to snprintf routine */ +/* #undef vsnprintf */ + + +/* begin of portable.h.post */ + +#ifdef _WIN32 +/* don't suck in all of the win32 api */ +# define WIN32_LEAN_AND_MEAN 1 +#endif + +#ifndef LDAP_NEEDS_PROTOTYPES +/* force LDAP_P to always include prototypes */ +#define LDAP_NEEDS_PROTOTYPES 1 +#endif + +#ifndef LDAP_REL_ENG +#if (LDAP_VENDOR_VERSION == 000000) && !defined(LDAP_DEVEL) +#define LDAP_DEVEL +#endif +#if defined(LDAP_DEVEL) && !defined(LDAP_TEST) +#define LDAP_TEST +#endif +#endif + +#ifdef HAVE_STDDEF_H +# include +#endif + +#ifdef HAVE_EBCDIC +/* ASCII/EBCDIC converting replacements for stdio funcs + * vsnprintf and snprintf are used too, but they are already + * checked by the configure script + */ +#define fputs ber_pvt_fputs +#define fgets ber_pvt_fgets +#define printf ber_pvt_printf +#define fprintf ber_pvt_fprintf +#define vfprintf ber_pvt_vfprintf +#define vsprintf ber_pvt_vsprintf +#endif + +#include "ac/fdset.h" + +#include "ldap_cdefs.h" +#include "ldap_features.h" + +#include "ac/assert.h" +#include "ac/localize.h" + +#endif /* _LDAP_PORTABLE_H */ +/* end of portable.h.post */ + diff --git a/contrib/poco b/contrib/poco index 83beecccb09..59945069080 160000 --- a/contrib/poco +++ b/contrib/poco @@ -1 +1 @@ -Subproject commit 83beecccb09eec0c9fd2669cacea03ede1d9f138 +Subproject commit 5994506908028612869fee627d68d8212dfe7c1e diff --git a/contrib/poco-cmake/CMakeLists.txt b/contrib/poco-cmake/CMakeLists.txt index 1d2dc7b873e..d173f35b9bf 100644 --- a/contrib/poco-cmake/CMakeLists.txt +++ b/contrib/poco-cmake/CMakeLists.txt @@ -1,4 +1,4 @@ -set (LIBRARY_DIR ${ClickHouse_SOURCE_DIR}/contrib/poco) +set (LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/poco") add_subdirectory (Crypto) add_subdirectory (Data) diff 
--git a/contrib/poco-cmake/Crypto/CMakeLists.txt b/contrib/poco-cmake/Crypto/CMakeLists.txt index 1685e96728b..e93ed5cf17d 100644 --- a/contrib/poco-cmake/Crypto/CMakeLists.txt +++ b/contrib/poco-cmake/Crypto/CMakeLists.txt @@ -1,35 +1,35 @@ if (ENABLE_SSL) if (USE_INTERNAL_POCO_LIBRARY) set (SRCS - ${LIBRARY_DIR}/Crypto/src/Cipher.cpp - ${LIBRARY_DIR}/Crypto/src/CipherFactory.cpp - ${LIBRARY_DIR}/Crypto/src/CipherImpl.cpp - ${LIBRARY_DIR}/Crypto/src/CipherKey.cpp - ${LIBRARY_DIR}/Crypto/src/CipherKeyImpl.cpp - ${LIBRARY_DIR}/Crypto/src/CryptoException.cpp - ${LIBRARY_DIR}/Crypto/src/CryptoStream.cpp - ${LIBRARY_DIR}/Crypto/src/CryptoTransform.cpp - ${LIBRARY_DIR}/Crypto/src/DigestEngine.cpp - ${LIBRARY_DIR}/Crypto/src/ECDSADigestEngine.cpp - ${LIBRARY_DIR}/Crypto/src/ECKey.cpp - ${LIBRARY_DIR}/Crypto/src/ECKeyImpl.cpp - ${LIBRARY_DIR}/Crypto/src/EVPPKey.cpp - ${LIBRARY_DIR}/Crypto/src/KeyPair.cpp - ${LIBRARY_DIR}/Crypto/src/KeyPairImpl.cpp - ${LIBRARY_DIR}/Crypto/src/OpenSSLInitializer.cpp - ${LIBRARY_DIR}/Crypto/src/PKCS12Container.cpp - ${LIBRARY_DIR}/Crypto/src/RSACipherImpl.cpp - ${LIBRARY_DIR}/Crypto/src/RSADigestEngine.cpp - ${LIBRARY_DIR}/Crypto/src/RSAKey.cpp - ${LIBRARY_DIR}/Crypto/src/RSAKeyImpl.cpp - ${LIBRARY_DIR}/Crypto/src/X509Certificate.cpp + "${LIBRARY_DIR}/Crypto/src/Cipher.cpp" + "${LIBRARY_DIR}/Crypto/src/CipherFactory.cpp" + "${LIBRARY_DIR}/Crypto/src/CipherImpl.cpp" + "${LIBRARY_DIR}/Crypto/src/CipherKey.cpp" + "${LIBRARY_DIR}/Crypto/src/CipherKeyImpl.cpp" + "${LIBRARY_DIR}/Crypto/src/CryptoException.cpp" + "${LIBRARY_DIR}/Crypto/src/CryptoStream.cpp" + "${LIBRARY_DIR}/Crypto/src/CryptoTransform.cpp" + "${LIBRARY_DIR}/Crypto/src/DigestEngine.cpp" + "${LIBRARY_DIR}/Crypto/src/ECDSADigestEngine.cpp" + "${LIBRARY_DIR}/Crypto/src/ECKey.cpp" + "${LIBRARY_DIR}/Crypto/src/ECKeyImpl.cpp" + "${LIBRARY_DIR}/Crypto/src/EVPPKey.cpp" + "${LIBRARY_DIR}/Crypto/src/KeyPair.cpp" + "${LIBRARY_DIR}/Crypto/src/KeyPairImpl.cpp" + "${LIBRARY_DIR}/Crypto/src/OpenSSLInitializer.cpp" + "${LIBRARY_DIR}/Crypto/src/PKCS12Container.cpp" + "${LIBRARY_DIR}/Crypto/src/RSACipherImpl.cpp" + "${LIBRARY_DIR}/Crypto/src/RSADigestEngine.cpp" + "${LIBRARY_DIR}/Crypto/src/RSAKey.cpp" + "${LIBRARY_DIR}/Crypto/src/RSAKeyImpl.cpp" + "${LIBRARY_DIR}/Crypto/src/X509Certificate.cpp" ) add_library (_poco_crypto ${SRCS}) add_library (Poco::Crypto ALIAS _poco_crypto) target_compile_options (_poco_crypto PRIVATE -Wno-newline-eof) - target_include_directories (_poco_crypto SYSTEM PUBLIC ${LIBRARY_DIR}/Crypto/include) + target_include_directories (_poco_crypto SYSTEM PUBLIC "${LIBRARY_DIR}/Crypto/include") target_link_libraries (_poco_crypto PUBLIC Poco::Foundation ssl crypto) else () add_library (Poco::Crypto UNKNOWN IMPORTED GLOBAL) diff --git a/contrib/poco-cmake/Data/CMakeLists.txt b/contrib/poco-cmake/Data/CMakeLists.txt index 1c185df8961..4fdd755b45d 100644 --- a/contrib/poco-cmake/Data/CMakeLists.txt +++ b/contrib/poco-cmake/Data/CMakeLists.txt @@ -1,40 +1,40 @@ if (USE_INTERNAL_POCO_LIBRARY) set (SRCS - ${LIBRARY_DIR}/Data/src/AbstractBinder.cpp - ${LIBRARY_DIR}/Data/src/AbstractBinding.cpp - ${LIBRARY_DIR}/Data/src/AbstractExtraction.cpp - ${LIBRARY_DIR}/Data/src/AbstractExtractor.cpp - ${LIBRARY_DIR}/Data/src/AbstractPreparation.cpp - ${LIBRARY_DIR}/Data/src/AbstractPreparator.cpp - ${LIBRARY_DIR}/Data/src/ArchiveStrategy.cpp - ${LIBRARY_DIR}/Data/src/Bulk.cpp - ${LIBRARY_DIR}/Data/src/Connector.cpp - ${LIBRARY_DIR}/Data/src/DataException.cpp - ${LIBRARY_DIR}/Data/src/Date.cpp - 
${LIBRARY_DIR}/Data/src/DynamicLOB.cpp - ${LIBRARY_DIR}/Data/src/Limit.cpp - ${LIBRARY_DIR}/Data/src/MetaColumn.cpp - ${LIBRARY_DIR}/Data/src/PooledSessionHolder.cpp - ${LIBRARY_DIR}/Data/src/PooledSessionImpl.cpp - ${LIBRARY_DIR}/Data/src/Position.cpp - ${LIBRARY_DIR}/Data/src/Range.cpp - ${LIBRARY_DIR}/Data/src/RecordSet.cpp - ${LIBRARY_DIR}/Data/src/Row.cpp - ${LIBRARY_DIR}/Data/src/RowFilter.cpp - ${LIBRARY_DIR}/Data/src/RowFormatter.cpp - ${LIBRARY_DIR}/Data/src/RowIterator.cpp - ${LIBRARY_DIR}/Data/src/Session.cpp - ${LIBRARY_DIR}/Data/src/SessionFactory.cpp - ${LIBRARY_DIR}/Data/src/SessionImpl.cpp - ${LIBRARY_DIR}/Data/src/SessionPool.cpp - ${LIBRARY_DIR}/Data/src/SessionPoolContainer.cpp - ${LIBRARY_DIR}/Data/src/SimpleRowFormatter.cpp - ${LIBRARY_DIR}/Data/src/SQLChannel.cpp - ${LIBRARY_DIR}/Data/src/Statement.cpp - ${LIBRARY_DIR}/Data/src/StatementCreator.cpp - ${LIBRARY_DIR}/Data/src/StatementImpl.cpp - ${LIBRARY_DIR}/Data/src/Time.cpp - ${LIBRARY_DIR}/Data/src/Transaction.cpp + "${LIBRARY_DIR}/Data/src/AbstractBinder.cpp" + "${LIBRARY_DIR}/Data/src/AbstractBinding.cpp" + "${LIBRARY_DIR}/Data/src/AbstractExtraction.cpp" + "${LIBRARY_DIR}/Data/src/AbstractExtractor.cpp" + "${LIBRARY_DIR}/Data/src/AbstractPreparation.cpp" + "${LIBRARY_DIR}/Data/src/AbstractPreparator.cpp" + "${LIBRARY_DIR}/Data/src/ArchiveStrategy.cpp" + "${LIBRARY_DIR}/Data/src/Bulk.cpp" + "${LIBRARY_DIR}/Data/src/Connector.cpp" + "${LIBRARY_DIR}/Data/src/DataException.cpp" + "${LIBRARY_DIR}/Data/src/Date.cpp" + "${LIBRARY_DIR}/Data/src/DynamicLOB.cpp" + "${LIBRARY_DIR}/Data/src/Limit.cpp" + "${LIBRARY_DIR}/Data/src/MetaColumn.cpp" + "${LIBRARY_DIR}/Data/src/PooledSessionHolder.cpp" + "${LIBRARY_DIR}/Data/src/PooledSessionImpl.cpp" + "${LIBRARY_DIR}/Data/src/Position.cpp" + "${LIBRARY_DIR}/Data/src/Range.cpp" + "${LIBRARY_DIR}/Data/src/RecordSet.cpp" + "${LIBRARY_DIR}/Data/src/Row.cpp" + "${LIBRARY_DIR}/Data/src/RowFilter.cpp" + "${LIBRARY_DIR}/Data/src/RowFormatter.cpp" + "${LIBRARY_DIR}/Data/src/RowIterator.cpp" + "${LIBRARY_DIR}/Data/src/Session.cpp" + "${LIBRARY_DIR}/Data/src/SessionFactory.cpp" + "${LIBRARY_DIR}/Data/src/SessionImpl.cpp" + "${LIBRARY_DIR}/Data/src/SessionPool.cpp" + "${LIBRARY_DIR}/Data/src/SessionPoolContainer.cpp" + "${LIBRARY_DIR}/Data/src/SimpleRowFormatter.cpp" + "${LIBRARY_DIR}/Data/src/SQLChannel.cpp" + "${LIBRARY_DIR}/Data/src/Statement.cpp" + "${LIBRARY_DIR}/Data/src/StatementCreator.cpp" + "${LIBRARY_DIR}/Data/src/StatementImpl.cpp" + "${LIBRARY_DIR}/Data/src/Time.cpp" + "${LIBRARY_DIR}/Data/src/Transaction.cpp" ) add_library (_poco_data ${SRCS}) @@ -43,7 +43,7 @@ if (USE_INTERNAL_POCO_LIBRARY) if (COMPILER_GCC) target_compile_options (_poco_data PRIVATE -Wno-deprecated-copy) endif () - target_include_directories (_poco_data SYSTEM PUBLIC ${LIBRARY_DIR}/Data/include) + target_include_directories (_poco_data SYSTEM PUBLIC "${LIBRARY_DIR}/Data/include") target_link_libraries (_poco_data PUBLIC Poco::Foundation) else () # NOTE: don't know why, but the GLOBAL is required here. 
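The one change applied throughout the build files above is wrapping every ${...} path expansion in double quotes. The reason is CMake's argument splitting: an unquoted expansion is divided on semicolons into separate arguments, and an expansion that is empty disappears from the argument list entirely, while a quoted expansion is always passed through as exactly one argument. A minimal self-contained sketch of the difference (the variable values here are made up for illustration):

function(count_args)
    message(STATUS "got ${ARGC} argument(s)")
endfunction()

# A path that happens to contain a semicolon, CMake's list separator.
set(LIBRARY_DIR "C:/deps;/opt/contrib")

count_args(${LIBRARY_DIR}/nanodbc.cpp)      # got 2 argument(s): "C:/deps" and "/opt/contrib/nanodbc.cpp"
count_args("${LIBRARY_DIR}/nanodbc.cpp")    # got 1 argument(s): "C:/deps;/opt/contrib/nanodbc.cpp"

set(EMPTY_DIR "")
count_args(${EMPTY_DIR})                    # got 0 argument(s): the argument vanishes
count_args("${EMPTY_DIR}")                  # got 1 argument(s): an empty string

With unquoted expansions, a source list such as add_library(... ${LIBRARY_DIR}/file.c) can silently split into bogus file names; quoting makes every path a single, stable argument regardless of its contents.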
diff --git a/contrib/poco-cmake/Data/ODBC/CMakeLists.txt b/contrib/poco-cmake/Data/ODBC/CMakeLists.txt index cd7c5ef2863..a3561304541 100644 --- a/contrib/poco-cmake/Data/ODBC/CMakeLists.txt +++ b/contrib/poco-cmake/Data/ODBC/CMakeLists.txt @@ -5,27 +5,27 @@ if (ENABLE_ODBC) if (USE_INTERNAL_POCO_LIBRARY) set (SRCS - ${LIBRARY_DIR}/Data/ODBC/src/Binder.cpp - ${LIBRARY_DIR}/Data/ODBC/src/ConnectionHandle.cpp - ${LIBRARY_DIR}/Data/ODBC/src/Connector.cpp - ${LIBRARY_DIR}/Data/ODBC/src/EnvironmentHandle.cpp - ${LIBRARY_DIR}/Data/ODBC/src/Extractor.cpp - ${LIBRARY_DIR}/Data/ODBC/src/ODBCException.cpp - ${LIBRARY_DIR}/Data/ODBC/src/ODBCMetaColumn.cpp - ${LIBRARY_DIR}/Data/ODBC/src/ODBCStatementImpl.cpp - ${LIBRARY_DIR}/Data/ODBC/src/Parameter.cpp - ${LIBRARY_DIR}/Data/ODBC/src/Preparator.cpp - ${LIBRARY_DIR}/Data/ODBC/src/SessionImpl.cpp - ${LIBRARY_DIR}/Data/ODBC/src/TypeInfo.cpp - ${LIBRARY_DIR}/Data/ODBC/src/Unicode.cpp - ${LIBRARY_DIR}/Data/ODBC/src/Utility.cpp + "${LIBRARY_DIR}/Data/ODBC/src/Binder.cpp" + "${LIBRARY_DIR}/Data/ODBC/src/ConnectionHandle.cpp" + "${LIBRARY_DIR}/Data/ODBC/src/Connector.cpp" + "${LIBRARY_DIR}/Data/ODBC/src/EnvironmentHandle.cpp" + "${LIBRARY_DIR}/Data/ODBC/src/Extractor.cpp" + "${LIBRARY_DIR}/Data/ODBC/src/ODBCException.cpp" + "${LIBRARY_DIR}/Data/ODBC/src/ODBCMetaColumn.cpp" + "${LIBRARY_DIR}/Data/ODBC/src/ODBCStatementImpl.cpp" + "${LIBRARY_DIR}/Data/ODBC/src/Parameter.cpp" + "${LIBRARY_DIR}/Data/ODBC/src/Preparator.cpp" + "${LIBRARY_DIR}/Data/ODBC/src/SessionImpl.cpp" + "${LIBRARY_DIR}/Data/ODBC/src/TypeInfo.cpp" + "${LIBRARY_DIR}/Data/ODBC/src/Unicode.cpp" + "${LIBRARY_DIR}/Data/ODBC/src/Utility.cpp" ) add_library (_poco_data_odbc ${SRCS}) add_library (Poco::Data::ODBC ALIAS _poco_data_odbc) target_compile_options (_poco_data_odbc PRIVATE -Wno-unused-variable) - target_include_directories (_poco_data_odbc SYSTEM PUBLIC ${LIBRARY_DIR}/Data/ODBC/include) + target_include_directories (_poco_data_odbc SYSTEM PUBLIC "${LIBRARY_DIR}/Data/ODBC/include") target_link_libraries (_poco_data_odbc PUBLIC Poco::Data unixodbc) else () add_library (Poco::Data::ODBC UNKNOWN IMPORTED GLOBAL) diff --git a/contrib/poco-cmake/Foundation/CMakeLists.txt b/contrib/poco-cmake/Foundation/CMakeLists.txt index f4647461ec0..a9a4933873c 100644 --- a/contrib/poco-cmake/Foundation/CMakeLists.txt +++ b/contrib/poco-cmake/Foundation/CMakeLists.txt @@ -2,27 +2,27 @@ if (USE_INTERNAL_POCO_LIBRARY) # Foundation (pcre) set (SRCS_PCRE - ${LIBRARY_DIR}/Foundation/src/pcre_config.c - ${LIBRARY_DIR}/Foundation/src/pcre_byte_order.c - ${LIBRARY_DIR}/Foundation/src/pcre_chartables.c - ${LIBRARY_DIR}/Foundation/src/pcre_compile.c - ${LIBRARY_DIR}/Foundation/src/pcre_exec.c - ${LIBRARY_DIR}/Foundation/src/pcre_fullinfo.c - ${LIBRARY_DIR}/Foundation/src/pcre_globals.c - ${LIBRARY_DIR}/Foundation/src/pcre_maketables.c - ${LIBRARY_DIR}/Foundation/src/pcre_newline.c - ${LIBRARY_DIR}/Foundation/src/pcre_ord2utf8.c - ${LIBRARY_DIR}/Foundation/src/pcre_study.c - ${LIBRARY_DIR}/Foundation/src/pcre_tables.c - ${LIBRARY_DIR}/Foundation/src/pcre_dfa_exec.c - ${LIBRARY_DIR}/Foundation/src/pcre_get.c - ${LIBRARY_DIR}/Foundation/src/pcre_jit_compile.c - ${LIBRARY_DIR}/Foundation/src/pcre_refcount.c - ${LIBRARY_DIR}/Foundation/src/pcre_string_utils.c - ${LIBRARY_DIR}/Foundation/src/pcre_version.c - ${LIBRARY_DIR}/Foundation/src/pcre_ucd.c - ${LIBRARY_DIR}/Foundation/src/pcre_valid_utf8.c - ${LIBRARY_DIR}/Foundation/src/pcre_xclass.c + "${LIBRARY_DIR}/Foundation/src/pcre_config.c" + 
"${LIBRARY_DIR}/Foundation/src/pcre_byte_order.c" + "${LIBRARY_DIR}/Foundation/src/pcre_chartables.c" + "${LIBRARY_DIR}/Foundation/src/pcre_compile.c" + "${LIBRARY_DIR}/Foundation/src/pcre_exec.c" + "${LIBRARY_DIR}/Foundation/src/pcre_fullinfo.c" + "${LIBRARY_DIR}/Foundation/src/pcre_globals.c" + "${LIBRARY_DIR}/Foundation/src/pcre_maketables.c" + "${LIBRARY_DIR}/Foundation/src/pcre_newline.c" + "${LIBRARY_DIR}/Foundation/src/pcre_ord2utf8.c" + "${LIBRARY_DIR}/Foundation/src/pcre_study.c" + "${LIBRARY_DIR}/Foundation/src/pcre_tables.c" + "${LIBRARY_DIR}/Foundation/src/pcre_dfa_exec.c" + "${LIBRARY_DIR}/Foundation/src/pcre_get.c" + "${LIBRARY_DIR}/Foundation/src/pcre_jit_compile.c" + "${LIBRARY_DIR}/Foundation/src/pcre_refcount.c" + "${LIBRARY_DIR}/Foundation/src/pcre_string_utils.c" + "${LIBRARY_DIR}/Foundation/src/pcre_version.c" + "${LIBRARY_DIR}/Foundation/src/pcre_ucd.c" + "${LIBRARY_DIR}/Foundation/src/pcre_valid_utf8.c" + "${LIBRARY_DIR}/Foundation/src/pcre_xclass.c" ) add_library (_poco_foundation_pcre ${SRCS_PCRE}) @@ -33,159 +33,159 @@ if (USE_INTERNAL_POCO_LIBRARY) # Foundation set (SRCS - ${LIBRARY_DIR}/Foundation/src/AbstractObserver.cpp - ${LIBRARY_DIR}/Foundation/src/ActiveDispatcher.cpp - ${LIBRARY_DIR}/Foundation/src/ArchiveStrategy.cpp - ${LIBRARY_DIR}/Foundation/src/Ascii.cpp - ${LIBRARY_DIR}/Foundation/src/ASCIIEncoding.cpp - ${LIBRARY_DIR}/Foundation/src/AsyncChannel.cpp - ${LIBRARY_DIR}/Foundation/src/AtomicCounter.cpp - ${LIBRARY_DIR}/Foundation/src/Base32Decoder.cpp - ${LIBRARY_DIR}/Foundation/src/Base32Encoder.cpp - ${LIBRARY_DIR}/Foundation/src/Base64Decoder.cpp - ${LIBRARY_DIR}/Foundation/src/Base64Encoder.cpp - ${LIBRARY_DIR}/Foundation/src/BinaryReader.cpp - ${LIBRARY_DIR}/Foundation/src/BinaryWriter.cpp - ${LIBRARY_DIR}/Foundation/src/Bugcheck.cpp - ${LIBRARY_DIR}/Foundation/src/ByteOrder.cpp - ${LIBRARY_DIR}/Foundation/src/Channel.cpp - ${LIBRARY_DIR}/Foundation/src/Checksum.cpp - ${LIBRARY_DIR}/Foundation/src/Clock.cpp - ${LIBRARY_DIR}/Foundation/src/Condition.cpp - ${LIBRARY_DIR}/Foundation/src/Configurable.cpp - ${LIBRARY_DIR}/Foundation/src/ConsoleChannel.cpp - ${LIBRARY_DIR}/Foundation/src/CountingStream.cpp - ${LIBRARY_DIR}/Foundation/src/DateTime.cpp - ${LIBRARY_DIR}/Foundation/src/DateTimeFormat.cpp - ${LIBRARY_DIR}/Foundation/src/DateTimeFormatter.cpp - ${LIBRARY_DIR}/Foundation/src/DateTimeParser.cpp - ${LIBRARY_DIR}/Foundation/src/Debugger.cpp - ${LIBRARY_DIR}/Foundation/src/DeflatingStream.cpp - ${LIBRARY_DIR}/Foundation/src/DigestEngine.cpp - ${LIBRARY_DIR}/Foundation/src/DigestStream.cpp - ${LIBRARY_DIR}/Foundation/src/DirectoryIterator.cpp - ${LIBRARY_DIR}/Foundation/src/DirectoryIteratorStrategy.cpp - ${LIBRARY_DIR}/Foundation/src/DirectoryWatcher.cpp - ${LIBRARY_DIR}/Foundation/src/Environment.cpp - ${LIBRARY_DIR}/Foundation/src/Error.cpp - ${LIBRARY_DIR}/Foundation/src/ErrorHandler.cpp - ${LIBRARY_DIR}/Foundation/src/Event.cpp - ${LIBRARY_DIR}/Foundation/src/EventArgs.cpp - ${LIBRARY_DIR}/Foundation/src/EventChannel.cpp - ${LIBRARY_DIR}/Foundation/src/Exception.cpp - ${LIBRARY_DIR}/Foundation/src/FIFOBufferStream.cpp - ${LIBRARY_DIR}/Foundation/src/File.cpp - ${LIBRARY_DIR}/Foundation/src/FileChannel.cpp - ${LIBRARY_DIR}/Foundation/src/FileStream.cpp - ${LIBRARY_DIR}/Foundation/src/FileStreamFactory.cpp - ${LIBRARY_DIR}/Foundation/src/Format.cpp - ${LIBRARY_DIR}/Foundation/src/Formatter.cpp - ${LIBRARY_DIR}/Foundation/src/FormattingChannel.cpp - ${LIBRARY_DIR}/Foundation/src/FPEnvironment.cpp - ${LIBRARY_DIR}/Foundation/src/Glob.cpp - 
${LIBRARY_DIR}/Foundation/src/Hash.cpp - ${LIBRARY_DIR}/Foundation/src/HashStatistic.cpp - ${LIBRARY_DIR}/Foundation/src/HexBinaryDecoder.cpp - ${LIBRARY_DIR}/Foundation/src/HexBinaryEncoder.cpp - ${LIBRARY_DIR}/Foundation/src/InflatingStream.cpp - ${LIBRARY_DIR}/Foundation/src/JSONString.cpp - ${LIBRARY_DIR}/Foundation/src/Latin1Encoding.cpp - ${LIBRARY_DIR}/Foundation/src/Latin2Encoding.cpp - ${LIBRARY_DIR}/Foundation/src/Latin9Encoding.cpp - ${LIBRARY_DIR}/Foundation/src/LineEndingConverter.cpp - ${LIBRARY_DIR}/Foundation/src/LocalDateTime.cpp - ${LIBRARY_DIR}/Foundation/src/LogFile.cpp - ${LIBRARY_DIR}/Foundation/src/Logger.cpp - ${LIBRARY_DIR}/Foundation/src/LoggingFactory.cpp - ${LIBRARY_DIR}/Foundation/src/LoggingRegistry.cpp - ${LIBRARY_DIR}/Foundation/src/LogStream.cpp - ${LIBRARY_DIR}/Foundation/src/Manifest.cpp - ${LIBRARY_DIR}/Foundation/src/MD4Engine.cpp - ${LIBRARY_DIR}/Foundation/src/MD5Engine.cpp - ${LIBRARY_DIR}/Foundation/src/MemoryPool.cpp - ${LIBRARY_DIR}/Foundation/src/MemoryStream.cpp - ${LIBRARY_DIR}/Foundation/src/Message.cpp - ${LIBRARY_DIR}/Foundation/src/Mutex.cpp - ${LIBRARY_DIR}/Foundation/src/NamedEvent.cpp - ${LIBRARY_DIR}/Foundation/src/NamedMutex.cpp - ${LIBRARY_DIR}/Foundation/src/NestedDiagnosticContext.cpp - ${LIBRARY_DIR}/Foundation/src/Notification.cpp - ${LIBRARY_DIR}/Foundation/src/NotificationCenter.cpp - ${LIBRARY_DIR}/Foundation/src/NotificationQueue.cpp - ${LIBRARY_DIR}/Foundation/src/NullChannel.cpp - ${LIBRARY_DIR}/Foundation/src/NullStream.cpp - ${LIBRARY_DIR}/Foundation/src/NumberFormatter.cpp - ${LIBRARY_DIR}/Foundation/src/NumberParser.cpp - ${LIBRARY_DIR}/Foundation/src/NumericString.cpp - ${LIBRARY_DIR}/Foundation/src/Path.cpp - ${LIBRARY_DIR}/Foundation/src/PatternFormatter.cpp - ${LIBRARY_DIR}/Foundation/src/Pipe.cpp - ${LIBRARY_DIR}/Foundation/src/PipeImpl.cpp - ${LIBRARY_DIR}/Foundation/src/PipeStream.cpp - ${LIBRARY_DIR}/Foundation/src/PriorityNotificationQueue.cpp - ${LIBRARY_DIR}/Foundation/src/Process.cpp - ${LIBRARY_DIR}/Foundation/src/PurgeStrategy.cpp - ${LIBRARY_DIR}/Foundation/src/Random.cpp - ${LIBRARY_DIR}/Foundation/src/RandomStream.cpp - ${LIBRARY_DIR}/Foundation/src/RefCountedObject.cpp - ${LIBRARY_DIR}/Foundation/src/RegularExpression.cpp - ${LIBRARY_DIR}/Foundation/src/RotateStrategy.cpp - ${LIBRARY_DIR}/Foundation/src/Runnable.cpp - ${LIBRARY_DIR}/Foundation/src/RWLock.cpp - ${LIBRARY_DIR}/Foundation/src/Semaphore.cpp - ${LIBRARY_DIR}/Foundation/src/SHA1Engine.cpp - ${LIBRARY_DIR}/Foundation/src/SharedLibrary.cpp - ${LIBRARY_DIR}/Foundation/src/SharedMemory.cpp - ${LIBRARY_DIR}/Foundation/src/SignalHandler.cpp - ${LIBRARY_DIR}/Foundation/src/SimpleFileChannel.cpp - ${LIBRARY_DIR}/Foundation/src/SortedDirectoryIterator.cpp - ${LIBRARY_DIR}/Foundation/src/SplitterChannel.cpp - ${LIBRARY_DIR}/Foundation/src/Stopwatch.cpp - ${LIBRARY_DIR}/Foundation/src/StreamChannel.cpp - ${LIBRARY_DIR}/Foundation/src/StreamConverter.cpp - ${LIBRARY_DIR}/Foundation/src/StreamCopier.cpp - ${LIBRARY_DIR}/Foundation/src/StreamTokenizer.cpp - ${LIBRARY_DIR}/Foundation/src/String.cpp - ${LIBRARY_DIR}/Foundation/src/StringTokenizer.cpp - ${LIBRARY_DIR}/Foundation/src/SynchronizedObject.cpp - ${LIBRARY_DIR}/Foundation/src/SyslogChannel.cpp - ${LIBRARY_DIR}/Foundation/src/Task.cpp - ${LIBRARY_DIR}/Foundation/src/TaskManager.cpp - ${LIBRARY_DIR}/Foundation/src/TaskNotification.cpp - ${LIBRARY_DIR}/Foundation/src/TeeStream.cpp - ${LIBRARY_DIR}/Foundation/src/TemporaryFile.cpp - ${LIBRARY_DIR}/Foundation/src/TextBufferIterator.cpp - 
${LIBRARY_DIR}/Foundation/src/TextConverter.cpp - ${LIBRARY_DIR}/Foundation/src/TextEncoding.cpp - ${LIBRARY_DIR}/Foundation/src/TextIterator.cpp - ${LIBRARY_DIR}/Foundation/src/Thread.cpp - ${LIBRARY_DIR}/Foundation/src/ThreadLocal.cpp - ${LIBRARY_DIR}/Foundation/src/ThreadPool.cpp - ${LIBRARY_DIR}/Foundation/src/ThreadTarget.cpp - ${LIBRARY_DIR}/Foundation/src/TimedNotificationQueue.cpp - ${LIBRARY_DIR}/Foundation/src/Timer.cpp - ${LIBRARY_DIR}/Foundation/src/Timespan.cpp - ${LIBRARY_DIR}/Foundation/src/Timestamp.cpp - ${LIBRARY_DIR}/Foundation/src/Timezone.cpp - ${LIBRARY_DIR}/Foundation/src/Token.cpp - ${LIBRARY_DIR}/Foundation/src/Unicode.cpp - ${LIBRARY_DIR}/Foundation/src/UnicodeConverter.cpp - ${LIBRARY_DIR}/Foundation/src/URI.cpp - ${LIBRARY_DIR}/Foundation/src/URIStreamFactory.cpp - ${LIBRARY_DIR}/Foundation/src/URIStreamOpener.cpp - ${LIBRARY_DIR}/Foundation/src/UTF16Encoding.cpp - ${LIBRARY_DIR}/Foundation/src/UTF32Encoding.cpp - ${LIBRARY_DIR}/Foundation/src/UTF8Encoding.cpp - ${LIBRARY_DIR}/Foundation/src/UTF8String.cpp - ${LIBRARY_DIR}/Foundation/src/UUID.cpp - ${LIBRARY_DIR}/Foundation/src/UUIDGenerator.cpp - ${LIBRARY_DIR}/Foundation/src/Var.cpp - ${LIBRARY_DIR}/Foundation/src/VarHolder.cpp - ${LIBRARY_DIR}/Foundation/src/VarIterator.cpp - ${LIBRARY_DIR}/Foundation/src/Void.cpp - ${LIBRARY_DIR}/Foundation/src/Windows1250Encoding.cpp - ${LIBRARY_DIR}/Foundation/src/Windows1251Encoding.cpp - ${LIBRARY_DIR}/Foundation/src/Windows1252Encoding.cpp + "${LIBRARY_DIR}/Foundation/src/AbstractObserver.cpp" + "${LIBRARY_DIR}/Foundation/src/ActiveDispatcher.cpp" + "${LIBRARY_DIR}/Foundation/src/ArchiveStrategy.cpp" + "${LIBRARY_DIR}/Foundation/src/Ascii.cpp" + "${LIBRARY_DIR}/Foundation/src/ASCIIEncoding.cpp" + "${LIBRARY_DIR}/Foundation/src/AsyncChannel.cpp" + "${LIBRARY_DIR}/Foundation/src/AtomicCounter.cpp" + "${LIBRARY_DIR}/Foundation/src/Base32Decoder.cpp" + "${LIBRARY_DIR}/Foundation/src/Base32Encoder.cpp" + "${LIBRARY_DIR}/Foundation/src/Base64Decoder.cpp" + "${LIBRARY_DIR}/Foundation/src/Base64Encoder.cpp" + "${LIBRARY_DIR}/Foundation/src/BinaryReader.cpp" + "${LIBRARY_DIR}/Foundation/src/BinaryWriter.cpp" + "${LIBRARY_DIR}/Foundation/src/Bugcheck.cpp" + "${LIBRARY_DIR}/Foundation/src/ByteOrder.cpp" + "${LIBRARY_DIR}/Foundation/src/Channel.cpp" + "${LIBRARY_DIR}/Foundation/src/Checksum.cpp" + "${LIBRARY_DIR}/Foundation/src/Clock.cpp" + "${LIBRARY_DIR}/Foundation/src/Condition.cpp" + "${LIBRARY_DIR}/Foundation/src/Configurable.cpp" + "${LIBRARY_DIR}/Foundation/src/ConsoleChannel.cpp" + "${LIBRARY_DIR}/Foundation/src/CountingStream.cpp" + "${LIBRARY_DIR}/Foundation/src/DateTime.cpp" + "${LIBRARY_DIR}/Foundation/src/DateTimeFormat.cpp" + "${LIBRARY_DIR}/Foundation/src/DateTimeFormatter.cpp" + "${LIBRARY_DIR}/Foundation/src/DateTimeParser.cpp" + "${LIBRARY_DIR}/Foundation/src/Debugger.cpp" + "${LIBRARY_DIR}/Foundation/src/DeflatingStream.cpp" + "${LIBRARY_DIR}/Foundation/src/DigestEngine.cpp" + "${LIBRARY_DIR}/Foundation/src/DigestStream.cpp" + "${LIBRARY_DIR}/Foundation/src/DirectoryIterator.cpp" + "${LIBRARY_DIR}/Foundation/src/DirectoryIteratorStrategy.cpp" + "${LIBRARY_DIR}/Foundation/src/DirectoryWatcher.cpp" + "${LIBRARY_DIR}/Foundation/src/Environment.cpp" + "${LIBRARY_DIR}/Foundation/src/Error.cpp" + "${LIBRARY_DIR}/Foundation/src/ErrorHandler.cpp" + "${LIBRARY_DIR}/Foundation/src/Event.cpp" + "${LIBRARY_DIR}/Foundation/src/EventArgs.cpp" + "${LIBRARY_DIR}/Foundation/src/EventChannel.cpp" + "${LIBRARY_DIR}/Foundation/src/Exception.cpp" + 
"${LIBRARY_DIR}/Foundation/src/FIFOBufferStream.cpp" + "${LIBRARY_DIR}/Foundation/src/File.cpp" + "${LIBRARY_DIR}/Foundation/src/FileChannel.cpp" + "${LIBRARY_DIR}/Foundation/src/FileStream.cpp" + "${LIBRARY_DIR}/Foundation/src/FileStreamFactory.cpp" + "${LIBRARY_DIR}/Foundation/src/Format.cpp" + "${LIBRARY_DIR}/Foundation/src/Formatter.cpp" + "${LIBRARY_DIR}/Foundation/src/FormattingChannel.cpp" + "${LIBRARY_DIR}/Foundation/src/FPEnvironment.cpp" + "${LIBRARY_DIR}/Foundation/src/Glob.cpp" + "${LIBRARY_DIR}/Foundation/src/Hash.cpp" + "${LIBRARY_DIR}/Foundation/src/HashStatistic.cpp" + "${LIBRARY_DIR}/Foundation/src/HexBinaryDecoder.cpp" + "${LIBRARY_DIR}/Foundation/src/HexBinaryEncoder.cpp" + "${LIBRARY_DIR}/Foundation/src/InflatingStream.cpp" + "${LIBRARY_DIR}/Foundation/src/JSONString.cpp" + "${LIBRARY_DIR}/Foundation/src/Latin1Encoding.cpp" + "${LIBRARY_DIR}/Foundation/src/Latin2Encoding.cpp" + "${LIBRARY_DIR}/Foundation/src/Latin9Encoding.cpp" + "${LIBRARY_DIR}/Foundation/src/LineEndingConverter.cpp" + "${LIBRARY_DIR}/Foundation/src/LocalDateTime.cpp" + "${LIBRARY_DIR}/Foundation/src/LogFile.cpp" + "${LIBRARY_DIR}/Foundation/src/Logger.cpp" + "${LIBRARY_DIR}/Foundation/src/LoggingFactory.cpp" + "${LIBRARY_DIR}/Foundation/src/LoggingRegistry.cpp" + "${LIBRARY_DIR}/Foundation/src/LogStream.cpp" + "${LIBRARY_DIR}/Foundation/src/Manifest.cpp" + "${LIBRARY_DIR}/Foundation/src/MD4Engine.cpp" + "${LIBRARY_DIR}/Foundation/src/MD5Engine.cpp" + "${LIBRARY_DIR}/Foundation/src/MemoryPool.cpp" + "${LIBRARY_DIR}/Foundation/src/MemoryStream.cpp" + "${LIBRARY_DIR}/Foundation/src/Message.cpp" + "${LIBRARY_DIR}/Foundation/src/Mutex.cpp" + "${LIBRARY_DIR}/Foundation/src/NamedEvent.cpp" + "${LIBRARY_DIR}/Foundation/src/NamedMutex.cpp" + "${LIBRARY_DIR}/Foundation/src/NestedDiagnosticContext.cpp" + "${LIBRARY_DIR}/Foundation/src/Notification.cpp" + "${LIBRARY_DIR}/Foundation/src/NotificationCenter.cpp" + "${LIBRARY_DIR}/Foundation/src/NotificationQueue.cpp" + "${LIBRARY_DIR}/Foundation/src/NullChannel.cpp" + "${LIBRARY_DIR}/Foundation/src/NullStream.cpp" + "${LIBRARY_DIR}/Foundation/src/NumberFormatter.cpp" + "${LIBRARY_DIR}/Foundation/src/NumberParser.cpp" + "${LIBRARY_DIR}/Foundation/src/NumericString.cpp" + "${LIBRARY_DIR}/Foundation/src/Path.cpp" + "${LIBRARY_DIR}/Foundation/src/PatternFormatter.cpp" + "${LIBRARY_DIR}/Foundation/src/Pipe.cpp" + "${LIBRARY_DIR}/Foundation/src/PipeImpl.cpp" + "${LIBRARY_DIR}/Foundation/src/PipeStream.cpp" + "${LIBRARY_DIR}/Foundation/src/PriorityNotificationQueue.cpp" + "${LIBRARY_DIR}/Foundation/src/Process.cpp" + "${LIBRARY_DIR}/Foundation/src/PurgeStrategy.cpp" + "${LIBRARY_DIR}/Foundation/src/Random.cpp" + "${LIBRARY_DIR}/Foundation/src/RandomStream.cpp" + "${LIBRARY_DIR}/Foundation/src/RefCountedObject.cpp" + "${LIBRARY_DIR}/Foundation/src/RegularExpression.cpp" + "${LIBRARY_DIR}/Foundation/src/RotateStrategy.cpp" + "${LIBRARY_DIR}/Foundation/src/Runnable.cpp" + "${LIBRARY_DIR}/Foundation/src/RWLock.cpp" + "${LIBRARY_DIR}/Foundation/src/Semaphore.cpp" + "${LIBRARY_DIR}/Foundation/src/SHA1Engine.cpp" + "${LIBRARY_DIR}/Foundation/src/SharedLibrary.cpp" + "${LIBRARY_DIR}/Foundation/src/SharedMemory.cpp" + "${LIBRARY_DIR}/Foundation/src/SignalHandler.cpp" + "${LIBRARY_DIR}/Foundation/src/SimpleFileChannel.cpp" + "${LIBRARY_DIR}/Foundation/src/SortedDirectoryIterator.cpp" + "${LIBRARY_DIR}/Foundation/src/SplitterChannel.cpp" + "${LIBRARY_DIR}/Foundation/src/Stopwatch.cpp" + "${LIBRARY_DIR}/Foundation/src/StreamChannel.cpp" + 
"${LIBRARY_DIR}/Foundation/src/StreamConverter.cpp" + "${LIBRARY_DIR}/Foundation/src/StreamCopier.cpp" + "${LIBRARY_DIR}/Foundation/src/StreamTokenizer.cpp" + "${LIBRARY_DIR}/Foundation/src/String.cpp" + "${LIBRARY_DIR}/Foundation/src/StringTokenizer.cpp" + "${LIBRARY_DIR}/Foundation/src/SynchronizedObject.cpp" + "${LIBRARY_DIR}/Foundation/src/SyslogChannel.cpp" + "${LIBRARY_DIR}/Foundation/src/Task.cpp" + "${LIBRARY_DIR}/Foundation/src/TaskManager.cpp" + "${LIBRARY_DIR}/Foundation/src/TaskNotification.cpp" + "${LIBRARY_DIR}/Foundation/src/TeeStream.cpp" + "${LIBRARY_DIR}/Foundation/src/TemporaryFile.cpp" + "${LIBRARY_DIR}/Foundation/src/TextBufferIterator.cpp" + "${LIBRARY_DIR}/Foundation/src/TextConverter.cpp" + "${LIBRARY_DIR}/Foundation/src/TextEncoding.cpp" + "${LIBRARY_DIR}/Foundation/src/TextIterator.cpp" + "${LIBRARY_DIR}/Foundation/src/Thread.cpp" + "${LIBRARY_DIR}/Foundation/src/ThreadLocal.cpp" + "${LIBRARY_DIR}/Foundation/src/ThreadPool.cpp" + "${LIBRARY_DIR}/Foundation/src/ThreadTarget.cpp" + "${LIBRARY_DIR}/Foundation/src/TimedNotificationQueue.cpp" + "${LIBRARY_DIR}/Foundation/src/Timer.cpp" + "${LIBRARY_DIR}/Foundation/src/Timespan.cpp" + "${LIBRARY_DIR}/Foundation/src/Timestamp.cpp" + "${LIBRARY_DIR}/Foundation/src/Timezone.cpp" + "${LIBRARY_DIR}/Foundation/src/Token.cpp" + "${LIBRARY_DIR}/Foundation/src/Unicode.cpp" + "${LIBRARY_DIR}/Foundation/src/UnicodeConverter.cpp" + "${LIBRARY_DIR}/Foundation/src/URI.cpp" + "${LIBRARY_DIR}/Foundation/src/URIStreamFactory.cpp" + "${LIBRARY_DIR}/Foundation/src/URIStreamOpener.cpp" + "${LIBRARY_DIR}/Foundation/src/UTF16Encoding.cpp" + "${LIBRARY_DIR}/Foundation/src/UTF32Encoding.cpp" + "${LIBRARY_DIR}/Foundation/src/UTF8Encoding.cpp" + "${LIBRARY_DIR}/Foundation/src/UTF8String.cpp" + "${LIBRARY_DIR}/Foundation/src/UUID.cpp" + "${LIBRARY_DIR}/Foundation/src/UUIDGenerator.cpp" + "${LIBRARY_DIR}/Foundation/src/Var.cpp" + "${LIBRARY_DIR}/Foundation/src/VarHolder.cpp" + "${LIBRARY_DIR}/Foundation/src/VarIterator.cpp" + "${LIBRARY_DIR}/Foundation/src/Void.cpp" + "${LIBRARY_DIR}/Foundation/src/Windows1250Encoding.cpp" + "${LIBRARY_DIR}/Foundation/src/Windows1251Encoding.cpp" + "${LIBRARY_DIR}/Foundation/src/Windows1252Encoding.cpp" ) add_library (_poco_foundation ${SRCS}) @@ -221,7 +221,7 @@ if (USE_INTERNAL_POCO_LIBRARY) POCO_ENABLE_CPP11 POCO_OS_FAMILY_UNIX ) - target_include_directories (_poco_foundation SYSTEM PUBLIC ${LIBRARY_DIR}/Foundation/include) + target_include_directories (_poco_foundation SYSTEM PUBLIC "${LIBRARY_DIR}/Foundation/include") target_link_libraries (_poco_foundation PRIVATE Poco::Foundation::PCRE ${ZLIB_LIBRARIES}) else () add_library (Poco::Foundation UNKNOWN IMPORTED GLOBAL) @@ -233,3 +233,10 @@ else () message (STATUS "Using Poco::Foundation: ${LIBRARY_POCO_FOUNDATION} ${INCLUDE_POCO_FOUNDATION}") endif () + +if(OS_DARWIN AND ARCH_AARCH64) + target_compile_definitions (_poco_foundation + PRIVATE + POCO_NO_STAT64 + ) +endif() diff --git a/contrib/poco-cmake/JSON/CMakeLists.txt b/contrib/poco-cmake/JSON/CMakeLists.txt index 89054cf225d..7033b800d5d 100644 --- a/contrib/poco-cmake/JSON/CMakeLists.txt +++ b/contrib/poco-cmake/JSON/CMakeLists.txt @@ -2,7 +2,7 @@ if (USE_INTERNAL_POCO_LIBRARY) # Poco::JSON (pdjson) set (SRCS_PDJSON - ${LIBRARY_DIR}/JSON/src/pdjson.c + "${LIBRARY_DIR}/JSON/src/pdjson.c" ) add_library (_poco_json_pdjson ${SRCS_PDJSON}) @@ -11,24 +11,24 @@ if (USE_INTERNAL_POCO_LIBRARY) # Poco::JSON set (SRCS - ${LIBRARY_DIR}/JSON/src/Array.cpp - ${LIBRARY_DIR}/JSON/src/Handler.cpp - 
${LIBRARY_DIR}/JSON/src/JSONException.cpp - ${LIBRARY_DIR}/JSON/src/Object.cpp - ${LIBRARY_DIR}/JSON/src/ParseHandler.cpp - ${LIBRARY_DIR}/JSON/src/Parser.cpp - ${LIBRARY_DIR}/JSON/src/ParserImpl.cpp - ${LIBRARY_DIR}/JSON/src/PrintHandler.cpp - ${LIBRARY_DIR}/JSON/src/Query.cpp - ${LIBRARY_DIR}/JSON/src/Stringifier.cpp - ${LIBRARY_DIR}/JSON/src/Template.cpp - ${LIBRARY_DIR}/JSON/src/TemplateCache.cpp + "${LIBRARY_DIR}/JSON/src/Array.cpp" + "${LIBRARY_DIR}/JSON/src/Handler.cpp" + "${LIBRARY_DIR}/JSON/src/JSONException.cpp" + "${LIBRARY_DIR}/JSON/src/Object.cpp" + "${LIBRARY_DIR}/JSON/src/ParseHandler.cpp" + "${LIBRARY_DIR}/JSON/src/Parser.cpp" + "${LIBRARY_DIR}/JSON/src/ParserImpl.cpp" + "${LIBRARY_DIR}/JSON/src/PrintHandler.cpp" + "${LIBRARY_DIR}/JSON/src/Query.cpp" + "${LIBRARY_DIR}/JSON/src/Stringifier.cpp" + "${LIBRARY_DIR}/JSON/src/Template.cpp" + "${LIBRARY_DIR}/JSON/src/TemplateCache.cpp" ) add_library (_poco_json ${SRCS}) add_library (Poco::JSON ALIAS _poco_json) - target_include_directories (_poco_json SYSTEM PUBLIC ${LIBRARY_DIR}/JSON/include) + target_include_directories (_poco_json SYSTEM PUBLIC "${LIBRARY_DIR}/JSON/include") target_link_libraries (_poco_json PUBLIC Poco::Foundation Poco::JSON::Pdjson) else () add_library (Poco::JSON UNKNOWN IMPORTED GLOBAL) diff --git a/contrib/poco-cmake/MongoDB/CMakeLists.txt b/contrib/poco-cmake/MongoDB/CMakeLists.txt index 0d79f680a64..e3dce7ac5cd 100644 --- a/contrib/poco-cmake/MongoDB/CMakeLists.txt +++ b/contrib/poco-cmake/MongoDB/CMakeLists.txt @@ -1,32 +1,32 @@ if (USE_INTERNAL_POCO_LIBRARY) set (SRCS - ${LIBRARY_DIR}/MongoDB/src/Array.cpp - ${LIBRARY_DIR}/MongoDB/src/Binary.cpp - ${LIBRARY_DIR}/MongoDB/src/Connection.cpp - ${LIBRARY_DIR}/MongoDB/src/Cursor.cpp - ${LIBRARY_DIR}/MongoDB/src/Database.cpp - ${LIBRARY_DIR}/MongoDB/src/DeleteRequest.cpp - ${LIBRARY_DIR}/MongoDB/src/Document.cpp - ${LIBRARY_DIR}/MongoDB/src/Element.cpp - ${LIBRARY_DIR}/MongoDB/src/GetMoreRequest.cpp - ${LIBRARY_DIR}/MongoDB/src/InsertRequest.cpp - ${LIBRARY_DIR}/MongoDB/src/JavaScriptCode.cpp - ${LIBRARY_DIR}/MongoDB/src/KillCursorsRequest.cpp - ${LIBRARY_DIR}/MongoDB/src/Message.cpp - ${LIBRARY_DIR}/MongoDB/src/MessageHeader.cpp - ${LIBRARY_DIR}/MongoDB/src/ObjectId.cpp - ${LIBRARY_DIR}/MongoDB/src/QueryRequest.cpp - ${LIBRARY_DIR}/MongoDB/src/RegularExpression.cpp - ${LIBRARY_DIR}/MongoDB/src/ReplicaSet.cpp - ${LIBRARY_DIR}/MongoDB/src/RequestMessage.cpp - ${LIBRARY_DIR}/MongoDB/src/ResponseMessage.cpp - ${LIBRARY_DIR}/MongoDB/src/UpdateRequest.cpp + "${LIBRARY_DIR}/MongoDB/src/Array.cpp" + "${LIBRARY_DIR}/MongoDB/src/Binary.cpp" + "${LIBRARY_DIR}/MongoDB/src/Connection.cpp" + "${LIBRARY_DIR}/MongoDB/src/Cursor.cpp" + "${LIBRARY_DIR}/MongoDB/src/Database.cpp" + "${LIBRARY_DIR}/MongoDB/src/DeleteRequest.cpp" + "${LIBRARY_DIR}/MongoDB/src/Document.cpp" + "${LIBRARY_DIR}/MongoDB/src/Element.cpp" + "${LIBRARY_DIR}/MongoDB/src/GetMoreRequest.cpp" + "${LIBRARY_DIR}/MongoDB/src/InsertRequest.cpp" + "${LIBRARY_DIR}/MongoDB/src/JavaScriptCode.cpp" + "${LIBRARY_DIR}/MongoDB/src/KillCursorsRequest.cpp" + "${LIBRARY_DIR}/MongoDB/src/Message.cpp" + "${LIBRARY_DIR}/MongoDB/src/MessageHeader.cpp" + "${LIBRARY_DIR}/MongoDB/src/ObjectId.cpp" + "${LIBRARY_DIR}/MongoDB/src/QueryRequest.cpp" + "${LIBRARY_DIR}/MongoDB/src/RegularExpression.cpp" + "${LIBRARY_DIR}/MongoDB/src/ReplicaSet.cpp" + "${LIBRARY_DIR}/MongoDB/src/RequestMessage.cpp" + "${LIBRARY_DIR}/MongoDB/src/ResponseMessage.cpp" + "${LIBRARY_DIR}/MongoDB/src/UpdateRequest.cpp" ) add_library (_poco_mongodb ${SRCS}) 
add_library (Poco::MongoDB ALIAS _poco_mongodb) - target_include_directories (_poco_mongodb SYSTEM PUBLIC ${LIBRARY_DIR}/MongoDB/include) + target_include_directories (_poco_mongodb SYSTEM PUBLIC "${LIBRARY_DIR}/MongoDB/include") target_link_libraries (_poco_mongodb PUBLIC Poco::Net) else () add_library (Poco::MongoDB UNKNOWN IMPORTED GLOBAL) diff --git a/contrib/poco-cmake/Net/CMakeLists.txt b/contrib/poco-cmake/Net/CMakeLists.txt index 9bc06e52e05..45989af8d45 100644 --- a/contrib/poco-cmake/Net/CMakeLists.txt +++ b/contrib/poco-cmake/Net/CMakeLists.txt @@ -1,105 +1,105 @@ if (USE_INTERNAL_POCO_LIBRARY) set (SRCS - ${LIBRARY_DIR}/Net/src/AbstractHTTPRequestHandler.cpp - ${LIBRARY_DIR}/Net/src/DatagramSocket.cpp - ${LIBRARY_DIR}/Net/src/DatagramSocketImpl.cpp - ${LIBRARY_DIR}/Net/src/DialogSocket.cpp - ${LIBRARY_DIR}/Net/src/DNS.cpp - ${LIBRARY_DIR}/Net/src/FilePartSource.cpp - ${LIBRARY_DIR}/Net/src/FTPClientSession.cpp - ${LIBRARY_DIR}/Net/src/FTPStreamFactory.cpp - ${LIBRARY_DIR}/Net/src/HostEntry.cpp - ${LIBRARY_DIR}/Net/src/HTMLForm.cpp - ${LIBRARY_DIR}/Net/src/HTTPAuthenticationParams.cpp - ${LIBRARY_DIR}/Net/src/HTTPBasicCredentials.cpp - ${LIBRARY_DIR}/Net/src/HTTPBufferAllocator.cpp - ${LIBRARY_DIR}/Net/src/HTTPChunkedStream.cpp - ${LIBRARY_DIR}/Net/src/HTTPClientSession.cpp - ${LIBRARY_DIR}/Net/src/HTTPCookie.cpp - ${LIBRARY_DIR}/Net/src/HTTPCredentials.cpp - ${LIBRARY_DIR}/Net/src/HTTPDigestCredentials.cpp - ${LIBRARY_DIR}/Net/src/HTTPFixedLengthStream.cpp - ${LIBRARY_DIR}/Net/src/HTTPHeaderStream.cpp - ${LIBRARY_DIR}/Net/src/HTTPIOStream.cpp - ${LIBRARY_DIR}/Net/src/HTTPMessage.cpp - ${LIBRARY_DIR}/Net/src/HTTPRequest.cpp - ${LIBRARY_DIR}/Net/src/HTTPRequestHandler.cpp - ${LIBRARY_DIR}/Net/src/HTTPRequestHandlerFactory.cpp - ${LIBRARY_DIR}/Net/src/HTTPResponse.cpp - ${LIBRARY_DIR}/Net/src/HTTPServer.cpp - ${LIBRARY_DIR}/Net/src/HTTPServerConnection.cpp - ${LIBRARY_DIR}/Net/src/HTTPServerConnectionFactory.cpp - ${LIBRARY_DIR}/Net/src/HTTPServerParams.cpp - ${LIBRARY_DIR}/Net/src/HTTPServerRequest.cpp - ${LIBRARY_DIR}/Net/src/HTTPServerRequestImpl.cpp - ${LIBRARY_DIR}/Net/src/HTTPServerResponse.cpp - ${LIBRARY_DIR}/Net/src/HTTPServerResponseImpl.cpp - ${LIBRARY_DIR}/Net/src/HTTPServerSession.cpp - ${LIBRARY_DIR}/Net/src/HTTPSession.cpp - ${LIBRARY_DIR}/Net/src/HTTPSessionFactory.cpp - ${LIBRARY_DIR}/Net/src/HTTPSessionInstantiator.cpp - ${LIBRARY_DIR}/Net/src/HTTPStream.cpp - ${LIBRARY_DIR}/Net/src/HTTPStreamFactory.cpp - ${LIBRARY_DIR}/Net/src/ICMPClient.cpp - ${LIBRARY_DIR}/Net/src/ICMPEventArgs.cpp - ${LIBRARY_DIR}/Net/src/ICMPPacket.cpp - ${LIBRARY_DIR}/Net/src/ICMPPacketImpl.cpp - ${LIBRARY_DIR}/Net/src/ICMPSocket.cpp - ${LIBRARY_DIR}/Net/src/ICMPSocketImpl.cpp - ${LIBRARY_DIR}/Net/src/ICMPv4PacketImpl.cpp - ${LIBRARY_DIR}/Net/src/IPAddress.cpp - ${LIBRARY_DIR}/Net/src/IPAddressImpl.cpp - ${LIBRARY_DIR}/Net/src/MailMessage.cpp - ${LIBRARY_DIR}/Net/src/MailRecipient.cpp - ${LIBRARY_DIR}/Net/src/MailStream.cpp - ${LIBRARY_DIR}/Net/src/MediaType.cpp - ${LIBRARY_DIR}/Net/src/MessageHeader.cpp - ${LIBRARY_DIR}/Net/src/MulticastSocket.cpp - ${LIBRARY_DIR}/Net/src/MultipartReader.cpp - ${LIBRARY_DIR}/Net/src/MultipartWriter.cpp - ${LIBRARY_DIR}/Net/src/NameValueCollection.cpp - ${LIBRARY_DIR}/Net/src/Net.cpp - ${LIBRARY_DIR}/Net/src/NetException.cpp - ${LIBRARY_DIR}/Net/src/NetworkInterface.cpp - ${LIBRARY_DIR}/Net/src/NTPClient.cpp - ${LIBRARY_DIR}/Net/src/NTPEventArgs.cpp - ${LIBRARY_DIR}/Net/src/NTPPacket.cpp - ${LIBRARY_DIR}/Net/src/NullPartHandler.cpp - 
${LIBRARY_DIR}/Net/src/OAuth10Credentials.cpp - ${LIBRARY_DIR}/Net/src/OAuth20Credentials.cpp - ${LIBRARY_DIR}/Net/src/PartHandler.cpp - ${LIBRARY_DIR}/Net/src/PartSource.cpp - ${LIBRARY_DIR}/Net/src/PartStore.cpp - ${LIBRARY_DIR}/Net/src/PollSet.cpp - ${LIBRARY_DIR}/Net/src/POP3ClientSession.cpp - ${LIBRARY_DIR}/Net/src/QuotedPrintableDecoder.cpp - ${LIBRARY_DIR}/Net/src/QuotedPrintableEncoder.cpp - ${LIBRARY_DIR}/Net/src/RawSocket.cpp - ${LIBRARY_DIR}/Net/src/RawSocketImpl.cpp - ${LIBRARY_DIR}/Net/src/RemoteSyslogChannel.cpp - ${LIBRARY_DIR}/Net/src/RemoteSyslogListener.cpp - ${LIBRARY_DIR}/Net/src/ServerSocket.cpp - ${LIBRARY_DIR}/Net/src/ServerSocketImpl.cpp - ${LIBRARY_DIR}/Net/src/SMTPChannel.cpp - ${LIBRARY_DIR}/Net/src/SMTPClientSession.cpp - ${LIBRARY_DIR}/Net/src/Socket.cpp - ${LIBRARY_DIR}/Net/src/SocketAddress.cpp - ${LIBRARY_DIR}/Net/src/SocketAddressImpl.cpp - ${LIBRARY_DIR}/Net/src/SocketImpl.cpp - ${LIBRARY_DIR}/Net/src/SocketNotification.cpp - ${LIBRARY_DIR}/Net/src/SocketNotifier.cpp - ${LIBRARY_DIR}/Net/src/SocketReactor.cpp - ${LIBRARY_DIR}/Net/src/SocketStream.cpp - ${LIBRARY_DIR}/Net/src/StreamSocket.cpp - ${LIBRARY_DIR}/Net/src/StreamSocketImpl.cpp - ${LIBRARY_DIR}/Net/src/StringPartSource.cpp - ${LIBRARY_DIR}/Net/src/TCPServer.cpp - ${LIBRARY_DIR}/Net/src/TCPServerConnection.cpp - ${LIBRARY_DIR}/Net/src/TCPServerConnectionFactory.cpp - ${LIBRARY_DIR}/Net/src/TCPServerDispatcher.cpp - ${LIBRARY_DIR}/Net/src/TCPServerParams.cpp - ${LIBRARY_DIR}/Net/src/WebSocket.cpp - ${LIBRARY_DIR}/Net/src/WebSocketImpl.cpp + "${LIBRARY_DIR}/Net/src/AbstractHTTPRequestHandler.cpp" + "${LIBRARY_DIR}/Net/src/DatagramSocket.cpp" + "${LIBRARY_DIR}/Net/src/DatagramSocketImpl.cpp" + "${LIBRARY_DIR}/Net/src/DialogSocket.cpp" + "${LIBRARY_DIR}/Net/src/DNS.cpp" + "${LIBRARY_DIR}/Net/src/FilePartSource.cpp" + "${LIBRARY_DIR}/Net/src/FTPClientSession.cpp" + "${LIBRARY_DIR}/Net/src/FTPStreamFactory.cpp" + "${LIBRARY_DIR}/Net/src/HostEntry.cpp" + "${LIBRARY_DIR}/Net/src/HTMLForm.cpp" + "${LIBRARY_DIR}/Net/src/HTTPAuthenticationParams.cpp" + "${LIBRARY_DIR}/Net/src/HTTPBasicCredentials.cpp" + "${LIBRARY_DIR}/Net/src/HTTPBufferAllocator.cpp" + "${LIBRARY_DIR}/Net/src/HTTPChunkedStream.cpp" + "${LIBRARY_DIR}/Net/src/HTTPClientSession.cpp" + "${LIBRARY_DIR}/Net/src/HTTPCookie.cpp" + "${LIBRARY_DIR}/Net/src/HTTPCredentials.cpp" + "${LIBRARY_DIR}/Net/src/HTTPDigestCredentials.cpp" + "${LIBRARY_DIR}/Net/src/HTTPFixedLengthStream.cpp" + "${LIBRARY_DIR}/Net/src/HTTPHeaderStream.cpp" + "${LIBRARY_DIR}/Net/src/HTTPIOStream.cpp" + "${LIBRARY_DIR}/Net/src/HTTPMessage.cpp" + "${LIBRARY_DIR}/Net/src/HTTPRequest.cpp" + "${LIBRARY_DIR}/Net/src/HTTPRequestHandler.cpp" + "${LIBRARY_DIR}/Net/src/HTTPRequestHandlerFactory.cpp" + "${LIBRARY_DIR}/Net/src/HTTPResponse.cpp" + "${LIBRARY_DIR}/Net/src/HTTPServer.cpp" + "${LIBRARY_DIR}/Net/src/HTTPServerConnection.cpp" + "${LIBRARY_DIR}/Net/src/HTTPServerConnectionFactory.cpp" + "${LIBRARY_DIR}/Net/src/HTTPServerParams.cpp" + "${LIBRARY_DIR}/Net/src/HTTPServerRequest.cpp" + "${LIBRARY_DIR}/Net/src/HTTPServerRequestImpl.cpp" + "${LIBRARY_DIR}/Net/src/HTTPServerResponse.cpp" + "${LIBRARY_DIR}/Net/src/HTTPServerResponseImpl.cpp" + "${LIBRARY_DIR}/Net/src/HTTPServerSession.cpp" + "${LIBRARY_DIR}/Net/src/HTTPSession.cpp" + "${LIBRARY_DIR}/Net/src/HTTPSessionFactory.cpp" + "${LIBRARY_DIR}/Net/src/HTTPSessionInstantiator.cpp" + "${LIBRARY_DIR}/Net/src/HTTPStream.cpp" + "${LIBRARY_DIR}/Net/src/HTTPStreamFactory.cpp" + "${LIBRARY_DIR}/Net/src/ICMPClient.cpp" + 
"${LIBRARY_DIR}/Net/src/ICMPEventArgs.cpp" + "${LIBRARY_DIR}/Net/src/ICMPPacket.cpp" + "${LIBRARY_DIR}/Net/src/ICMPPacketImpl.cpp" + "${LIBRARY_DIR}/Net/src/ICMPSocket.cpp" + "${LIBRARY_DIR}/Net/src/ICMPSocketImpl.cpp" + "${LIBRARY_DIR}/Net/src/ICMPv4PacketImpl.cpp" + "${LIBRARY_DIR}/Net/src/IPAddress.cpp" + "${LIBRARY_DIR}/Net/src/IPAddressImpl.cpp" + "${LIBRARY_DIR}/Net/src/MailMessage.cpp" + "${LIBRARY_DIR}/Net/src/MailRecipient.cpp" + "${LIBRARY_DIR}/Net/src/MailStream.cpp" + "${LIBRARY_DIR}/Net/src/MediaType.cpp" + "${LIBRARY_DIR}/Net/src/MessageHeader.cpp" + "${LIBRARY_DIR}/Net/src/MulticastSocket.cpp" + "${LIBRARY_DIR}/Net/src/MultipartReader.cpp" + "${LIBRARY_DIR}/Net/src/MultipartWriter.cpp" + "${LIBRARY_DIR}/Net/src/NameValueCollection.cpp" + "${LIBRARY_DIR}/Net/src/Net.cpp" + "${LIBRARY_DIR}/Net/src/NetException.cpp" + "${LIBRARY_DIR}/Net/src/NetworkInterface.cpp" + "${LIBRARY_DIR}/Net/src/NTPClient.cpp" + "${LIBRARY_DIR}/Net/src/NTPEventArgs.cpp" + "${LIBRARY_DIR}/Net/src/NTPPacket.cpp" + "${LIBRARY_DIR}/Net/src/NullPartHandler.cpp" + "${LIBRARY_DIR}/Net/src/OAuth10Credentials.cpp" + "${LIBRARY_DIR}/Net/src/OAuth20Credentials.cpp" + "${LIBRARY_DIR}/Net/src/PartHandler.cpp" + "${LIBRARY_DIR}/Net/src/PartSource.cpp" + "${LIBRARY_DIR}/Net/src/PartStore.cpp" + "${LIBRARY_DIR}/Net/src/PollSet.cpp" + "${LIBRARY_DIR}/Net/src/POP3ClientSession.cpp" + "${LIBRARY_DIR}/Net/src/QuotedPrintableDecoder.cpp" + "${LIBRARY_DIR}/Net/src/QuotedPrintableEncoder.cpp" + "${LIBRARY_DIR}/Net/src/RawSocket.cpp" + "${LIBRARY_DIR}/Net/src/RawSocketImpl.cpp" + "${LIBRARY_DIR}/Net/src/RemoteSyslogChannel.cpp" + "${LIBRARY_DIR}/Net/src/RemoteSyslogListener.cpp" + "${LIBRARY_DIR}/Net/src/ServerSocket.cpp" + "${LIBRARY_DIR}/Net/src/ServerSocketImpl.cpp" + "${LIBRARY_DIR}/Net/src/SMTPChannel.cpp" + "${LIBRARY_DIR}/Net/src/SMTPClientSession.cpp" + "${LIBRARY_DIR}/Net/src/Socket.cpp" + "${LIBRARY_DIR}/Net/src/SocketAddress.cpp" + "${LIBRARY_DIR}/Net/src/SocketAddressImpl.cpp" + "${LIBRARY_DIR}/Net/src/SocketImpl.cpp" + "${LIBRARY_DIR}/Net/src/SocketNotification.cpp" + "${LIBRARY_DIR}/Net/src/SocketNotifier.cpp" + "${LIBRARY_DIR}/Net/src/SocketReactor.cpp" + "${LIBRARY_DIR}/Net/src/SocketStream.cpp" + "${LIBRARY_DIR}/Net/src/StreamSocket.cpp" + "${LIBRARY_DIR}/Net/src/StreamSocketImpl.cpp" + "${LIBRARY_DIR}/Net/src/StringPartSource.cpp" + "${LIBRARY_DIR}/Net/src/TCPServer.cpp" + "${LIBRARY_DIR}/Net/src/TCPServerConnection.cpp" + "${LIBRARY_DIR}/Net/src/TCPServerConnectionFactory.cpp" + "${LIBRARY_DIR}/Net/src/TCPServerDispatcher.cpp" + "${LIBRARY_DIR}/Net/src/TCPServerParams.cpp" + "${LIBRARY_DIR}/Net/src/WebSocket.cpp" + "${LIBRARY_DIR}/Net/src/WebSocketImpl.cpp" ) add_library (_poco_net ${SRCS}) @@ -125,7 +125,7 @@ if (USE_INTERNAL_POCO_LIBRARY) -Wno-deprecated -Wno-extra-semi ) - target_include_directories (_poco_net SYSTEM PUBLIC ${LIBRARY_DIR}/Net/include) + target_include_directories (_poco_net SYSTEM PUBLIC "${LIBRARY_DIR}/Net/include") target_link_libraries (_poco_net PUBLIC Poco::Foundation) else () add_library (Poco::Net UNKNOWN IMPORTED GLOBAL) diff --git a/contrib/poco-cmake/Net/SSL/CMakeLists.txt b/contrib/poco-cmake/Net/SSL/CMakeLists.txt index 7cc71f441c7..4b3adacfb8f 100644 --- a/contrib/poco-cmake/Net/SSL/CMakeLists.txt +++ b/contrib/poco-cmake/Net/SSL/CMakeLists.txt @@ -1,39 +1,39 @@ if (ENABLE_SSL) if (USE_INTERNAL_POCO_LIBRARY) set (SRCS - ${LIBRARY_DIR}/NetSSL_OpenSSL/src/AcceptCertificateHandler.cpp - ${LIBRARY_DIR}/NetSSL_OpenSSL/src/CertificateHandlerFactory.cpp - 
${LIBRARY_DIR}/NetSSL_OpenSSL/src/CertificateHandlerFactoryMgr.cpp - ${LIBRARY_DIR}/NetSSL_OpenSSL/src/ConsoleCertificateHandler.cpp - ${LIBRARY_DIR}/NetSSL_OpenSSL/src/Context.cpp - ${LIBRARY_DIR}/NetSSL_OpenSSL/src/HTTPSClientSession.cpp - ${LIBRARY_DIR}/NetSSL_OpenSSL/src/HTTPSSessionInstantiator.cpp - ${LIBRARY_DIR}/NetSSL_OpenSSL/src/HTTPSStreamFactory.cpp - ${LIBRARY_DIR}/NetSSL_OpenSSL/src/InvalidCertificateHandler.cpp - ${LIBRARY_DIR}/NetSSL_OpenSSL/src/KeyConsoleHandler.cpp - ${LIBRARY_DIR}/NetSSL_OpenSSL/src/KeyFileHandler.cpp - ${LIBRARY_DIR}/NetSSL_OpenSSL/src/PrivateKeyFactory.cpp - ${LIBRARY_DIR}/NetSSL_OpenSSL/src/PrivateKeyFactoryMgr.cpp - ${LIBRARY_DIR}/NetSSL_OpenSSL/src/PrivateKeyPassphraseHandler.cpp - ${LIBRARY_DIR}/NetSSL_OpenSSL/src/RejectCertificateHandler.cpp - ${LIBRARY_DIR}/NetSSL_OpenSSL/src/SecureServerSocket.cpp - ${LIBRARY_DIR}/NetSSL_OpenSSL/src/SecureServerSocketImpl.cpp - ${LIBRARY_DIR}/NetSSL_OpenSSL/src/SecureSMTPClientSession.cpp - ${LIBRARY_DIR}/NetSSL_OpenSSL/src/SecureSocketImpl.cpp - ${LIBRARY_DIR}/NetSSL_OpenSSL/src/SecureStreamSocket.cpp - ${LIBRARY_DIR}/NetSSL_OpenSSL/src/SecureStreamSocketImpl.cpp - ${LIBRARY_DIR}/NetSSL_OpenSSL/src/Session.cpp - ${LIBRARY_DIR}/NetSSL_OpenSSL/src/SSLException.cpp - ${LIBRARY_DIR}/NetSSL_OpenSSL/src/SSLManager.cpp - ${LIBRARY_DIR}/NetSSL_OpenSSL/src/Utility.cpp - ${LIBRARY_DIR}/NetSSL_OpenSSL/src/VerificationErrorArgs.cpp - ${LIBRARY_DIR}/NetSSL_OpenSSL/src/X509Certificate.cpp + "${LIBRARY_DIR}/NetSSL_OpenSSL/src/AcceptCertificateHandler.cpp" + "${LIBRARY_DIR}/NetSSL_OpenSSL/src/CertificateHandlerFactory.cpp" + "${LIBRARY_DIR}/NetSSL_OpenSSL/src/CertificateHandlerFactoryMgr.cpp" + "${LIBRARY_DIR}/NetSSL_OpenSSL/src/ConsoleCertificateHandler.cpp" + "${LIBRARY_DIR}/NetSSL_OpenSSL/src/Context.cpp" + "${LIBRARY_DIR}/NetSSL_OpenSSL/src/HTTPSClientSession.cpp" + "${LIBRARY_DIR}/NetSSL_OpenSSL/src/HTTPSSessionInstantiator.cpp" + "${LIBRARY_DIR}/NetSSL_OpenSSL/src/HTTPSStreamFactory.cpp" + "${LIBRARY_DIR}/NetSSL_OpenSSL/src/InvalidCertificateHandler.cpp" + "${LIBRARY_DIR}/NetSSL_OpenSSL/src/KeyConsoleHandler.cpp" + "${LIBRARY_DIR}/NetSSL_OpenSSL/src/KeyFileHandler.cpp" + "${LIBRARY_DIR}/NetSSL_OpenSSL/src/PrivateKeyFactory.cpp" + "${LIBRARY_DIR}/NetSSL_OpenSSL/src/PrivateKeyFactoryMgr.cpp" + "${LIBRARY_DIR}/NetSSL_OpenSSL/src/PrivateKeyPassphraseHandler.cpp" + "${LIBRARY_DIR}/NetSSL_OpenSSL/src/RejectCertificateHandler.cpp" + "${LIBRARY_DIR}/NetSSL_OpenSSL/src/SecureServerSocket.cpp" + "${LIBRARY_DIR}/NetSSL_OpenSSL/src/SecureServerSocketImpl.cpp" + "${LIBRARY_DIR}/NetSSL_OpenSSL/src/SecureSMTPClientSession.cpp" + "${LIBRARY_DIR}/NetSSL_OpenSSL/src/SecureSocketImpl.cpp" + "${LIBRARY_DIR}/NetSSL_OpenSSL/src/SecureStreamSocket.cpp" + "${LIBRARY_DIR}/NetSSL_OpenSSL/src/SecureStreamSocketImpl.cpp" + "${LIBRARY_DIR}/NetSSL_OpenSSL/src/Session.cpp" + "${LIBRARY_DIR}/NetSSL_OpenSSL/src/SSLException.cpp" + "${LIBRARY_DIR}/NetSSL_OpenSSL/src/SSLManager.cpp" + "${LIBRARY_DIR}/NetSSL_OpenSSL/src/Utility.cpp" + "${LIBRARY_DIR}/NetSSL_OpenSSL/src/VerificationErrorArgs.cpp" + "${LIBRARY_DIR}/NetSSL_OpenSSL/src/X509Certificate.cpp" ) add_library (_poco_net_ssl ${SRCS}) add_library (Poco::Net::SSL ALIAS _poco_net_ssl) - target_include_directories (_poco_net_ssl SYSTEM PUBLIC ${LIBRARY_DIR}/NetSSL_OpenSSL/include) + target_include_directories (_poco_net_ssl SYSTEM PUBLIC "${LIBRARY_DIR}/NetSSL_OpenSSL/include") target_link_libraries (_poco_net_ssl PUBLIC Poco::Crypto Poco::Net Poco::Util) else () add_library (Poco::Net::SSL UNKNOWN 
IMPORTED GLOBAL) diff --git a/contrib/poco-cmake/Redis/CMakeLists.txt b/contrib/poco-cmake/Redis/CMakeLists.txt index 43d0009101c..b5892addd85 100644 --- a/contrib/poco-cmake/Redis/CMakeLists.txt +++ b/contrib/poco-cmake/Redis/CMakeLists.txt @@ -1,14 +1,14 @@ if (USE_INTERNAL_POCO_LIBRARY) set (SRCS - ${LIBRARY_DIR}/Redis/src/Array.cpp - ${LIBRARY_DIR}/Redis/src/AsyncReader.cpp - ${LIBRARY_DIR}/Redis/src/Client.cpp - ${LIBRARY_DIR}/Redis/src/Command.cpp - ${LIBRARY_DIR}/Redis/src/Error.cpp - ${LIBRARY_DIR}/Redis/src/Exception.cpp - ${LIBRARY_DIR}/Redis/src/RedisEventArgs.cpp - ${LIBRARY_DIR}/Redis/src/RedisStream.cpp - ${LIBRARY_DIR}/Redis/src/Type.cpp + "${LIBRARY_DIR}/Redis/src/Array.cpp" + "${LIBRARY_DIR}/Redis/src/AsyncReader.cpp" + "${LIBRARY_DIR}/Redis/src/Client.cpp" + "${LIBRARY_DIR}/Redis/src/Command.cpp" + "${LIBRARY_DIR}/Redis/src/Error.cpp" + "${LIBRARY_DIR}/Redis/src/Exception.cpp" + "${LIBRARY_DIR}/Redis/src/RedisEventArgs.cpp" + "${LIBRARY_DIR}/Redis/src/RedisStream.cpp" + "${LIBRARY_DIR}/Redis/src/Type.cpp" ) add_library (_poco_redis ${SRCS}) @@ -18,7 +18,7 @@ if (USE_INTERNAL_POCO_LIBRARY) target_compile_options (_poco_redis PRIVATE -Wno-deprecated-copy) endif () target_compile_options (_poco_redis PRIVATE -Wno-shadow) - target_include_directories (_poco_redis SYSTEM PUBLIC ${LIBRARY_DIR}/Redis/include) + target_include_directories (_poco_redis SYSTEM PUBLIC "${LIBRARY_DIR}/Redis/include") target_link_libraries (_poco_redis PUBLIC Poco::Net) else () add_library (Poco::Redis UNKNOWN IMPORTED GLOBAL) diff --git a/contrib/poco-cmake/Util/CMakeLists.txt b/contrib/poco-cmake/Util/CMakeLists.txt index f5af3a5793c..e233e65cfea 100644 --- a/contrib/poco-cmake/Util/CMakeLists.txt +++ b/contrib/poco-cmake/Util/CMakeLists.txt @@ -1,38 +1,38 @@ if (USE_INTERNAL_POCO_LIBRARY) set (SRCS - ${LIBRARY_DIR}/Util/src/AbstractConfiguration.cpp - ${LIBRARY_DIR}/Util/src/Application.cpp - ${LIBRARY_DIR}/Util/src/ConfigurationMapper.cpp - ${LIBRARY_DIR}/Util/src/ConfigurationView.cpp - ${LIBRARY_DIR}/Util/src/FilesystemConfiguration.cpp - ${LIBRARY_DIR}/Util/src/HelpFormatter.cpp - ${LIBRARY_DIR}/Util/src/IniFileConfiguration.cpp - ${LIBRARY_DIR}/Util/src/IntValidator.cpp - ${LIBRARY_DIR}/Util/src/JSONConfiguration.cpp - ${LIBRARY_DIR}/Util/src/LayeredConfiguration.cpp - ${LIBRARY_DIR}/Util/src/LoggingConfigurator.cpp - ${LIBRARY_DIR}/Util/src/LoggingSubsystem.cpp - ${LIBRARY_DIR}/Util/src/MapConfiguration.cpp - ${LIBRARY_DIR}/Util/src/Option.cpp - ${LIBRARY_DIR}/Util/src/OptionCallback.cpp - ${LIBRARY_DIR}/Util/src/OptionException.cpp - ${LIBRARY_DIR}/Util/src/OptionProcessor.cpp - ${LIBRARY_DIR}/Util/src/OptionSet.cpp - ${LIBRARY_DIR}/Util/src/PropertyFileConfiguration.cpp - ${LIBRARY_DIR}/Util/src/RegExpValidator.cpp - ${LIBRARY_DIR}/Util/src/ServerApplication.cpp - ${LIBRARY_DIR}/Util/src/Subsystem.cpp - ${LIBRARY_DIR}/Util/src/SystemConfiguration.cpp - ${LIBRARY_DIR}/Util/src/Timer.cpp - ${LIBRARY_DIR}/Util/src/TimerTask.cpp - ${LIBRARY_DIR}/Util/src/Validator.cpp - ${LIBRARY_DIR}/Util/src/XMLConfiguration.cpp + "${LIBRARY_DIR}/Util/src/AbstractConfiguration.cpp" + "${LIBRARY_DIR}/Util/src/Application.cpp" + "${LIBRARY_DIR}/Util/src/ConfigurationMapper.cpp" + "${LIBRARY_DIR}/Util/src/ConfigurationView.cpp" + "${LIBRARY_DIR}/Util/src/FilesystemConfiguration.cpp" + "${LIBRARY_DIR}/Util/src/HelpFormatter.cpp" + "${LIBRARY_DIR}/Util/src/IniFileConfiguration.cpp" + "${LIBRARY_DIR}/Util/src/IntValidator.cpp" + "${LIBRARY_DIR}/Util/src/JSONConfiguration.cpp" + 
"${LIBRARY_DIR}/Util/src/LayeredConfiguration.cpp" + "${LIBRARY_DIR}/Util/src/LoggingConfigurator.cpp" + "${LIBRARY_DIR}/Util/src/LoggingSubsystem.cpp" + "${LIBRARY_DIR}/Util/src/MapConfiguration.cpp" + "${LIBRARY_DIR}/Util/src/Option.cpp" + "${LIBRARY_DIR}/Util/src/OptionCallback.cpp" + "${LIBRARY_DIR}/Util/src/OptionException.cpp" + "${LIBRARY_DIR}/Util/src/OptionProcessor.cpp" + "${LIBRARY_DIR}/Util/src/OptionSet.cpp" + "${LIBRARY_DIR}/Util/src/PropertyFileConfiguration.cpp" + "${LIBRARY_DIR}/Util/src/RegExpValidator.cpp" + "${LIBRARY_DIR}/Util/src/ServerApplication.cpp" + "${LIBRARY_DIR}/Util/src/Subsystem.cpp" + "${LIBRARY_DIR}/Util/src/SystemConfiguration.cpp" + "${LIBRARY_DIR}/Util/src/Timer.cpp" + "${LIBRARY_DIR}/Util/src/TimerTask.cpp" + "${LIBRARY_DIR}/Util/src/Validator.cpp" + "${LIBRARY_DIR}/Util/src/XMLConfiguration.cpp" ) add_library (_poco_util ${SRCS}) add_library (Poco::Util ALIAS _poco_util) - target_include_directories (_poco_util SYSTEM PUBLIC ${LIBRARY_DIR}/Util/include) + target_include_directories (_poco_util SYSTEM PUBLIC "${LIBRARY_DIR}/Util/include") target_link_libraries (_poco_util PUBLIC Poco::JSON Poco::XML) else () add_library (Poco::Util UNKNOWN IMPORTED GLOBAL) diff --git a/contrib/poco-cmake/XML/CMakeLists.txt b/contrib/poco-cmake/XML/CMakeLists.txt index 448b7e22c7c..af801a65f03 100644 --- a/contrib/poco-cmake/XML/CMakeLists.txt +++ b/contrib/poco-cmake/XML/CMakeLists.txt @@ -2,101 +2,101 @@ if (USE_INTERNAL_POCO_LIBRARY) # Poco::XML (expat) set (SRCS_EXPAT - ${LIBRARY_DIR}/XML/src/xmlrole.c - ${LIBRARY_DIR}/XML/src/xmltok_impl.c - ${LIBRARY_DIR}/XML/src/xmltok_ns.c - ${LIBRARY_DIR}/XML/src/xmltok.c + "${LIBRARY_DIR}/XML/src/xmlrole.c" + "${LIBRARY_DIR}/XML/src/xmltok_impl.c" + "${LIBRARY_DIR}/XML/src/xmltok_ns.c" + "${LIBRARY_DIR}/XML/src/xmltok.c" ) add_library (_poco_xml_expat ${SRCS_EXPAT}) add_library (Poco::XML::Expat ALIAS _poco_xml_expat) - target_include_directories (_poco_xml_expat PUBLIC ${LIBRARY_DIR}/XML/include) + target_include_directories (_poco_xml_expat PUBLIC "${LIBRARY_DIR}/XML/include") # Poco::XML set (SRCS - ${LIBRARY_DIR}/XML/src/AbstractContainerNode.cpp - ${LIBRARY_DIR}/XML/src/AbstractNode.cpp - ${LIBRARY_DIR}/XML/src/Attr.cpp - ${LIBRARY_DIR}/XML/src/Attributes.cpp - ${LIBRARY_DIR}/XML/src/AttributesImpl.cpp - ${LIBRARY_DIR}/XML/src/AttrMap.cpp - ${LIBRARY_DIR}/XML/src/CDATASection.cpp - ${LIBRARY_DIR}/XML/src/CharacterData.cpp - ${LIBRARY_DIR}/XML/src/ChildNodesList.cpp - ${LIBRARY_DIR}/XML/src/Comment.cpp - ${LIBRARY_DIR}/XML/src/ContentHandler.cpp - ${LIBRARY_DIR}/XML/src/DeclHandler.cpp - ${LIBRARY_DIR}/XML/src/DefaultHandler.cpp - ${LIBRARY_DIR}/XML/src/Document.cpp - ${LIBRARY_DIR}/XML/src/DocumentEvent.cpp - ${LIBRARY_DIR}/XML/src/DocumentFragment.cpp - ${LIBRARY_DIR}/XML/src/DocumentType.cpp - ${LIBRARY_DIR}/XML/src/DOMBuilder.cpp - ${LIBRARY_DIR}/XML/src/DOMException.cpp - ${LIBRARY_DIR}/XML/src/DOMImplementation.cpp - ${LIBRARY_DIR}/XML/src/DOMObject.cpp - ${LIBRARY_DIR}/XML/src/DOMParser.cpp - ${LIBRARY_DIR}/XML/src/DOMSerializer.cpp - ${LIBRARY_DIR}/XML/src/DOMWriter.cpp - ${LIBRARY_DIR}/XML/src/DTDHandler.cpp - ${LIBRARY_DIR}/XML/src/DTDMap.cpp - ${LIBRARY_DIR}/XML/src/Element.cpp - ${LIBRARY_DIR}/XML/src/ElementsByTagNameList.cpp - ${LIBRARY_DIR}/XML/src/Entity.cpp - ${LIBRARY_DIR}/XML/src/EntityReference.cpp - ${LIBRARY_DIR}/XML/src/EntityResolver.cpp - ${LIBRARY_DIR}/XML/src/EntityResolverImpl.cpp - ${LIBRARY_DIR}/XML/src/ErrorHandler.cpp - ${LIBRARY_DIR}/XML/src/Event.cpp - 
${LIBRARY_DIR}/XML/src/EventDispatcher.cpp - ${LIBRARY_DIR}/XML/src/EventException.cpp - ${LIBRARY_DIR}/XML/src/EventListener.cpp - ${LIBRARY_DIR}/XML/src/EventTarget.cpp - ${LIBRARY_DIR}/XML/src/InputSource.cpp - ${LIBRARY_DIR}/XML/src/LexicalHandler.cpp - ${LIBRARY_DIR}/XML/src/Locator.cpp - ${LIBRARY_DIR}/XML/src/LocatorImpl.cpp - ${LIBRARY_DIR}/XML/src/MutationEvent.cpp - ${LIBRARY_DIR}/XML/src/Name.cpp - ${LIBRARY_DIR}/XML/src/NamedNodeMap.cpp - ${LIBRARY_DIR}/XML/src/NamePool.cpp - ${LIBRARY_DIR}/XML/src/NamespaceStrategy.cpp - ${LIBRARY_DIR}/XML/src/NamespaceSupport.cpp - ${LIBRARY_DIR}/XML/src/Node.cpp - ${LIBRARY_DIR}/XML/src/NodeAppender.cpp - ${LIBRARY_DIR}/XML/src/NodeFilter.cpp - ${LIBRARY_DIR}/XML/src/NodeIterator.cpp - ${LIBRARY_DIR}/XML/src/NodeList.cpp - ${LIBRARY_DIR}/XML/src/Notation.cpp - ${LIBRARY_DIR}/XML/src/ParserEngine.cpp - ${LIBRARY_DIR}/XML/src/ProcessingInstruction.cpp - ${LIBRARY_DIR}/XML/src/QName.cpp - ${LIBRARY_DIR}/XML/src/SAXException.cpp - ${LIBRARY_DIR}/XML/src/SAXParser.cpp - ${LIBRARY_DIR}/XML/src/Text.cpp - ${LIBRARY_DIR}/XML/src/TreeWalker.cpp - ${LIBRARY_DIR}/XML/src/ValueTraits.cpp - ${LIBRARY_DIR}/XML/src/WhitespaceFilter.cpp - ${LIBRARY_DIR}/XML/src/XMLException.cpp - ${LIBRARY_DIR}/XML/src/XMLFilter.cpp - ${LIBRARY_DIR}/XML/src/XMLFilterImpl.cpp - ${LIBRARY_DIR}/XML/src/XMLReader.cpp - ${LIBRARY_DIR}/XML/src/XMLStreamParser.cpp - ${LIBRARY_DIR}/XML/src/XMLStreamParserException.cpp - ${LIBRARY_DIR}/XML/src/XMLString.cpp - ${LIBRARY_DIR}/XML/src/XMLWriter.cpp + "${LIBRARY_DIR}/XML/src/AbstractContainerNode.cpp" + "${LIBRARY_DIR}/XML/src/AbstractNode.cpp" + "${LIBRARY_DIR}/XML/src/Attr.cpp" + "${LIBRARY_DIR}/XML/src/Attributes.cpp" + "${LIBRARY_DIR}/XML/src/AttributesImpl.cpp" + "${LIBRARY_DIR}/XML/src/AttrMap.cpp" + "${LIBRARY_DIR}/XML/src/CDATASection.cpp" + "${LIBRARY_DIR}/XML/src/CharacterData.cpp" + "${LIBRARY_DIR}/XML/src/ChildNodesList.cpp" + "${LIBRARY_DIR}/XML/src/Comment.cpp" + "${LIBRARY_DIR}/XML/src/ContentHandler.cpp" + "${LIBRARY_DIR}/XML/src/DeclHandler.cpp" + "${LIBRARY_DIR}/XML/src/DefaultHandler.cpp" + "${LIBRARY_DIR}/XML/src/Document.cpp" + "${LIBRARY_DIR}/XML/src/DocumentEvent.cpp" + "${LIBRARY_DIR}/XML/src/DocumentFragment.cpp" + "${LIBRARY_DIR}/XML/src/DocumentType.cpp" + "${LIBRARY_DIR}/XML/src/DOMBuilder.cpp" + "${LIBRARY_DIR}/XML/src/DOMException.cpp" + "${LIBRARY_DIR}/XML/src/DOMImplementation.cpp" + "${LIBRARY_DIR}/XML/src/DOMObject.cpp" + "${LIBRARY_DIR}/XML/src/DOMParser.cpp" + "${LIBRARY_DIR}/XML/src/DOMSerializer.cpp" + "${LIBRARY_DIR}/XML/src/DOMWriter.cpp" + "${LIBRARY_DIR}/XML/src/DTDHandler.cpp" + "${LIBRARY_DIR}/XML/src/DTDMap.cpp" + "${LIBRARY_DIR}/XML/src/Element.cpp" + "${LIBRARY_DIR}/XML/src/ElementsByTagNameList.cpp" + "${LIBRARY_DIR}/XML/src/Entity.cpp" + "${LIBRARY_DIR}/XML/src/EntityReference.cpp" + "${LIBRARY_DIR}/XML/src/EntityResolver.cpp" + "${LIBRARY_DIR}/XML/src/EntityResolverImpl.cpp" + "${LIBRARY_DIR}/XML/src/ErrorHandler.cpp" + "${LIBRARY_DIR}/XML/src/Event.cpp" + "${LIBRARY_DIR}/XML/src/EventDispatcher.cpp" + "${LIBRARY_DIR}/XML/src/EventException.cpp" + "${LIBRARY_DIR}/XML/src/EventListener.cpp" + "${LIBRARY_DIR}/XML/src/EventTarget.cpp" + "${LIBRARY_DIR}/XML/src/InputSource.cpp" + "${LIBRARY_DIR}/XML/src/LexicalHandler.cpp" + "${LIBRARY_DIR}/XML/src/Locator.cpp" + "${LIBRARY_DIR}/XML/src/LocatorImpl.cpp" + "${LIBRARY_DIR}/XML/src/MutationEvent.cpp" + "${LIBRARY_DIR}/XML/src/Name.cpp" + "${LIBRARY_DIR}/XML/src/NamedNodeMap.cpp" + "${LIBRARY_DIR}/XML/src/NamePool.cpp" + 
"${LIBRARY_DIR}/XML/src/NamespaceStrategy.cpp" + "${LIBRARY_DIR}/XML/src/NamespaceSupport.cpp" + "${LIBRARY_DIR}/XML/src/Node.cpp" + "${LIBRARY_DIR}/XML/src/NodeAppender.cpp" + "${LIBRARY_DIR}/XML/src/NodeFilter.cpp" + "${LIBRARY_DIR}/XML/src/NodeIterator.cpp" + "${LIBRARY_DIR}/XML/src/NodeList.cpp" + "${LIBRARY_DIR}/XML/src/Notation.cpp" + "${LIBRARY_DIR}/XML/src/ParserEngine.cpp" + "${LIBRARY_DIR}/XML/src/ProcessingInstruction.cpp" + "${LIBRARY_DIR}/XML/src/QName.cpp" + "${LIBRARY_DIR}/XML/src/SAXException.cpp" + "${LIBRARY_DIR}/XML/src/SAXParser.cpp" + "${LIBRARY_DIR}/XML/src/Text.cpp" + "${LIBRARY_DIR}/XML/src/TreeWalker.cpp" + "${LIBRARY_DIR}/XML/src/ValueTraits.cpp" + "${LIBRARY_DIR}/XML/src/WhitespaceFilter.cpp" + "${LIBRARY_DIR}/XML/src/XMLException.cpp" + "${LIBRARY_DIR}/XML/src/XMLFilter.cpp" + "${LIBRARY_DIR}/XML/src/XMLFilterImpl.cpp" + "${LIBRARY_DIR}/XML/src/XMLReader.cpp" + "${LIBRARY_DIR}/XML/src/XMLStreamParser.cpp" + "${LIBRARY_DIR}/XML/src/XMLStreamParserException.cpp" + "${LIBRARY_DIR}/XML/src/XMLString.cpp" + "${LIBRARY_DIR}/XML/src/XMLWriter.cpp" # expat - ${LIBRARY_DIR}/XML/src/xmlparse.cpp + "${LIBRARY_DIR}/XML/src/xmlparse.cpp" ) add_library (_poco_xml ${SRCS}) add_library (Poco::XML ALIAS _poco_xml) target_compile_options (_poco_xml PRIVATE -Wno-old-style-cast) - target_include_directories (_poco_xml SYSTEM PUBLIC ${LIBRARY_DIR}/XML/include) + target_include_directories (_poco_xml SYSTEM PUBLIC "${LIBRARY_DIR}/XML/include") target_link_libraries (_poco_xml PUBLIC Poco::Foundation Poco::XML::Expat) else () add_library (Poco::XML UNKNOWN IMPORTED GLOBAL) diff --git a/contrib/protobuf-cmake/CMakeLists.txt b/contrib/protobuf-cmake/CMakeLists.txt index 1f8d9b02b3e..a4993030d04 100644 --- a/contrib/protobuf-cmake/CMakeLists.txt +++ b/contrib/protobuf-cmake/CMakeLists.txt @@ -14,4 +14,4 @@ add_subdirectory("${protobuf_SOURCE_DIR}/cmake" "${protobuf_BINARY_DIR}") # We don't want to stop compilation on warnings in protobuf's headers. 
# The following line overrides the value assigned by the command target_include_directories() in libprotobuf.cmake -set_property(TARGET libprotobuf PROPERTY INTERFACE_SYSTEM_INCLUDE_DIRECTORIES ${protobuf_SOURCE_DIR}/src) +set_property(TARGET libprotobuf PROPERTY INTERFACE_SYSTEM_INCLUDE_DIRECTORIES "${protobuf_SOURCE_DIR}/src") diff --git a/contrib/replxx-cmake/CMakeLists.txt b/contrib/replxx-cmake/CMakeLists.txt index df17e0ed646..07f24bae25d 100644 --- a/contrib/replxx-cmake/CMakeLists.txt +++ b/contrib/replxx-cmake/CMakeLists.txt @@ -62,7 +62,7 @@ if (NOT LIBRARY_REPLXX OR NOT INCLUDE_REPLXX OR NOT EXTERNAL_REPLXX_WORKS) ) add_library (replxx ${SRCS}) - target_include_directories(replxx SYSTEM PUBLIC ${LIBRARY_DIR}/include) + target_include_directories(replxx SYSTEM PUBLIC "${LIBRARY_DIR}/include") endif () if (COMPILER_CLANG) diff --git a/contrib/rocksdb b/contrib/rocksdb index 54a0decabbc..07c77549a20 160000 --- a/contrib/rocksdb +++ b/contrib/rocksdb @@ -1 +1 @@ -Subproject commit 54a0decabbcf4c0bb5cf7befa9c597f28289bff5 +Subproject commit 07c77549a20b63ff6981b400085eba36bb5c80c4 diff --git a/contrib/rocksdb-cmake/CMakeLists.txt b/contrib/rocksdb-cmake/CMakeLists.txt index 77a30776a4a..bccc9ed5294 100644 --- a/contrib/rocksdb-cmake/CMakeLists.txt +++ b/contrib/rocksdb-cmake/CMakeLists.txt @@ -2,15 +2,6 @@ set(ROCKSDB_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/rocksdb") list(APPEND CMAKE_MODULE_PATH "${ROCKSDB_SOURCE_DIR}/cmake/modules/") -if (SANITIZE STREQUAL "undefined") - set(WITH_UBSAN ON) -elseif (SANITIZE STREQUAL "address") - set(WITH_ASAN ON) -elseif (SANITIZE STREQUAL "thread") - set(WITH_TSAN ON) -endif() - - set(PORTABLE ON) ## always disable jemalloc for rocksdb by default ## because it introduces non-standard jemalloc APIs @@ -40,7 +31,7 @@ endif() if(MSVC) option(WITH_XPRESS "build with windows built in compression" OFF) - include(${ROCKSDB_SOURCE_DIR}/thirdparty.inc) + include("${ROCKSDB_SOURCE_DIR}/thirdparty.inc") else() if(CMAKE_SYSTEM_NAME MATCHES "FreeBSD" AND NOT CMAKE_SYSTEM_NAME MATCHES "kFreeBSD") # FreeBSD has jemalloc as default malloc @@ -71,55 +62,18 @@ else() if(WITH_ZSTD) add_definitions(-DZSTD) include_directories(${ZSTD_INCLUDE_DIR}) - include_directories(${ZSTD_INCLUDE_DIR}/common) - include_directories(${ZSTD_INCLUDE_DIR}/dictBuilder) - include_directories(${ZSTD_INCLUDE_DIR}/deprecated) + include_directories("${ZSTD_INCLUDE_DIR}/common") + include_directories("${ZSTD_INCLUDE_DIR}/dictBuilder") + include_directories("${ZSTD_INCLUDE_DIR}/deprecated") list(APPEND THIRDPARTY_LIBS zstd) endif() endif() -string(TIMESTAMP TS "%Y/%m/%d %H:%M:%S" UTC) -set(GIT_DATE_TIME "${TS}" CACHE STRING "the time we first built rocksdb") - -find_package(Git) - -if(GIT_FOUND AND EXISTS "${ROCKSDB_SOURCE_DIR}/.git") - if(WIN32) - execute_process(COMMAND $ENV{COMSPEC} /C ${GIT_EXECUTABLE} -C ${ROCKSDB_SOURCE_DIR} rev-parse HEAD OUTPUT_VARIABLE GIT_SHA) - else() - execute_process(COMMAND ${GIT_EXECUTABLE} -C ${ROCKSDB_SOURCE_DIR} rev-parse HEAD OUTPUT_VARIABLE GIT_SHA) - endif() -else() - set(GIT_SHA 0) -endif() - -string(REGEX REPLACE "[^0-9a-f]+" "" GIT_SHA "${GIT_SHA}") - -set(BUILD_VERSION_CC ${CMAKE_BINARY_DIR}/rocksdb_build_version.cc) -configure_file(${ROCKSDB_SOURCE_DIR}/util/build_version.cc.in ${BUILD_VERSION_CC} @ONLY) +set(BUILD_VERSION_CC rocksdb_build_version.cc) add_library(rocksdb_build_version OBJECT ${BUILD_VERSION_CC}) -target_include_directories(rocksdb_build_version PRIVATE - ${ROCKSDB_SOURCE_DIR}/util) -if(MSVC) - set(CMAKE_CXX_FLAGS 
"${CMAKE_CXX_FLAGS} /Zi /nologo /EHsc /GS /Gd /GR /GF /fp:precise /Zc:wchar_t /Zc:forScope /errorReport:queue") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /FC /d2Zi+ /W4 /wd4127 /wd4800 /wd4996 /wd4351 /wd4100 /wd4204 /wd4324") -else() - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -W -Wextra -Wall") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wsign-compare -Wshadow -Wno-unused-parameter -Wno-unused-variable -Woverloaded-virtual -Wnon-virtual-dtor -Wno-missing-field-initializers -Wno-strict-aliasing") - if(MINGW) - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-format -fno-asynchronous-unwind-tables") - add_definitions(-D_POSIX_C_SOURCE=1) - endif() - if(NOT CMAKE_BUILD_TYPE STREQUAL "Debug") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-omit-frame-pointer") - include(CheckCXXCompilerFlag) - CHECK_CXX_COMPILER_FLAG("-momit-leaf-frame-pointer" HAVE_OMIT_LEAF_FRAME_POINTER) - if(HAVE_OMIT_LEAF_FRAME_POINTER) - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -momit-leaf-frame-pointer") - endif() - endif() -endif() + +target_include_directories(rocksdb_build_version PRIVATE "${ROCKSDB_SOURCE_DIR}/util") include(CheckCCompilerFlag) if(CMAKE_SYSTEM_PROCESSOR MATCHES "^(powerpc|ppc)64") @@ -142,14 +96,14 @@ if(CMAKE_SYSTEM_PROCESSOR MATCHES "^(powerpc|ppc)64") endif(HAS_ALTIVEC) endif(CMAKE_SYSTEM_PROCESSOR MATCHES "^(powerpc|ppc)64") -if(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64|AARCH64") +if(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64|AARCH64|arm64|ARM64") CHECK_C_COMPILER_FLAG("-march=armv8-a+crc+crypto" HAS_ARMV8_CRC) if(HAS_ARMV8_CRC) message(STATUS " HAS_ARMV8_CRC yes") set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -march=armv8-a+crc+crypto -Wno-unused-function") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -march=armv8-a+crc+crypto -Wno-unused-function") endif(HAS_ARMV8_CRC) -endif(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64|AARCH64") +endif(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64|AARCH64|arm64|ARM64") include(CheckCXXSourceCompiles) @@ -189,50 +143,7 @@ if(HAVE_THREAD_LOCAL) add_definitions(-DROCKSDB_SUPPORT_THREAD_LOCAL) endif() -option(FAIL_ON_WARNINGS "Treat compile warnings as errors" ON) -if(FAIL_ON_WARNINGS) - if(MSVC) - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /WX") - else() # assume GCC - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror") - endif() -endif() - -option(WITH_ASAN "build with ASAN" OFF) -if(WITH_ASAN) - set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fsanitize=address") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsanitize=address") - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fsanitize=address") - if(WITH_JEMALLOC) - message(FATAL "ASAN does not work well with JeMalloc") - endif() -endif() - -option(WITH_TSAN "build with TSAN" OFF) -if(WITH_TSAN) - set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fsanitize=thread -pie") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsanitize=thread -fPIC") - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fsanitize=thread -fPIC") - if(WITH_JEMALLOC) - message(FATAL "TSAN does not work well with JeMalloc") - endif() -endif() - -option(WITH_UBSAN "build with UBSAN" OFF) -if(WITH_UBSAN) - add_definitions(-DROCKSDB_UBSAN_RUN) - set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fsanitize=undefined") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsanitize=undefined") - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fsanitize=undefined") - if(WITH_JEMALLOC) - message(FATAL "UBSAN does not work well with JeMalloc") - endif() -endif() - - -if(CMAKE_SYSTEM_NAME MATCHES "Cygwin") - add_definitions(-fno-builtin-memcmp -DCYGWIN) -elseif(CMAKE_SYSTEM_NAME MATCHES "Darwin") +if(CMAKE_SYSTEM_NAME MATCHES 
"Darwin") add_definitions(-DOS_MACOSX) if(CMAKE_SYSTEM_PROCESSOR MATCHES arm) add_definitions(-DIOS_CROSS_COMPILE -DROCKSDB_LITE) @@ -304,9 +215,9 @@ endif() include(CheckCXXSymbolExists) if(CMAKE_SYSTEM_NAME MATCHES "^FreeBSD") - check_cxx_symbol_exists(malloc_usable_size ${ROCKSDB_SOURCE_DIR}/malloc_np.h HAVE_MALLOC_USABLE_SIZE) + check_cxx_symbol_exists(malloc_usable_size "${ROCKSDB_SOURCE_DIR}/malloc_np.h" HAVE_MALLOC_USABLE_SIZE) else() - check_cxx_symbol_exists(malloc_usable_size ${ROCKSDB_SOURCE_DIR}/malloc.h HAVE_MALLOC_USABLE_SIZE) + check_cxx_symbol_exists(malloc_usable_size "${ROCKSDB_SOURCE_DIR}/malloc.h" HAVE_MALLOC_USABLE_SIZE) endif() if(HAVE_MALLOC_USABLE_SIZE) add_definitions(-DROCKSDB_MALLOC_USABLE_SIZE) @@ -323,347 +234,316 @@ if(HAVE_AUXV_GETAUXVAL) endif() include_directories(${ROCKSDB_SOURCE_DIR}) -include_directories(${ROCKSDB_SOURCE_DIR}/include) +include_directories("${ROCKSDB_SOURCE_DIR}/include") if(WITH_FOLLY_DISTRIBUTED_MUTEX) - include_directories(${ROCKSDB_SOURCE_DIR}/third-party/folly) + include_directories("${ROCKSDB_SOURCE_DIR}/third-party/folly") endif() find_package(Threads REQUIRED) # Main library source code set(SOURCES - ${ROCKSDB_SOURCE_DIR}/cache/cache.cc - ${ROCKSDB_SOURCE_DIR}/cache/clock_cache.cc - ${ROCKSDB_SOURCE_DIR}/cache/lru_cache.cc - ${ROCKSDB_SOURCE_DIR}/cache/sharded_cache.cc - ${ROCKSDB_SOURCE_DIR}/db/arena_wrapped_db_iter.cc - ${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_addition.cc - ${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_builder.cc - ${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_cache.cc - ${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_garbage.cc - ${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_meta.cc - ${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_reader.cc - ${ROCKSDB_SOURCE_DIR}/db/blob/blob_log_format.cc - ${ROCKSDB_SOURCE_DIR}/db/blob/blob_log_sequential_reader.cc - ${ROCKSDB_SOURCE_DIR}/db/blob/blob_log_writer.cc - ${ROCKSDB_SOURCE_DIR}/db/builder.cc - ${ROCKSDB_SOURCE_DIR}/db/c.cc - ${ROCKSDB_SOURCE_DIR}/db/column_family.cc - ${ROCKSDB_SOURCE_DIR}/db/compacted_db_impl.cc - ${ROCKSDB_SOURCE_DIR}/db/compaction/compaction.cc - ${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_iterator.cc - ${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_picker.cc - ${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_job.cc - ${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_picker_fifo.cc - ${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_picker_level.cc - ${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_picker_universal.cc - ${ROCKSDB_SOURCE_DIR}/db/compaction/sst_partitioner.cc - ${ROCKSDB_SOURCE_DIR}/db/convenience.cc - ${ROCKSDB_SOURCE_DIR}/db/db_filesnapshot.cc - ${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl.cc - ${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_write.cc - ${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_compaction_flush.cc - ${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_files.cc - ${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_open.cc - ${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_debug.cc - ${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_experimental.cc - ${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_readonly.cc - ${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_secondary.cc - ${ROCKSDB_SOURCE_DIR}/db/db_info_dumper.cc - ${ROCKSDB_SOURCE_DIR}/db/db_iter.cc - ${ROCKSDB_SOURCE_DIR}/db/dbformat.cc - ${ROCKSDB_SOURCE_DIR}/db/error_handler.cc - ${ROCKSDB_SOURCE_DIR}/db/event_helpers.cc - ${ROCKSDB_SOURCE_DIR}/db/experimental.cc - ${ROCKSDB_SOURCE_DIR}/db/external_sst_file_ingestion_job.cc - ${ROCKSDB_SOURCE_DIR}/db/file_indexer.cc - ${ROCKSDB_SOURCE_DIR}/db/flush_job.cc - ${ROCKSDB_SOURCE_DIR}/db/flush_scheduler.cc - 
${ROCKSDB_SOURCE_DIR}/db/forward_iterator.cc - ${ROCKSDB_SOURCE_DIR}/db/import_column_family_job.cc - ${ROCKSDB_SOURCE_DIR}/db/internal_stats.cc - ${ROCKSDB_SOURCE_DIR}/db/logs_with_prep_tracker.cc - ${ROCKSDB_SOURCE_DIR}/db/log_reader.cc - ${ROCKSDB_SOURCE_DIR}/db/log_writer.cc - ${ROCKSDB_SOURCE_DIR}/db/malloc_stats.cc - ${ROCKSDB_SOURCE_DIR}/db/memtable.cc - ${ROCKSDB_SOURCE_DIR}/db/memtable_list.cc - ${ROCKSDB_SOURCE_DIR}/db/merge_helper.cc - ${ROCKSDB_SOURCE_DIR}/db/merge_operator.cc - ${ROCKSDB_SOURCE_DIR}/db/output_validator.cc - ${ROCKSDB_SOURCE_DIR}/db/periodic_work_scheduler.cc - ${ROCKSDB_SOURCE_DIR}/db/range_del_aggregator.cc - ${ROCKSDB_SOURCE_DIR}/db/range_tombstone_fragmenter.cc - ${ROCKSDB_SOURCE_DIR}/db/repair.cc - ${ROCKSDB_SOURCE_DIR}/db/snapshot_impl.cc - ${ROCKSDB_SOURCE_DIR}/db/table_cache.cc - ${ROCKSDB_SOURCE_DIR}/db/table_properties_collector.cc - ${ROCKSDB_SOURCE_DIR}/db/transaction_log_impl.cc - ${ROCKSDB_SOURCE_DIR}/db/trim_history_scheduler.cc - ${ROCKSDB_SOURCE_DIR}/db/version_builder.cc - ${ROCKSDB_SOURCE_DIR}/db/version_edit.cc - ${ROCKSDB_SOURCE_DIR}/db/version_edit_handler.cc - ${ROCKSDB_SOURCE_DIR}/db/version_set.cc - ${ROCKSDB_SOURCE_DIR}/db/wal_edit.cc - ${ROCKSDB_SOURCE_DIR}/db/wal_manager.cc - ${ROCKSDB_SOURCE_DIR}/db/write_batch.cc - ${ROCKSDB_SOURCE_DIR}/db/write_batch_base.cc - ${ROCKSDB_SOURCE_DIR}/db/write_controller.cc - ${ROCKSDB_SOURCE_DIR}/db/write_thread.cc - ${ROCKSDB_SOURCE_DIR}/env/env.cc - ${ROCKSDB_SOURCE_DIR}/env/env_chroot.cc - ${ROCKSDB_SOURCE_DIR}/env/env_encryption.cc - ${ROCKSDB_SOURCE_DIR}/env/env_hdfs.cc - ${ROCKSDB_SOURCE_DIR}/env/file_system.cc - ${ROCKSDB_SOURCE_DIR}/env/file_system_tracer.cc - ${ROCKSDB_SOURCE_DIR}/env/mock_env.cc - ${ROCKSDB_SOURCE_DIR}/file/delete_scheduler.cc - ${ROCKSDB_SOURCE_DIR}/file/file_prefetch_buffer.cc - ${ROCKSDB_SOURCE_DIR}/file/file_util.cc - ${ROCKSDB_SOURCE_DIR}/file/filename.cc - ${ROCKSDB_SOURCE_DIR}/file/random_access_file_reader.cc - ${ROCKSDB_SOURCE_DIR}/file/read_write_util.cc - ${ROCKSDB_SOURCE_DIR}/file/readahead_raf.cc - ${ROCKSDB_SOURCE_DIR}/file/sequence_file_reader.cc - ${ROCKSDB_SOURCE_DIR}/file/sst_file_manager_impl.cc - ${ROCKSDB_SOURCE_DIR}/file/writable_file_writer.cc - ${ROCKSDB_SOURCE_DIR}/logging/auto_roll_logger.cc - ${ROCKSDB_SOURCE_DIR}/logging/event_logger.cc - ${ROCKSDB_SOURCE_DIR}/logging/log_buffer.cc - ${ROCKSDB_SOURCE_DIR}/memory/arena.cc - ${ROCKSDB_SOURCE_DIR}/memory/concurrent_arena.cc - ${ROCKSDB_SOURCE_DIR}/memory/jemalloc_nodump_allocator.cc - ${ROCKSDB_SOURCE_DIR}/memory/memkind_kmem_allocator.cc - ${ROCKSDB_SOURCE_DIR}/memtable/alloc_tracker.cc - ${ROCKSDB_SOURCE_DIR}/memtable/hash_linklist_rep.cc - ${ROCKSDB_SOURCE_DIR}/memtable/hash_skiplist_rep.cc - ${ROCKSDB_SOURCE_DIR}/memtable/skiplistrep.cc - ${ROCKSDB_SOURCE_DIR}/memtable/vectorrep.cc - ${ROCKSDB_SOURCE_DIR}/memtable/write_buffer_manager.cc - ${ROCKSDB_SOURCE_DIR}/monitoring/histogram.cc - ${ROCKSDB_SOURCE_DIR}/monitoring/histogram_windowing.cc - ${ROCKSDB_SOURCE_DIR}/monitoring/in_memory_stats_history.cc - ${ROCKSDB_SOURCE_DIR}/monitoring/instrumented_mutex.cc - ${ROCKSDB_SOURCE_DIR}/monitoring/iostats_context.cc - ${ROCKSDB_SOURCE_DIR}/monitoring/perf_context.cc - ${ROCKSDB_SOURCE_DIR}/monitoring/perf_level.cc - ${ROCKSDB_SOURCE_DIR}/monitoring/persistent_stats_history.cc - ${ROCKSDB_SOURCE_DIR}/monitoring/statistics.cc - ${ROCKSDB_SOURCE_DIR}/monitoring/thread_status_impl.cc - ${ROCKSDB_SOURCE_DIR}/monitoring/thread_status_updater.cc - ${ROCKSDB_SOURCE_DIR}/monitoring/thread_status_util.cc - 
${ROCKSDB_SOURCE_DIR}/monitoring/thread_status_util_debug.cc - ${ROCKSDB_SOURCE_DIR}/options/cf_options.cc - ${ROCKSDB_SOURCE_DIR}/options/configurable.cc - ${ROCKSDB_SOURCE_DIR}/options/customizable.cc - ${ROCKSDB_SOURCE_DIR}/options/db_options.cc - ${ROCKSDB_SOURCE_DIR}/options/options.cc - ${ROCKSDB_SOURCE_DIR}/options/options_helper.cc - ${ROCKSDB_SOURCE_DIR}/options/options_parser.cc - ${ROCKSDB_SOURCE_DIR}/port/stack_trace.cc - ${ROCKSDB_SOURCE_DIR}/table/adaptive/adaptive_table_factory.cc - ${ROCKSDB_SOURCE_DIR}/table/block_based/binary_search_index_reader.cc - ${ROCKSDB_SOURCE_DIR}/table/block_based/block.cc - ${ROCKSDB_SOURCE_DIR}/table/block_based/block_based_filter_block.cc - ${ROCKSDB_SOURCE_DIR}/table/block_based/block_based_table_builder.cc - ${ROCKSDB_SOURCE_DIR}/table/block_based/block_based_table_factory.cc - ${ROCKSDB_SOURCE_DIR}/table/block_based/block_based_table_iterator.cc - ${ROCKSDB_SOURCE_DIR}/table/block_based/block_based_table_reader.cc - ${ROCKSDB_SOURCE_DIR}/table/block_based/block_builder.cc - ${ROCKSDB_SOURCE_DIR}/table/block_based/block_prefetcher.cc - ${ROCKSDB_SOURCE_DIR}/table/block_based/block_prefix_index.cc - ${ROCKSDB_SOURCE_DIR}/table/block_based/data_block_hash_index.cc - ${ROCKSDB_SOURCE_DIR}/table/block_based/data_block_footer.cc - ${ROCKSDB_SOURCE_DIR}/table/block_based/filter_block_reader_common.cc - ${ROCKSDB_SOURCE_DIR}/table/block_based/filter_policy.cc - ${ROCKSDB_SOURCE_DIR}/table/block_based/flush_block_policy.cc - ${ROCKSDB_SOURCE_DIR}/table/block_based/full_filter_block.cc - ${ROCKSDB_SOURCE_DIR}/table/block_based/hash_index_reader.cc - ${ROCKSDB_SOURCE_DIR}/table/block_based/index_builder.cc - ${ROCKSDB_SOURCE_DIR}/table/block_based/index_reader_common.cc - ${ROCKSDB_SOURCE_DIR}/table/block_based/parsed_full_filter_block.cc - ${ROCKSDB_SOURCE_DIR}/table/block_based/partitioned_filter_block.cc - ${ROCKSDB_SOURCE_DIR}/table/block_based/partitioned_index_iterator.cc - ${ROCKSDB_SOURCE_DIR}/table/block_based/partitioned_index_reader.cc - ${ROCKSDB_SOURCE_DIR}/table/block_based/reader_common.cc - ${ROCKSDB_SOURCE_DIR}/table/block_based/uncompression_dict_reader.cc - ${ROCKSDB_SOURCE_DIR}/table/block_fetcher.cc - ${ROCKSDB_SOURCE_DIR}/table/cuckoo/cuckoo_table_builder.cc - ${ROCKSDB_SOURCE_DIR}/table/cuckoo/cuckoo_table_factory.cc - ${ROCKSDB_SOURCE_DIR}/table/cuckoo/cuckoo_table_reader.cc - ${ROCKSDB_SOURCE_DIR}/table/format.cc - ${ROCKSDB_SOURCE_DIR}/table/get_context.cc - ${ROCKSDB_SOURCE_DIR}/table/iterator.cc - ${ROCKSDB_SOURCE_DIR}/table/merging_iterator.cc - ${ROCKSDB_SOURCE_DIR}/table/meta_blocks.cc - ${ROCKSDB_SOURCE_DIR}/table/persistent_cache_helper.cc - ${ROCKSDB_SOURCE_DIR}/table/plain/plain_table_bloom.cc - ${ROCKSDB_SOURCE_DIR}/table/plain/plain_table_builder.cc - ${ROCKSDB_SOURCE_DIR}/table/plain/plain_table_factory.cc - ${ROCKSDB_SOURCE_DIR}/table/plain/plain_table_index.cc - ${ROCKSDB_SOURCE_DIR}/table/plain/plain_table_key_coding.cc - ${ROCKSDB_SOURCE_DIR}/table/plain/plain_table_reader.cc - ${ROCKSDB_SOURCE_DIR}/table/sst_file_dumper.cc - ${ROCKSDB_SOURCE_DIR}/table/sst_file_reader.cc - ${ROCKSDB_SOURCE_DIR}/table/sst_file_writer.cc - ${ROCKSDB_SOURCE_DIR}/table/table_factory.cc - ${ROCKSDB_SOURCE_DIR}/table/table_properties.cc - ${ROCKSDB_SOURCE_DIR}/table/two_level_iterator.cc - ${ROCKSDB_SOURCE_DIR}/test_util/sync_point.cc - ${ROCKSDB_SOURCE_DIR}/test_util/sync_point_impl.cc - ${ROCKSDB_SOURCE_DIR}/test_util/testutil.cc - ${ROCKSDB_SOURCE_DIR}/test_util/transaction_test_util.cc - 
${ROCKSDB_SOURCE_DIR}/tools/block_cache_analyzer/block_cache_trace_analyzer.cc - ${ROCKSDB_SOURCE_DIR}/tools/dump/db_dump_tool.cc - ${ROCKSDB_SOURCE_DIR}/tools/io_tracer_parser_tool.cc - ${ROCKSDB_SOURCE_DIR}/tools/ldb_cmd.cc - ${ROCKSDB_SOURCE_DIR}/tools/ldb_tool.cc - ${ROCKSDB_SOURCE_DIR}/tools/sst_dump_tool.cc - ${ROCKSDB_SOURCE_DIR}/tools/trace_analyzer_tool.cc - ${ROCKSDB_SOURCE_DIR}/trace_replay/trace_replay.cc - ${ROCKSDB_SOURCE_DIR}/trace_replay/block_cache_tracer.cc - ${ROCKSDB_SOURCE_DIR}/trace_replay/io_tracer.cc - ${ROCKSDB_SOURCE_DIR}/util/coding.cc - ${ROCKSDB_SOURCE_DIR}/util/compaction_job_stats_impl.cc - ${ROCKSDB_SOURCE_DIR}/util/comparator.cc - ${ROCKSDB_SOURCE_DIR}/util/compression_context_cache.cc - ${ROCKSDB_SOURCE_DIR}/util/concurrent_task_limiter_impl.cc - ${ROCKSDB_SOURCE_DIR}/util/crc32c.cc - ${ROCKSDB_SOURCE_DIR}/util/dynamic_bloom.cc - ${ROCKSDB_SOURCE_DIR}/util/hash.cc - ${ROCKSDB_SOURCE_DIR}/util/murmurhash.cc - ${ROCKSDB_SOURCE_DIR}/util/random.cc - ${ROCKSDB_SOURCE_DIR}/util/rate_limiter.cc - ${ROCKSDB_SOURCE_DIR}/util/slice.cc - ${ROCKSDB_SOURCE_DIR}/util/file_checksum_helper.cc - ${ROCKSDB_SOURCE_DIR}/util/status.cc - ${ROCKSDB_SOURCE_DIR}/util/string_util.cc - ${ROCKSDB_SOURCE_DIR}/util/thread_local.cc - ${ROCKSDB_SOURCE_DIR}/util/threadpool_imp.cc - ${ROCKSDB_SOURCE_DIR}/util/xxhash.cc - ${ROCKSDB_SOURCE_DIR}/utilities/backupable/backupable_db.cc - ${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_compaction_filter.cc - ${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_db.cc - ${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_db_impl.cc - ${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_db_impl_filesnapshot.cc - ${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_dump_tool.cc - ${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_file.cc - ${ROCKSDB_SOURCE_DIR}/utilities/cassandra/cassandra_compaction_filter.cc - ${ROCKSDB_SOURCE_DIR}/utilities/cassandra/format.cc - ${ROCKSDB_SOURCE_DIR}/utilities/cassandra/merge_operator.cc - ${ROCKSDB_SOURCE_DIR}/utilities/checkpoint/checkpoint_impl.cc - ${ROCKSDB_SOURCE_DIR}/utilities/compaction_filters/remove_emptyvalue_compactionfilter.cc - ${ROCKSDB_SOURCE_DIR}/utilities/debug.cc - ${ROCKSDB_SOURCE_DIR}/utilities/env_mirror.cc - ${ROCKSDB_SOURCE_DIR}/utilities/env_timed.cc - ${ROCKSDB_SOURCE_DIR}/utilities/fault_injection_env.cc - ${ROCKSDB_SOURCE_DIR}/utilities/fault_injection_fs.cc - ${ROCKSDB_SOURCE_DIR}/utilities/leveldb_options/leveldb_options.cc - ${ROCKSDB_SOURCE_DIR}/utilities/memory/memory_util.cc - ${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/bytesxor.cc - ${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/max.cc - ${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/put.cc - ${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/sortlist.cc - ${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/string_append/stringappend.cc - ${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/string_append/stringappend2.cc - ${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/uint64add.cc - ${ROCKSDB_SOURCE_DIR}/utilities/object_registry.cc - ${ROCKSDB_SOURCE_DIR}/utilities/option_change_migration/option_change_migration.cc - ${ROCKSDB_SOURCE_DIR}/utilities/options/options_util.cc - ${ROCKSDB_SOURCE_DIR}/utilities/persistent_cache/block_cache_tier.cc - ${ROCKSDB_SOURCE_DIR}/utilities/persistent_cache/block_cache_tier_file.cc - ${ROCKSDB_SOURCE_DIR}/utilities/persistent_cache/block_cache_tier_metadata.cc - ${ROCKSDB_SOURCE_DIR}/utilities/persistent_cache/persistent_cache_tier.cc - ${ROCKSDB_SOURCE_DIR}/utilities/persistent_cache/volatile_tier_impl.cc - 
${ROCKSDB_SOURCE_DIR}/utilities/simulator_cache/cache_simulator.cc - ${ROCKSDB_SOURCE_DIR}/utilities/simulator_cache/sim_cache.cc - ${ROCKSDB_SOURCE_DIR}/utilities/table_properties_collectors/compact_on_deletion_collector.cc - ${ROCKSDB_SOURCE_DIR}/utilities/trace/file_trace_reader_writer.cc - ${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/lock_manager.cc - ${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/point/point_lock_tracker.cc - ${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/point/point_lock_manager.cc - ${ROCKSDB_SOURCE_DIR}/utilities/transactions/optimistic_transaction_db_impl.cc - ${ROCKSDB_SOURCE_DIR}/utilities/transactions/optimistic_transaction.cc - ${ROCKSDB_SOURCE_DIR}/utilities/transactions/pessimistic_transaction.cc - ${ROCKSDB_SOURCE_DIR}/utilities/transactions/pessimistic_transaction_db.cc - ${ROCKSDB_SOURCE_DIR}/utilities/transactions/snapshot_checker.cc - ${ROCKSDB_SOURCE_DIR}/utilities/transactions/transaction_base.cc - ${ROCKSDB_SOURCE_DIR}/utilities/transactions/transaction_db_mutex_impl.cc - ${ROCKSDB_SOURCE_DIR}/utilities/transactions/transaction_util.cc - ${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_prepared_txn.cc - ${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_prepared_txn_db.cc - ${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_unprepared_txn.cc - ${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_unprepared_txn_db.cc - ${ROCKSDB_SOURCE_DIR}/utilities/ttl/db_ttl_impl.cc - ${ROCKSDB_SOURCE_DIR}/utilities/write_batch_with_index/write_batch_with_index.cc - ${ROCKSDB_SOURCE_DIR}/utilities/write_batch_with_index/write_batch_with_index_internal.cc + "${ROCKSDB_SOURCE_DIR}/cache/cache.cc" + "${ROCKSDB_SOURCE_DIR}/cache/clock_cache.cc" + "${ROCKSDB_SOURCE_DIR}/cache/lru_cache.cc" + "${ROCKSDB_SOURCE_DIR}/cache/sharded_cache.cc" + "${ROCKSDB_SOURCE_DIR}/db/arena_wrapped_db_iter.cc" + "${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_addition.cc" + "${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_builder.cc" + "${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_cache.cc" + "${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_garbage.cc" + "${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_meta.cc" + "${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_reader.cc" + "${ROCKSDB_SOURCE_DIR}/db/blob/blob_log_format.cc" + "${ROCKSDB_SOURCE_DIR}/db/blob/blob_log_sequential_reader.cc" + "${ROCKSDB_SOURCE_DIR}/db/blob/blob_log_writer.cc" + "${ROCKSDB_SOURCE_DIR}/db/builder.cc" + "${ROCKSDB_SOURCE_DIR}/db/c.cc" + "${ROCKSDB_SOURCE_DIR}/db/column_family.cc" + "${ROCKSDB_SOURCE_DIR}/db/compacted_db_impl.cc" + "${ROCKSDB_SOURCE_DIR}/db/compaction/compaction.cc" + "${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_iterator.cc" + "${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_picker.cc" + "${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_job.cc" + "${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_picker_fifo.cc" + "${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_picker_level.cc" + "${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_picker_universal.cc" + "${ROCKSDB_SOURCE_DIR}/db/compaction/sst_partitioner.cc" + "${ROCKSDB_SOURCE_DIR}/db/convenience.cc" + "${ROCKSDB_SOURCE_DIR}/db/db_filesnapshot.cc" + "${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl.cc" + "${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_write.cc" + "${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_compaction_flush.cc" + "${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_files.cc" + "${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_open.cc" + "${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_debug.cc" + "${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_experimental.cc" + "${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_readonly.cc" 
+ "${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_secondary.cc" + "${ROCKSDB_SOURCE_DIR}/db/db_info_dumper.cc" + "${ROCKSDB_SOURCE_DIR}/db/db_iter.cc" + "${ROCKSDB_SOURCE_DIR}/db/dbformat.cc" + "${ROCKSDB_SOURCE_DIR}/db/error_handler.cc" + "${ROCKSDB_SOURCE_DIR}/db/event_helpers.cc" + "${ROCKSDB_SOURCE_DIR}/db/experimental.cc" + "${ROCKSDB_SOURCE_DIR}/db/external_sst_file_ingestion_job.cc" + "${ROCKSDB_SOURCE_DIR}/db/file_indexer.cc" + "${ROCKSDB_SOURCE_DIR}/db/flush_job.cc" + "${ROCKSDB_SOURCE_DIR}/db/flush_scheduler.cc" + "${ROCKSDB_SOURCE_DIR}/db/forward_iterator.cc" + "${ROCKSDB_SOURCE_DIR}/db/import_column_family_job.cc" + "${ROCKSDB_SOURCE_DIR}/db/internal_stats.cc" + "${ROCKSDB_SOURCE_DIR}/db/logs_with_prep_tracker.cc" + "${ROCKSDB_SOURCE_DIR}/db/log_reader.cc" + "${ROCKSDB_SOURCE_DIR}/db/log_writer.cc" + "${ROCKSDB_SOURCE_DIR}/db/malloc_stats.cc" + "${ROCKSDB_SOURCE_DIR}/db/memtable.cc" + "${ROCKSDB_SOURCE_DIR}/db/memtable_list.cc" + "${ROCKSDB_SOURCE_DIR}/db/merge_helper.cc" + "${ROCKSDB_SOURCE_DIR}/db/merge_operator.cc" + "${ROCKSDB_SOURCE_DIR}/db/output_validator.cc" + "${ROCKSDB_SOURCE_DIR}/db/periodic_work_scheduler.cc" + "${ROCKSDB_SOURCE_DIR}/db/range_del_aggregator.cc" + "${ROCKSDB_SOURCE_DIR}/db/range_tombstone_fragmenter.cc" + "${ROCKSDB_SOURCE_DIR}/db/repair.cc" + "${ROCKSDB_SOURCE_DIR}/db/snapshot_impl.cc" + "${ROCKSDB_SOURCE_DIR}/db/table_cache.cc" + "${ROCKSDB_SOURCE_DIR}/db/table_properties_collector.cc" + "${ROCKSDB_SOURCE_DIR}/db/transaction_log_impl.cc" + "${ROCKSDB_SOURCE_DIR}/db/trim_history_scheduler.cc" + "${ROCKSDB_SOURCE_DIR}/db/version_builder.cc" + "${ROCKSDB_SOURCE_DIR}/db/version_edit.cc" + "${ROCKSDB_SOURCE_DIR}/db/version_edit_handler.cc" + "${ROCKSDB_SOURCE_DIR}/db/version_set.cc" + "${ROCKSDB_SOURCE_DIR}/db/wal_edit.cc" + "${ROCKSDB_SOURCE_DIR}/db/wal_manager.cc" + "${ROCKSDB_SOURCE_DIR}/db/write_batch.cc" + "${ROCKSDB_SOURCE_DIR}/db/write_batch_base.cc" + "${ROCKSDB_SOURCE_DIR}/db/write_controller.cc" + "${ROCKSDB_SOURCE_DIR}/db/write_thread.cc" + "${ROCKSDB_SOURCE_DIR}/env/env.cc" + "${ROCKSDB_SOURCE_DIR}/env/env_chroot.cc" + "${ROCKSDB_SOURCE_DIR}/env/env_encryption.cc" + "${ROCKSDB_SOURCE_DIR}/env/env_hdfs.cc" + "${ROCKSDB_SOURCE_DIR}/env/file_system.cc" + "${ROCKSDB_SOURCE_DIR}/env/file_system_tracer.cc" + "${ROCKSDB_SOURCE_DIR}/env/mock_env.cc" + "${ROCKSDB_SOURCE_DIR}/file/delete_scheduler.cc" + "${ROCKSDB_SOURCE_DIR}/file/file_prefetch_buffer.cc" + "${ROCKSDB_SOURCE_DIR}/file/file_util.cc" + "${ROCKSDB_SOURCE_DIR}/file/filename.cc" + "${ROCKSDB_SOURCE_DIR}/file/random_access_file_reader.cc" + "${ROCKSDB_SOURCE_DIR}/file/read_write_util.cc" + "${ROCKSDB_SOURCE_DIR}/file/readahead_raf.cc" + "${ROCKSDB_SOURCE_DIR}/file/sequence_file_reader.cc" + "${ROCKSDB_SOURCE_DIR}/file/sst_file_manager_impl.cc" + "${ROCKSDB_SOURCE_DIR}/file/writable_file_writer.cc" + "${ROCKSDB_SOURCE_DIR}/logging/auto_roll_logger.cc" + "${ROCKSDB_SOURCE_DIR}/logging/event_logger.cc" + "${ROCKSDB_SOURCE_DIR}/logging/log_buffer.cc" + "${ROCKSDB_SOURCE_DIR}/memory/arena.cc" + "${ROCKSDB_SOURCE_DIR}/memory/concurrent_arena.cc" + "${ROCKSDB_SOURCE_DIR}/memory/jemalloc_nodump_allocator.cc" + "${ROCKSDB_SOURCE_DIR}/memory/memkind_kmem_allocator.cc" + "${ROCKSDB_SOURCE_DIR}/memtable/alloc_tracker.cc" + "${ROCKSDB_SOURCE_DIR}/memtable/hash_linklist_rep.cc" + "${ROCKSDB_SOURCE_DIR}/memtable/hash_skiplist_rep.cc" + "${ROCKSDB_SOURCE_DIR}/memtable/skiplistrep.cc" + "${ROCKSDB_SOURCE_DIR}/memtable/vectorrep.cc" + "${ROCKSDB_SOURCE_DIR}/memtable/write_buffer_manager.cc" + 
"${ROCKSDB_SOURCE_DIR}/monitoring/histogram.cc" + "${ROCKSDB_SOURCE_DIR}/monitoring/histogram_windowing.cc" + "${ROCKSDB_SOURCE_DIR}/monitoring/in_memory_stats_history.cc" + "${ROCKSDB_SOURCE_DIR}/monitoring/instrumented_mutex.cc" + "${ROCKSDB_SOURCE_DIR}/monitoring/iostats_context.cc" + "${ROCKSDB_SOURCE_DIR}/monitoring/perf_context.cc" + "${ROCKSDB_SOURCE_DIR}/monitoring/perf_level.cc" + "${ROCKSDB_SOURCE_DIR}/monitoring/persistent_stats_history.cc" + "${ROCKSDB_SOURCE_DIR}/monitoring/statistics.cc" + "${ROCKSDB_SOURCE_DIR}/monitoring/thread_status_impl.cc" + "${ROCKSDB_SOURCE_DIR}/monitoring/thread_status_updater.cc" + "${ROCKSDB_SOURCE_DIR}/monitoring/thread_status_util.cc" + "${ROCKSDB_SOURCE_DIR}/monitoring/thread_status_util_debug.cc" + "${ROCKSDB_SOURCE_DIR}/options/cf_options.cc" + "${ROCKSDB_SOURCE_DIR}/options/configurable.cc" + "${ROCKSDB_SOURCE_DIR}/options/customizable.cc" + "${ROCKSDB_SOURCE_DIR}/options/db_options.cc" + "${ROCKSDB_SOURCE_DIR}/options/options.cc" + "${ROCKSDB_SOURCE_DIR}/options/options_helper.cc" + "${ROCKSDB_SOURCE_DIR}/options/options_parser.cc" + "${ROCKSDB_SOURCE_DIR}/port/stack_trace.cc" + "${ROCKSDB_SOURCE_DIR}/table/adaptive/adaptive_table_factory.cc" + "${ROCKSDB_SOURCE_DIR}/table/block_based/binary_search_index_reader.cc" + "${ROCKSDB_SOURCE_DIR}/table/block_based/block.cc" + "${ROCKSDB_SOURCE_DIR}/table/block_based/block_based_filter_block.cc" + "${ROCKSDB_SOURCE_DIR}/table/block_based/block_based_table_builder.cc" + "${ROCKSDB_SOURCE_DIR}/table/block_based/block_based_table_factory.cc" + "${ROCKSDB_SOURCE_DIR}/table/block_based/block_based_table_iterator.cc" + "${ROCKSDB_SOURCE_DIR}/table/block_based/block_based_table_reader.cc" + "${ROCKSDB_SOURCE_DIR}/table/block_based/block_builder.cc" + "${ROCKSDB_SOURCE_DIR}/table/block_based/block_prefetcher.cc" + "${ROCKSDB_SOURCE_DIR}/table/block_based/block_prefix_index.cc" + "${ROCKSDB_SOURCE_DIR}/table/block_based/data_block_hash_index.cc" + "${ROCKSDB_SOURCE_DIR}/table/block_based/data_block_footer.cc" + "${ROCKSDB_SOURCE_DIR}/table/block_based/filter_block_reader_common.cc" + "${ROCKSDB_SOURCE_DIR}/table/block_based/filter_policy.cc" + "${ROCKSDB_SOURCE_DIR}/table/block_based/flush_block_policy.cc" + "${ROCKSDB_SOURCE_DIR}/table/block_based/full_filter_block.cc" + "${ROCKSDB_SOURCE_DIR}/table/block_based/hash_index_reader.cc" + "${ROCKSDB_SOURCE_DIR}/table/block_based/index_builder.cc" + "${ROCKSDB_SOURCE_DIR}/table/block_based/index_reader_common.cc" + "${ROCKSDB_SOURCE_DIR}/table/block_based/parsed_full_filter_block.cc" + "${ROCKSDB_SOURCE_DIR}/table/block_based/partitioned_filter_block.cc" + "${ROCKSDB_SOURCE_DIR}/table/block_based/partitioned_index_iterator.cc" + "${ROCKSDB_SOURCE_DIR}/table/block_based/partitioned_index_reader.cc" + "${ROCKSDB_SOURCE_DIR}/table/block_based/reader_common.cc" + "${ROCKSDB_SOURCE_DIR}/table/block_based/uncompression_dict_reader.cc" + "${ROCKSDB_SOURCE_DIR}/table/block_fetcher.cc" + "${ROCKSDB_SOURCE_DIR}/table/cuckoo/cuckoo_table_builder.cc" + "${ROCKSDB_SOURCE_DIR}/table/cuckoo/cuckoo_table_factory.cc" + "${ROCKSDB_SOURCE_DIR}/table/cuckoo/cuckoo_table_reader.cc" + "${ROCKSDB_SOURCE_DIR}/table/format.cc" + "${ROCKSDB_SOURCE_DIR}/table/get_context.cc" + "${ROCKSDB_SOURCE_DIR}/table/iterator.cc" + "${ROCKSDB_SOURCE_DIR}/table/merging_iterator.cc" + "${ROCKSDB_SOURCE_DIR}/table/meta_blocks.cc" + "${ROCKSDB_SOURCE_DIR}/table/persistent_cache_helper.cc" + "${ROCKSDB_SOURCE_DIR}/table/plain/plain_table_bloom.cc" + 
"${ROCKSDB_SOURCE_DIR}/table/plain/plain_table_builder.cc" + "${ROCKSDB_SOURCE_DIR}/table/plain/plain_table_factory.cc" + "${ROCKSDB_SOURCE_DIR}/table/plain/plain_table_index.cc" + "${ROCKSDB_SOURCE_DIR}/table/plain/plain_table_key_coding.cc" + "${ROCKSDB_SOURCE_DIR}/table/plain/plain_table_reader.cc" + "${ROCKSDB_SOURCE_DIR}/table/sst_file_dumper.cc" + "${ROCKSDB_SOURCE_DIR}/table/sst_file_reader.cc" + "${ROCKSDB_SOURCE_DIR}/table/sst_file_writer.cc" + "${ROCKSDB_SOURCE_DIR}/table/table_factory.cc" + "${ROCKSDB_SOURCE_DIR}/table/table_properties.cc" + "${ROCKSDB_SOURCE_DIR}/table/two_level_iterator.cc" + "${ROCKSDB_SOURCE_DIR}/test_util/sync_point.cc" + "${ROCKSDB_SOURCE_DIR}/test_util/sync_point_impl.cc" + "${ROCKSDB_SOURCE_DIR}/test_util/testutil.cc" + "${ROCKSDB_SOURCE_DIR}/test_util/transaction_test_util.cc" + "${ROCKSDB_SOURCE_DIR}/tools/block_cache_analyzer/block_cache_trace_analyzer.cc" + "${ROCKSDB_SOURCE_DIR}/tools/dump/db_dump_tool.cc" + "${ROCKSDB_SOURCE_DIR}/tools/io_tracer_parser_tool.cc" + "${ROCKSDB_SOURCE_DIR}/tools/ldb_cmd.cc" + "${ROCKSDB_SOURCE_DIR}/tools/ldb_tool.cc" + "${ROCKSDB_SOURCE_DIR}/tools/sst_dump_tool.cc" + "${ROCKSDB_SOURCE_DIR}/tools/trace_analyzer_tool.cc" + "${ROCKSDB_SOURCE_DIR}/trace_replay/trace_replay.cc" + "${ROCKSDB_SOURCE_DIR}/trace_replay/block_cache_tracer.cc" + "${ROCKSDB_SOURCE_DIR}/trace_replay/io_tracer.cc" + "${ROCKSDB_SOURCE_DIR}/util/coding.cc" + "${ROCKSDB_SOURCE_DIR}/util/compaction_job_stats_impl.cc" + "${ROCKSDB_SOURCE_DIR}/util/comparator.cc" + "${ROCKSDB_SOURCE_DIR}/util/compression_context_cache.cc" + "${ROCKSDB_SOURCE_DIR}/util/concurrent_task_limiter_impl.cc" + "${ROCKSDB_SOURCE_DIR}/util/crc32c.cc" + "${ROCKSDB_SOURCE_DIR}/util/dynamic_bloom.cc" + "${ROCKSDB_SOURCE_DIR}/util/hash.cc" + "${ROCKSDB_SOURCE_DIR}/util/murmurhash.cc" + "${ROCKSDB_SOURCE_DIR}/util/random.cc" + "${ROCKSDB_SOURCE_DIR}/util/rate_limiter.cc" + "${ROCKSDB_SOURCE_DIR}/util/slice.cc" + "${ROCKSDB_SOURCE_DIR}/util/file_checksum_helper.cc" + "${ROCKSDB_SOURCE_DIR}/util/status.cc" + "${ROCKSDB_SOURCE_DIR}/util/string_util.cc" + "${ROCKSDB_SOURCE_DIR}/util/thread_local.cc" + "${ROCKSDB_SOURCE_DIR}/util/threadpool_imp.cc" + "${ROCKSDB_SOURCE_DIR}/util/xxhash.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/backupable/backupable_db.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_compaction_filter.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_db.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_db_impl.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_db_impl_filesnapshot.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_dump_tool.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_file.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/cassandra/cassandra_compaction_filter.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/cassandra/format.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/cassandra/merge_operator.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/checkpoint/checkpoint_impl.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/compaction_filters/remove_emptyvalue_compactionfilter.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/debug.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/env_mirror.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/env_timed.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/fault_injection_env.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/fault_injection_fs.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/leveldb_options/leveldb_options.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/memory/memory_util.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/bytesxor.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/max.cc" + 
"${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/put.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/sortlist.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/string_append/stringappend.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/string_append/stringappend2.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/uint64add.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/object_registry.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/option_change_migration/option_change_migration.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/options/options_util.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/persistent_cache/block_cache_tier.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/persistent_cache/block_cache_tier_file.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/persistent_cache/block_cache_tier_metadata.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/persistent_cache/persistent_cache_tier.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/persistent_cache/volatile_tier_impl.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/simulator_cache/cache_simulator.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/simulator_cache/sim_cache.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/table_properties_collectors/compact_on_deletion_collector.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/trace/file_trace_reader_writer.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/lock_manager.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/point/point_lock_tracker.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/point/point_lock_manager.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/transactions/optimistic_transaction_db_impl.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/transactions/optimistic_transaction.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/transactions/pessimistic_transaction.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/transactions/pessimistic_transaction_db.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/transactions/snapshot_checker.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/transactions/transaction_base.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/transactions/transaction_db_mutex_impl.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/transactions/transaction_util.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_prepared_txn.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_prepared_txn_db.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_unprepared_txn.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_unprepared_txn_db.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/ttl/db_ttl_impl.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/write_batch_with_index/write_batch_with_index.cc" + "${ROCKSDB_SOURCE_DIR}/utilities/write_batch_with_index/write_batch_with_index_internal.cc" $) if(HAVE_SSE42 AND NOT MSVC) set_source_files_properties( - ${ROCKSDB_SOURCE_DIR}/util/crc32c.cc + "${ROCKSDB_SOURCE_DIR}/util/crc32c.cc" PROPERTIES COMPILE_FLAGS "-msse4.2 -mpclmul") endif() if(CMAKE_SYSTEM_PROCESSOR MATCHES "^(powerpc|ppc)64") list(APPEND SOURCES - ${ROCKSDB_SOURCE_DIR}/util/crc32c_ppc.c - ${ROCKSDB_SOURCE_DIR}/util/crc32c_ppc_asm.S) + "${ROCKSDB_SOURCE_DIR}/util/crc32c_ppc.c" + "${ROCKSDB_SOURCE_DIR}/util/crc32c_ppc_asm.S") endif(CMAKE_SYSTEM_PROCESSOR MATCHES "^(powerpc|ppc)64") if(HAS_ARMV8_CRC) list(APPEND SOURCES - ${ROCKSDB_SOURCE_DIR}/util/crc32c_arm64.cc) + "${ROCKSDB_SOURCE_DIR}/util/crc32c_arm64.cc") endif(HAS_ARMV8_CRC) -if(WIN32) - list(APPEND SOURCES - ${ROCKSDB_SOURCE_DIR}/port/win/io_win.cc - ${ROCKSDB_SOURCE_DIR}/port/win/env_win.cc - ${ROCKSDB_SOURCE_DIR}/port/win/env_default.cc - ${ROCKSDB_SOURCE_DIR}/port/win/port_win.cc - ${ROCKSDB_SOURCE_DIR}/port/win/win_logger.cc) - if(NOT MINGW) - # Mingw only 
supports std::thread when using - # posix threads. - list(APPEND SOURCES - ${ROCKSDB_SOURCE_DIR}/port/win/win_thread.cc) - endif() -if(WITH_XPRESS) - list(APPEND SOURCES - ${ROCKSDB_SOURCE_DIR}/port/win/xpress_win.cc) -endif() - -if(WITH_JEMALLOC) - list(APPEND SOURCES - ${ROCKSDB_SOURCE_DIR}/port/win/win_jemalloc.cc) -endif() - -else() - list(APPEND SOURCES - ${ROCKSDB_SOURCE_DIR}/port/port_posix.cc - ${ROCKSDB_SOURCE_DIR}/env/env_posix.cc - ${ROCKSDB_SOURCE_DIR}/env/fs_posix.cc - ${ROCKSDB_SOURCE_DIR}/env/io_posix.cc) -endif() +list(APPEND SOURCES + "${ROCKSDB_SOURCE_DIR}/port/port_posix.cc" + "${ROCKSDB_SOURCE_DIR}/env/env_posix.cc" + "${ROCKSDB_SOURCE_DIR}/env/fs_posix.cc" + "${ROCKSDB_SOURCE_DIR}/env/io_posix.cc") if(WITH_FOLLY_DISTRIBUTED_MUTEX) list(APPEND SOURCES - ${ROCKSDB_SOURCE_DIR}/third-party/folly/folly/detail/Futex.cpp - ${ROCKSDB_SOURCE_DIR}/third-party/folly/folly/synchronization/AtomicNotification.cpp - ${ROCKSDB_SOURCE_DIR}/third-party/folly/folly/synchronization/DistributedMutex.cpp - ${ROCKSDB_SOURCE_DIR}/third-party/folly/folly/synchronization/ParkingLot.cpp - ${ROCKSDB_SOURCE_DIR}/third-party/folly/folly/synchronization/WaitOptions.cpp) + "${ROCKSDB_SOURCE_DIR}/third-party/folly/folly/detail/Futex.cpp" + "${ROCKSDB_SOURCE_DIR}/third-party/folly/folly/synchronization/AtomicNotification.cpp" + "${ROCKSDB_SOURCE_DIR}/third-party/folly/folly/synchronization/DistributedMutex.cpp" + "${ROCKSDB_SOURCE_DIR}/third-party/folly/folly/synchronization/ParkingLot.cpp" + "${ROCKSDB_SOURCE_DIR}/third-party/folly/folly/synchronization/WaitOptions.cpp") endif() set(ROCKSDB_STATIC_LIB rocksdb) -if(WIN32) - set(SYSTEM_LIBS ${SYSTEM_LIBS} shlwapi.lib rpcrt4.lib) -else() - set(SYSTEM_LIBS ${CMAKE_THREAD_LIBS_INIT}) -endif() - add_library(${ROCKSDB_STATIC_LIB} STATIC ${SOURCES}) target_link_libraries(${ROCKSDB_STATIC_LIB} PRIVATE ${THIRDPARTY_LIBS} ${SYSTEM_LIBS}) diff --git a/contrib/rocksdb-cmake/rocksdb_build_version.cc b/contrib/rocksdb-cmake/rocksdb_build_version.cc new file mode 100644 index 00000000000..8697652ae9f --- /dev/null +++ b/contrib/rocksdb-cmake/rocksdb_build_version.cc @@ -0,0 +1,3 @@ +const char* rocksdb_build_git_sha = "rocksdb_build_git_sha:0"; +const char* rocksdb_build_git_date = "rocksdb_build_git_date:2000-01-01"; +const char* rocksdb_build_compile_date = "2000-01-01"; diff --git a/contrib/simdjson b/contrib/simdjson index 95b4870e20b..8df32cea335 160000 --- a/contrib/simdjson +++ b/contrib/simdjson @@ -1 +1 @@ -Subproject commit 95b4870e20be5f97d9dcf63b23b1c6f520c366c1 +Subproject commit 8df32cea3359cb30120795da6020b3b73da01d38 diff --git a/contrib/simdjson-cmake/CMakeLists.txt b/contrib/simdjson-cmake/CMakeLists.txt index 2fb60b905da..d3bcf6c046c 100644 --- a/contrib/simdjson-cmake/CMakeLists.txt +++ b/contrib/simdjson-cmake/CMakeLists.txt @@ -1,6 +1,6 @@ set(SIMDJSON_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/simdjson/include") set(SIMDJSON_SRC_DIR "${ClickHouse_SOURCE_DIR}/contrib/simdjson/src") -set(SIMDJSON_SRC ${SIMDJSON_SRC_DIR}/simdjson.cpp) +set(SIMDJSON_SRC "${SIMDJSON_SRC_DIR}/simdjson.cpp") add_library(simdjson ${SIMDJSON_SRC}) target_include_directories(simdjson SYSTEM PUBLIC "${SIMDJSON_INCLUDE_DIR}" PRIVATE "${SIMDJSON_SRC_DIR}") diff --git a/contrib/stats-cmake/CMakeLists.txt b/contrib/stats-cmake/CMakeLists.txt index a159e85a0e3..8279e49c3f0 100644 --- a/contrib/stats-cmake/CMakeLists.txt +++ b/contrib/stats-cmake/CMakeLists.txt @@ -1,7 +1,7 @@ # The stats is a header-only library of probability density functions, # cumulative distribution 
functions, quantile functions, and random sampling methods. -set(STATS_INCLUDE_DIR ${ClickHouse_SOURCE_DIR}/contrib/stats/include) -set(GCEM_INCLUDE_DIR ${ClickHouse_SOURCE_DIR}/contrib/gcem/include) +set(STATS_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/stats/include") +set(GCEM_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/gcem/include") add_library(stats INTERFACE) diff --git a/contrib/unixodbc-cmake/CMakeLists.txt b/contrib/unixodbc-cmake/CMakeLists.txt index c971c4bdd89..c154533739c 100644 --- a/contrib/unixodbc-cmake/CMakeLists.txt +++ b/contrib/unixodbc-cmake/CMakeLists.txt @@ -2,7 +2,7 @@ if (NOT USE_INTERNAL_ODBC_LIBRARY) return() endif() -set (LIBRARY_DIR ${ClickHouse_SOURCE_DIR}/contrib/unixodbc) +set (LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/unixodbc") # ltdl @@ -10,14 +10,14 @@ set (SRCS_LTDL # This file is generated by 'libtool' inside libltdl directory and then removed. linux_x86_64/libltdl/libltdlcS.c - ${LIBRARY_DIR}/libltdl/lt__alloc.c - ${LIBRARY_DIR}/libltdl/lt__strl.c - ${LIBRARY_DIR}/libltdl/ltdl.c - ${LIBRARY_DIR}/libltdl/lt_dlloader.c - ${LIBRARY_DIR}/libltdl/slist.c - ${LIBRARY_DIR}/libltdl/lt_error.c - ${LIBRARY_DIR}/libltdl/loaders/dlopen.c - ${LIBRARY_DIR}/libltdl/loaders/preopen.c + "${LIBRARY_DIR}/libltdl/lt__alloc.c" + "${LIBRARY_DIR}/libltdl/lt__strl.c" + "${LIBRARY_DIR}/libltdl/ltdl.c" + "${LIBRARY_DIR}/libltdl/lt_dlloader.c" + "${LIBRARY_DIR}/libltdl/slist.c" + "${LIBRARY_DIR}/libltdl/lt_error.c" + "${LIBRARY_DIR}/libltdl/loaders/dlopen.c" + "${LIBRARY_DIR}/libltdl/loaders/preopen.c" ) add_library (ltdl ${SRCS_LTDL}) @@ -26,8 +26,8 @@ target_include_directories(ltdl PRIVATE linux_x86_64/libltdl PUBLIC - ${LIBRARY_DIR}/libltdl - ${LIBRARY_DIR}/libltdl/libltdl + "${LIBRARY_DIR}/libltdl" + "${LIBRARY_DIR}/libltdl/libltdl" ) target_compile_definitions(ltdl PRIVATE -DHAVE_CONFIG_H -DLTDL -DLTDLOPEN=libltdlc) target_compile_options(ltdl PRIVATE -Wno-constant-logical-operand -Wno-unknown-warning-option -O2) @@ -35,238 +35,238 @@ target_compile_options(ltdl PRIVATE -Wno-constant-logical-operand -Wno-unknown-w # odbc set (SRCS - ${LIBRARY_DIR}/DriverManager/__attribute.c - ${LIBRARY_DIR}/DriverManager/__connection.c - ${LIBRARY_DIR}/DriverManager/__handles.c - ${LIBRARY_DIR}/DriverManager/__info.c - ${LIBRARY_DIR}/DriverManager/__stats.c - ${LIBRARY_DIR}/DriverManager/SQLAllocConnect.c - ${LIBRARY_DIR}/DriverManager/SQLAllocEnv.c - ${LIBRARY_DIR}/DriverManager/SQLAllocHandle.c - ${LIBRARY_DIR}/DriverManager/SQLAllocHandleStd.c - ${LIBRARY_DIR}/DriverManager/SQLAllocStmt.c - ${LIBRARY_DIR}/DriverManager/SQLBindCol.c - ${LIBRARY_DIR}/DriverManager/SQLBindParam.c - ${LIBRARY_DIR}/DriverManager/SQLBindParameter.c - ${LIBRARY_DIR}/DriverManager/SQLBrowseConnect.c - ${LIBRARY_DIR}/DriverManager/SQLBrowseConnectW.c - ${LIBRARY_DIR}/DriverManager/SQLBulkOperations.c - ${LIBRARY_DIR}/DriverManager/SQLCancel.c - ${LIBRARY_DIR}/DriverManager/SQLCancelHandle.c - ${LIBRARY_DIR}/DriverManager/SQLCloseCursor.c - ${LIBRARY_DIR}/DriverManager/SQLColAttribute.c - ${LIBRARY_DIR}/DriverManager/SQLColAttributes.c - ${LIBRARY_DIR}/DriverManager/SQLColAttributesW.c - ${LIBRARY_DIR}/DriverManager/SQLColAttributeW.c - ${LIBRARY_DIR}/DriverManager/SQLColumnPrivileges.c - ${LIBRARY_DIR}/DriverManager/SQLColumnPrivilegesW.c - ${LIBRARY_DIR}/DriverManager/SQLColumns.c - ${LIBRARY_DIR}/DriverManager/SQLColumnsW.c - ${LIBRARY_DIR}/DriverManager/SQLConnect.c - ${LIBRARY_DIR}/DriverManager/SQLConnectW.c - ${LIBRARY_DIR}/DriverManager/SQLCopyDesc.c - 
${LIBRARY_DIR}/DriverManager/SQLDataSources.c - ${LIBRARY_DIR}/DriverManager/SQLDataSourcesW.c - ${LIBRARY_DIR}/DriverManager/SQLDescribeCol.c - ${LIBRARY_DIR}/DriverManager/SQLDescribeColW.c - ${LIBRARY_DIR}/DriverManager/SQLDescribeParam.c - ${LIBRARY_DIR}/DriverManager/SQLDisconnect.c - ${LIBRARY_DIR}/DriverManager/SQLDriverConnect.c - ${LIBRARY_DIR}/DriverManager/SQLDriverConnectW.c - ${LIBRARY_DIR}/DriverManager/SQLDrivers.c - ${LIBRARY_DIR}/DriverManager/SQLDriversW.c - ${LIBRARY_DIR}/DriverManager/SQLEndTran.c - ${LIBRARY_DIR}/DriverManager/SQLError.c - ${LIBRARY_DIR}/DriverManager/SQLErrorW.c - ${LIBRARY_DIR}/DriverManager/SQLExecDirect.c - ${LIBRARY_DIR}/DriverManager/SQLExecDirectW.c - ${LIBRARY_DIR}/DriverManager/SQLExecute.c - ${LIBRARY_DIR}/DriverManager/SQLExtendedFetch.c - ${LIBRARY_DIR}/DriverManager/SQLFetch.c - ${LIBRARY_DIR}/DriverManager/SQLFetchScroll.c - ${LIBRARY_DIR}/DriverManager/SQLForeignKeys.c - ${LIBRARY_DIR}/DriverManager/SQLForeignKeysW.c - ${LIBRARY_DIR}/DriverManager/SQLFreeConnect.c - ${LIBRARY_DIR}/DriverManager/SQLFreeEnv.c - ${LIBRARY_DIR}/DriverManager/SQLFreeHandle.c - ${LIBRARY_DIR}/DriverManager/SQLFreeStmt.c - ${LIBRARY_DIR}/DriverManager/SQLGetConnectAttr.c - ${LIBRARY_DIR}/DriverManager/SQLGetConnectAttrW.c - ${LIBRARY_DIR}/DriverManager/SQLGetConnectOption.c - ${LIBRARY_DIR}/DriverManager/SQLGetConnectOptionW.c - ${LIBRARY_DIR}/DriverManager/SQLGetCursorName.c - ${LIBRARY_DIR}/DriverManager/SQLGetCursorNameW.c - ${LIBRARY_DIR}/DriverManager/SQLGetData.c - ${LIBRARY_DIR}/DriverManager/SQLGetDescField.c - ${LIBRARY_DIR}/DriverManager/SQLGetDescFieldW.c - ${LIBRARY_DIR}/DriverManager/SQLGetDescRec.c - ${LIBRARY_DIR}/DriverManager/SQLGetDescRecW.c - ${LIBRARY_DIR}/DriverManager/SQLGetDiagField.c - ${LIBRARY_DIR}/DriverManager/SQLGetDiagFieldW.c - ${LIBRARY_DIR}/DriverManager/SQLGetDiagRec.c - ${LIBRARY_DIR}/DriverManager/SQLGetDiagRecW.c - ${LIBRARY_DIR}/DriverManager/SQLGetEnvAttr.c - ${LIBRARY_DIR}/DriverManager/SQLGetFunctions.c - ${LIBRARY_DIR}/DriverManager/SQLGetInfo.c - ${LIBRARY_DIR}/DriverManager/SQLGetInfoW.c - ${LIBRARY_DIR}/DriverManager/SQLGetStmtAttr.c - ${LIBRARY_DIR}/DriverManager/SQLGetStmtAttrW.c - ${LIBRARY_DIR}/DriverManager/SQLGetStmtOption.c - ${LIBRARY_DIR}/DriverManager/SQLGetTypeInfo.c - ${LIBRARY_DIR}/DriverManager/SQLGetTypeInfoW.c - ${LIBRARY_DIR}/DriverManager/SQLMoreResults.c - ${LIBRARY_DIR}/DriverManager/SQLNativeSql.c - ${LIBRARY_DIR}/DriverManager/SQLNativeSqlW.c - ${LIBRARY_DIR}/DriverManager/SQLNumParams.c - ${LIBRARY_DIR}/DriverManager/SQLNumResultCols.c - ${LIBRARY_DIR}/DriverManager/SQLParamData.c - ${LIBRARY_DIR}/DriverManager/SQLParamOptions.c - ${LIBRARY_DIR}/DriverManager/SQLPrepare.c - ${LIBRARY_DIR}/DriverManager/SQLPrepareW.c - ${LIBRARY_DIR}/DriverManager/SQLPrimaryKeys.c - ${LIBRARY_DIR}/DriverManager/SQLPrimaryKeysW.c - ${LIBRARY_DIR}/DriverManager/SQLProcedureColumns.c - ${LIBRARY_DIR}/DriverManager/SQLProcedureColumnsW.c - ${LIBRARY_DIR}/DriverManager/SQLProcedures.c - ${LIBRARY_DIR}/DriverManager/SQLProceduresW.c - ${LIBRARY_DIR}/DriverManager/SQLPutData.c - ${LIBRARY_DIR}/DriverManager/SQLRowCount.c - ${LIBRARY_DIR}/DriverManager/SQLSetConnectAttr.c - ${LIBRARY_DIR}/DriverManager/SQLSetConnectAttrW.c - ${LIBRARY_DIR}/DriverManager/SQLSetConnectOption.c - ${LIBRARY_DIR}/DriverManager/SQLSetConnectOptionW.c - ${LIBRARY_DIR}/DriverManager/SQLSetCursorName.c - ${LIBRARY_DIR}/DriverManager/SQLSetCursorNameW.c - ${LIBRARY_DIR}/DriverManager/SQLSetDescField.c - 
${LIBRARY_DIR}/DriverManager/SQLSetDescFieldW.c - ${LIBRARY_DIR}/DriverManager/SQLSetDescRec.c - ${LIBRARY_DIR}/DriverManager/SQLSetEnvAttr.c - ${LIBRARY_DIR}/DriverManager/SQLSetParam.c - ${LIBRARY_DIR}/DriverManager/SQLSetPos.c - ${LIBRARY_DIR}/DriverManager/SQLSetScrollOptions.c - ${LIBRARY_DIR}/DriverManager/SQLSetStmtAttr.c - ${LIBRARY_DIR}/DriverManager/SQLSetStmtAttrW.c - ${LIBRARY_DIR}/DriverManager/SQLSetStmtOption.c - ${LIBRARY_DIR}/DriverManager/SQLSetStmtOptionW.c - ${LIBRARY_DIR}/DriverManager/SQLSpecialColumns.c - ${LIBRARY_DIR}/DriverManager/SQLSpecialColumnsW.c - ${LIBRARY_DIR}/DriverManager/SQLStatistics.c - ${LIBRARY_DIR}/DriverManager/SQLStatisticsW.c - ${LIBRARY_DIR}/DriverManager/SQLTablePrivileges.c - ${LIBRARY_DIR}/DriverManager/SQLTablePrivilegesW.c - ${LIBRARY_DIR}/DriverManager/SQLTables.c - ${LIBRARY_DIR}/DriverManager/SQLTablesW.c - ${LIBRARY_DIR}/DriverManager/SQLTransact.c - ${LIBRARY_DIR}/ini/_iniDump.c - ${LIBRARY_DIR}/ini/_iniObjectRead.c - ${LIBRARY_DIR}/ini/_iniPropertyRead.c - ${LIBRARY_DIR}/ini/_iniScanUntilObject.c - ${LIBRARY_DIR}/ini/iniAllTrim.c - ${LIBRARY_DIR}/ini/iniAppend.c - ${LIBRARY_DIR}/ini/iniClose.c - ${LIBRARY_DIR}/ini/iniCommit.c - ${LIBRARY_DIR}/ini/iniCursor.c - ${LIBRARY_DIR}/ini/iniDelete.c - ${LIBRARY_DIR}/ini/iniElement.c - ${LIBRARY_DIR}/ini/iniElementCount.c - ${LIBRARY_DIR}/ini/iniGetBookmark.c - ${LIBRARY_DIR}/ini/iniGotoBookmark.c - ${LIBRARY_DIR}/ini/iniObject.c - ${LIBRARY_DIR}/ini/iniObjectDelete.c - ${LIBRARY_DIR}/ini/iniObjectEOL.c - ${LIBRARY_DIR}/ini/iniObjectFirst.c - ${LIBRARY_DIR}/ini/iniObjectInsert.c - ${LIBRARY_DIR}/ini/iniObjectLast.c - ${LIBRARY_DIR}/ini/iniObjectNext.c - ${LIBRARY_DIR}/ini/iniObjectSeek.c - ${LIBRARY_DIR}/ini/iniObjectSeekSure.c - ${LIBRARY_DIR}/ini/iniObjectUpdate.c - ${LIBRARY_DIR}/ini/iniOpen.c - ${LIBRARY_DIR}/ini/iniProperty.c - ${LIBRARY_DIR}/ini/iniPropertyDelete.c - ${LIBRARY_DIR}/ini/iniPropertyEOL.c - ${LIBRARY_DIR}/ini/iniPropertyFirst.c - ${LIBRARY_DIR}/ini/iniPropertyInsert.c - ${LIBRARY_DIR}/ini/iniPropertyLast.c - ${LIBRARY_DIR}/ini/iniPropertyNext.c - ${LIBRARY_DIR}/ini/iniPropertySeek.c - ${LIBRARY_DIR}/ini/iniPropertySeekSure.c - ${LIBRARY_DIR}/ini/iniPropertyUpdate.c - ${LIBRARY_DIR}/ini/iniPropertyValue.c - ${LIBRARY_DIR}/ini/iniToUpper.c - ${LIBRARY_DIR}/ini/iniValue.c - ${LIBRARY_DIR}/log/_logFreeMsg.c - ${LIBRARY_DIR}/log/logClear.c - ${LIBRARY_DIR}/log/logClose.c - ${LIBRARY_DIR}/log/logOn.c - ${LIBRARY_DIR}/log/logOpen.c - ${LIBRARY_DIR}/log/logPeekMsg.c - ${LIBRARY_DIR}/log/logPopMsg.c - ${LIBRARY_DIR}/log/logPushMsg.c - ${LIBRARY_DIR}/lst/_lstAdjustCurrent.c - ${LIBRARY_DIR}/lst/_lstDump.c - ${LIBRARY_DIR}/lst/_lstFreeItem.c - ${LIBRARY_DIR}/lst/_lstNextValidItem.c - ${LIBRARY_DIR}/lst/_lstPrevValidItem.c - ${LIBRARY_DIR}/lst/_lstVisible.c - ${LIBRARY_DIR}/lst/lstAppend.c - ${LIBRARY_DIR}/lst/lstClose.c - ${LIBRARY_DIR}/lst/lstDelete.c - ${LIBRARY_DIR}/lst/lstEOL.c - ${LIBRARY_DIR}/lst/lstFirst.c - ${LIBRARY_DIR}/lst/lstGet.c - ${LIBRARY_DIR}/lst/lstGetBookMark.c - ${LIBRARY_DIR}/lst/lstGoto.c - ${LIBRARY_DIR}/lst/lstGotoBookMark.c - ${LIBRARY_DIR}/lst/lstInsert.c - ${LIBRARY_DIR}/lst/lstLast.c - ${LIBRARY_DIR}/lst/lstNext.c - ${LIBRARY_DIR}/lst/lstOpen.c - ${LIBRARY_DIR}/lst/lstOpenCursor.c - ${LIBRARY_DIR}/lst/lstPrev.c - ${LIBRARY_DIR}/lst/lstSeek.c - ${LIBRARY_DIR}/lst/lstSeekItem.c - ${LIBRARY_DIR}/lst/lstSet.c - ${LIBRARY_DIR}/lst/lstSetFreeFunc.c - ${LIBRARY_DIR}/odbcinst/_logging.c - ${LIBRARY_DIR}/odbcinst/_odbcinst_ConfigModeINI.c - 
${LIBRARY_DIR}/odbcinst/_odbcinst_GetEntries.c - ${LIBRARY_DIR}/odbcinst/_odbcinst_GetSections.c - ${LIBRARY_DIR}/odbcinst/_odbcinst_SystemINI.c - ${LIBRARY_DIR}/odbcinst/_odbcinst_UserINI.c - ${LIBRARY_DIR}/odbcinst/_SQLDriverConnectPrompt.c - ${LIBRARY_DIR}/odbcinst/_SQLGetInstalledDrivers.c - ${LIBRARY_DIR}/odbcinst/_SQLWriteInstalledDrivers.c - ${LIBRARY_DIR}/odbcinst/ODBCINSTConstructProperties.c - ${LIBRARY_DIR}/odbcinst/ODBCINSTDestructProperties.c - ${LIBRARY_DIR}/odbcinst/ODBCINSTSetProperty.c - ${LIBRARY_DIR}/odbcinst/ODBCINSTValidateProperties.c - ${LIBRARY_DIR}/odbcinst/ODBCINSTValidateProperty.c - ${LIBRARY_DIR}/odbcinst/SQLConfigDataSource.c - ${LIBRARY_DIR}/odbcinst/SQLConfigDriver.c - ${LIBRARY_DIR}/odbcinst/SQLCreateDataSource.c - ${LIBRARY_DIR}/odbcinst/SQLGetAvailableDrivers.c - ${LIBRARY_DIR}/odbcinst/SQLGetConfigMode.c - ${LIBRARY_DIR}/odbcinst/SQLGetInstalledDrivers.c - ${LIBRARY_DIR}/odbcinst/SQLGetPrivateProfileString.c - ${LIBRARY_DIR}/odbcinst/SQLGetTranslator.c - ${LIBRARY_DIR}/odbcinst/SQLInstallDriverEx.c - ${LIBRARY_DIR}/odbcinst/SQLInstallDriverManager.c - ${LIBRARY_DIR}/odbcinst/SQLInstallerError.c - ${LIBRARY_DIR}/odbcinst/SQLInstallODBC.c - ${LIBRARY_DIR}/odbcinst/SQLInstallTranslatorEx.c - ${LIBRARY_DIR}/odbcinst/SQLManageDataSources.c - ${LIBRARY_DIR}/odbcinst/SQLPostInstallerError.c - ${LIBRARY_DIR}/odbcinst/SQLReadFileDSN.c - ${LIBRARY_DIR}/odbcinst/SQLRemoveDriver.c - ${LIBRARY_DIR}/odbcinst/SQLRemoveDriverManager.c - ${LIBRARY_DIR}/odbcinst/SQLRemoveDSNFromIni.c - ${LIBRARY_DIR}/odbcinst/SQLRemoveTranslator.c - ${LIBRARY_DIR}/odbcinst/SQLSetConfigMode.c - ${LIBRARY_DIR}/odbcinst/SQLValidDSN.c - ${LIBRARY_DIR}/odbcinst/SQLWriteDSNToIni.c - ${LIBRARY_DIR}/odbcinst/SQLWriteFileDSN.c - ${LIBRARY_DIR}/odbcinst/SQLWritePrivateProfileString.c + "${LIBRARY_DIR}/DriverManager/__attribute.c" + "${LIBRARY_DIR}/DriverManager/__connection.c" + "${LIBRARY_DIR}/DriverManager/__handles.c" + "${LIBRARY_DIR}/DriverManager/__info.c" + "${LIBRARY_DIR}/DriverManager/__stats.c" + "${LIBRARY_DIR}/DriverManager/SQLAllocConnect.c" + "${LIBRARY_DIR}/DriverManager/SQLAllocEnv.c" + "${LIBRARY_DIR}/DriverManager/SQLAllocHandle.c" + "${LIBRARY_DIR}/DriverManager/SQLAllocHandleStd.c" + "${LIBRARY_DIR}/DriverManager/SQLAllocStmt.c" + "${LIBRARY_DIR}/DriverManager/SQLBindCol.c" + "${LIBRARY_DIR}/DriverManager/SQLBindParam.c" + "${LIBRARY_DIR}/DriverManager/SQLBindParameter.c" + "${LIBRARY_DIR}/DriverManager/SQLBrowseConnect.c" + "${LIBRARY_DIR}/DriverManager/SQLBrowseConnectW.c" + "${LIBRARY_DIR}/DriverManager/SQLBulkOperations.c" + "${LIBRARY_DIR}/DriverManager/SQLCancel.c" + "${LIBRARY_DIR}/DriverManager/SQLCancelHandle.c" + "${LIBRARY_DIR}/DriverManager/SQLCloseCursor.c" + "${LIBRARY_DIR}/DriverManager/SQLColAttribute.c" + "${LIBRARY_DIR}/DriverManager/SQLColAttributes.c" + "${LIBRARY_DIR}/DriverManager/SQLColAttributesW.c" + "${LIBRARY_DIR}/DriverManager/SQLColAttributeW.c" + "${LIBRARY_DIR}/DriverManager/SQLColumnPrivileges.c" + "${LIBRARY_DIR}/DriverManager/SQLColumnPrivilegesW.c" + "${LIBRARY_DIR}/DriverManager/SQLColumns.c" + "${LIBRARY_DIR}/DriverManager/SQLColumnsW.c" + "${LIBRARY_DIR}/DriverManager/SQLConnect.c" + "${LIBRARY_DIR}/DriverManager/SQLConnectW.c" + "${LIBRARY_DIR}/DriverManager/SQLCopyDesc.c" + "${LIBRARY_DIR}/DriverManager/SQLDataSources.c" + "${LIBRARY_DIR}/DriverManager/SQLDataSourcesW.c" + "${LIBRARY_DIR}/DriverManager/SQLDescribeCol.c" + "${LIBRARY_DIR}/DriverManager/SQLDescribeColW.c" + "${LIBRARY_DIR}/DriverManager/SQLDescribeParam.c" + 
"${LIBRARY_DIR}/DriverManager/SQLDisconnect.c" + "${LIBRARY_DIR}/DriverManager/SQLDriverConnect.c" + "${LIBRARY_DIR}/DriverManager/SQLDriverConnectW.c" + "${LIBRARY_DIR}/DriverManager/SQLDrivers.c" + "${LIBRARY_DIR}/DriverManager/SQLDriversW.c" + "${LIBRARY_DIR}/DriverManager/SQLEndTran.c" + "${LIBRARY_DIR}/DriverManager/SQLError.c" + "${LIBRARY_DIR}/DriverManager/SQLErrorW.c" + "${LIBRARY_DIR}/DriverManager/SQLExecDirect.c" + "${LIBRARY_DIR}/DriverManager/SQLExecDirectW.c" + "${LIBRARY_DIR}/DriverManager/SQLExecute.c" + "${LIBRARY_DIR}/DriverManager/SQLExtendedFetch.c" + "${LIBRARY_DIR}/DriverManager/SQLFetch.c" + "${LIBRARY_DIR}/DriverManager/SQLFetchScroll.c" + "${LIBRARY_DIR}/DriverManager/SQLForeignKeys.c" + "${LIBRARY_DIR}/DriverManager/SQLForeignKeysW.c" + "${LIBRARY_DIR}/DriverManager/SQLFreeConnect.c" + "${LIBRARY_DIR}/DriverManager/SQLFreeEnv.c" + "${LIBRARY_DIR}/DriverManager/SQLFreeHandle.c" + "${LIBRARY_DIR}/DriverManager/SQLFreeStmt.c" + "${LIBRARY_DIR}/DriverManager/SQLGetConnectAttr.c" + "${LIBRARY_DIR}/DriverManager/SQLGetConnectAttrW.c" + "${LIBRARY_DIR}/DriverManager/SQLGetConnectOption.c" + "${LIBRARY_DIR}/DriverManager/SQLGetConnectOptionW.c" + "${LIBRARY_DIR}/DriverManager/SQLGetCursorName.c" + "${LIBRARY_DIR}/DriverManager/SQLGetCursorNameW.c" + "${LIBRARY_DIR}/DriverManager/SQLGetData.c" + "${LIBRARY_DIR}/DriverManager/SQLGetDescField.c" + "${LIBRARY_DIR}/DriverManager/SQLGetDescFieldW.c" + "${LIBRARY_DIR}/DriverManager/SQLGetDescRec.c" + "${LIBRARY_DIR}/DriverManager/SQLGetDescRecW.c" + "${LIBRARY_DIR}/DriverManager/SQLGetDiagField.c" + "${LIBRARY_DIR}/DriverManager/SQLGetDiagFieldW.c" + "${LIBRARY_DIR}/DriverManager/SQLGetDiagRec.c" + "${LIBRARY_DIR}/DriverManager/SQLGetDiagRecW.c" + "${LIBRARY_DIR}/DriverManager/SQLGetEnvAttr.c" + "${LIBRARY_DIR}/DriverManager/SQLGetFunctions.c" + "${LIBRARY_DIR}/DriverManager/SQLGetInfo.c" + "${LIBRARY_DIR}/DriverManager/SQLGetInfoW.c" + "${LIBRARY_DIR}/DriverManager/SQLGetStmtAttr.c" + "${LIBRARY_DIR}/DriverManager/SQLGetStmtAttrW.c" + "${LIBRARY_DIR}/DriverManager/SQLGetStmtOption.c" + "${LIBRARY_DIR}/DriverManager/SQLGetTypeInfo.c" + "${LIBRARY_DIR}/DriverManager/SQLGetTypeInfoW.c" + "${LIBRARY_DIR}/DriverManager/SQLMoreResults.c" + "${LIBRARY_DIR}/DriverManager/SQLNativeSql.c" + "${LIBRARY_DIR}/DriverManager/SQLNativeSqlW.c" + "${LIBRARY_DIR}/DriverManager/SQLNumParams.c" + "${LIBRARY_DIR}/DriverManager/SQLNumResultCols.c" + "${LIBRARY_DIR}/DriverManager/SQLParamData.c" + "${LIBRARY_DIR}/DriverManager/SQLParamOptions.c" + "${LIBRARY_DIR}/DriverManager/SQLPrepare.c" + "${LIBRARY_DIR}/DriverManager/SQLPrepareW.c" + "${LIBRARY_DIR}/DriverManager/SQLPrimaryKeys.c" + "${LIBRARY_DIR}/DriverManager/SQLPrimaryKeysW.c" + "${LIBRARY_DIR}/DriverManager/SQLProcedureColumns.c" + "${LIBRARY_DIR}/DriverManager/SQLProcedureColumnsW.c" + "${LIBRARY_DIR}/DriverManager/SQLProcedures.c" + "${LIBRARY_DIR}/DriverManager/SQLProceduresW.c" + "${LIBRARY_DIR}/DriverManager/SQLPutData.c" + "${LIBRARY_DIR}/DriverManager/SQLRowCount.c" + "${LIBRARY_DIR}/DriverManager/SQLSetConnectAttr.c" + "${LIBRARY_DIR}/DriverManager/SQLSetConnectAttrW.c" + "${LIBRARY_DIR}/DriverManager/SQLSetConnectOption.c" + "${LIBRARY_DIR}/DriverManager/SQLSetConnectOptionW.c" + "${LIBRARY_DIR}/DriverManager/SQLSetCursorName.c" + "${LIBRARY_DIR}/DriverManager/SQLSetCursorNameW.c" + "${LIBRARY_DIR}/DriverManager/SQLSetDescField.c" + "${LIBRARY_DIR}/DriverManager/SQLSetDescFieldW.c" + "${LIBRARY_DIR}/DriverManager/SQLSetDescRec.c" + "${LIBRARY_DIR}/DriverManager/SQLSetEnvAttr.c" + 
"${LIBRARY_DIR}/DriverManager/SQLSetParam.c" + "${LIBRARY_DIR}/DriverManager/SQLSetPos.c" + "${LIBRARY_DIR}/DriverManager/SQLSetScrollOptions.c" + "${LIBRARY_DIR}/DriverManager/SQLSetStmtAttr.c" + "${LIBRARY_DIR}/DriverManager/SQLSetStmtAttrW.c" + "${LIBRARY_DIR}/DriverManager/SQLSetStmtOption.c" + "${LIBRARY_DIR}/DriverManager/SQLSetStmtOptionW.c" + "${LIBRARY_DIR}/DriverManager/SQLSpecialColumns.c" + "${LIBRARY_DIR}/DriverManager/SQLSpecialColumnsW.c" + "${LIBRARY_DIR}/DriverManager/SQLStatistics.c" + "${LIBRARY_DIR}/DriverManager/SQLStatisticsW.c" + "${LIBRARY_DIR}/DriverManager/SQLTablePrivileges.c" + "${LIBRARY_DIR}/DriverManager/SQLTablePrivilegesW.c" + "${LIBRARY_DIR}/DriverManager/SQLTables.c" + "${LIBRARY_DIR}/DriverManager/SQLTablesW.c" + "${LIBRARY_DIR}/DriverManager/SQLTransact.c" + "${LIBRARY_DIR}/ini/_iniDump.c" + "${LIBRARY_DIR}/ini/_iniObjectRead.c" + "${LIBRARY_DIR}/ini/_iniPropertyRead.c" + "${LIBRARY_DIR}/ini/_iniScanUntilObject.c" + "${LIBRARY_DIR}/ini/iniAllTrim.c" + "${LIBRARY_DIR}/ini/iniAppend.c" + "${LIBRARY_DIR}/ini/iniClose.c" + "${LIBRARY_DIR}/ini/iniCommit.c" + "${LIBRARY_DIR}/ini/iniCursor.c" + "${LIBRARY_DIR}/ini/iniDelete.c" + "${LIBRARY_DIR}/ini/iniElement.c" + "${LIBRARY_DIR}/ini/iniElementCount.c" + "${LIBRARY_DIR}/ini/iniGetBookmark.c" + "${LIBRARY_DIR}/ini/iniGotoBookmark.c" + "${LIBRARY_DIR}/ini/iniObject.c" + "${LIBRARY_DIR}/ini/iniObjectDelete.c" + "${LIBRARY_DIR}/ini/iniObjectEOL.c" + "${LIBRARY_DIR}/ini/iniObjectFirst.c" + "${LIBRARY_DIR}/ini/iniObjectInsert.c" + "${LIBRARY_DIR}/ini/iniObjectLast.c" + "${LIBRARY_DIR}/ini/iniObjectNext.c" + "${LIBRARY_DIR}/ini/iniObjectSeek.c" + "${LIBRARY_DIR}/ini/iniObjectSeekSure.c" + "${LIBRARY_DIR}/ini/iniObjectUpdate.c" + "${LIBRARY_DIR}/ini/iniOpen.c" + "${LIBRARY_DIR}/ini/iniProperty.c" + "${LIBRARY_DIR}/ini/iniPropertyDelete.c" + "${LIBRARY_DIR}/ini/iniPropertyEOL.c" + "${LIBRARY_DIR}/ini/iniPropertyFirst.c" + "${LIBRARY_DIR}/ini/iniPropertyInsert.c" + "${LIBRARY_DIR}/ini/iniPropertyLast.c" + "${LIBRARY_DIR}/ini/iniPropertyNext.c" + "${LIBRARY_DIR}/ini/iniPropertySeek.c" + "${LIBRARY_DIR}/ini/iniPropertySeekSure.c" + "${LIBRARY_DIR}/ini/iniPropertyUpdate.c" + "${LIBRARY_DIR}/ini/iniPropertyValue.c" + "${LIBRARY_DIR}/ini/iniToUpper.c" + "${LIBRARY_DIR}/ini/iniValue.c" + "${LIBRARY_DIR}/log/_logFreeMsg.c" + "${LIBRARY_DIR}/log/logClear.c" + "${LIBRARY_DIR}/log/logClose.c" + "${LIBRARY_DIR}/log/logOn.c" + "${LIBRARY_DIR}/log/logOpen.c" + "${LIBRARY_DIR}/log/logPeekMsg.c" + "${LIBRARY_DIR}/log/logPopMsg.c" + "${LIBRARY_DIR}/log/logPushMsg.c" + "${LIBRARY_DIR}/lst/_lstAdjustCurrent.c" + "${LIBRARY_DIR}/lst/_lstDump.c" + "${LIBRARY_DIR}/lst/_lstFreeItem.c" + "${LIBRARY_DIR}/lst/_lstNextValidItem.c" + "${LIBRARY_DIR}/lst/_lstPrevValidItem.c" + "${LIBRARY_DIR}/lst/_lstVisible.c" + "${LIBRARY_DIR}/lst/lstAppend.c" + "${LIBRARY_DIR}/lst/lstClose.c" + "${LIBRARY_DIR}/lst/lstDelete.c" + "${LIBRARY_DIR}/lst/lstEOL.c" + "${LIBRARY_DIR}/lst/lstFirst.c" + "${LIBRARY_DIR}/lst/lstGet.c" + "${LIBRARY_DIR}/lst/lstGetBookMark.c" + "${LIBRARY_DIR}/lst/lstGoto.c" + "${LIBRARY_DIR}/lst/lstGotoBookMark.c" + "${LIBRARY_DIR}/lst/lstInsert.c" + "${LIBRARY_DIR}/lst/lstLast.c" + "${LIBRARY_DIR}/lst/lstNext.c" + "${LIBRARY_DIR}/lst/lstOpen.c" + "${LIBRARY_DIR}/lst/lstOpenCursor.c" + "${LIBRARY_DIR}/lst/lstPrev.c" + "${LIBRARY_DIR}/lst/lstSeek.c" + "${LIBRARY_DIR}/lst/lstSeekItem.c" + "${LIBRARY_DIR}/lst/lstSet.c" + "${LIBRARY_DIR}/lst/lstSetFreeFunc.c" + "${LIBRARY_DIR}/odbcinst/_logging.c" + 
"${LIBRARY_DIR}/odbcinst/_odbcinst_ConfigModeINI.c" + "${LIBRARY_DIR}/odbcinst/_odbcinst_GetEntries.c" + "${LIBRARY_DIR}/odbcinst/_odbcinst_GetSections.c" + "${LIBRARY_DIR}/odbcinst/_odbcinst_SystemINI.c" + "${LIBRARY_DIR}/odbcinst/_odbcinst_UserINI.c" + "${LIBRARY_DIR}/odbcinst/_SQLDriverConnectPrompt.c" + "${LIBRARY_DIR}/odbcinst/_SQLGetInstalledDrivers.c" + "${LIBRARY_DIR}/odbcinst/_SQLWriteInstalledDrivers.c" + "${LIBRARY_DIR}/odbcinst/ODBCINSTConstructProperties.c" + "${LIBRARY_DIR}/odbcinst/ODBCINSTDestructProperties.c" + "${LIBRARY_DIR}/odbcinst/ODBCINSTSetProperty.c" + "${LIBRARY_DIR}/odbcinst/ODBCINSTValidateProperties.c" + "${LIBRARY_DIR}/odbcinst/ODBCINSTValidateProperty.c" + "${LIBRARY_DIR}/odbcinst/SQLConfigDataSource.c" + "${LIBRARY_DIR}/odbcinst/SQLConfigDriver.c" + "${LIBRARY_DIR}/odbcinst/SQLCreateDataSource.c" + "${LIBRARY_DIR}/odbcinst/SQLGetAvailableDrivers.c" + "${LIBRARY_DIR}/odbcinst/SQLGetConfigMode.c" + "${LIBRARY_DIR}/odbcinst/SQLGetInstalledDrivers.c" + "${LIBRARY_DIR}/odbcinst/SQLGetPrivateProfileString.c" + "${LIBRARY_DIR}/odbcinst/SQLGetTranslator.c" + "${LIBRARY_DIR}/odbcinst/SQLInstallDriverEx.c" + "${LIBRARY_DIR}/odbcinst/SQLInstallDriverManager.c" + "${LIBRARY_DIR}/odbcinst/SQLInstallerError.c" + "${LIBRARY_DIR}/odbcinst/SQLInstallODBC.c" + "${LIBRARY_DIR}/odbcinst/SQLInstallTranslatorEx.c" + "${LIBRARY_DIR}/odbcinst/SQLManageDataSources.c" + "${LIBRARY_DIR}/odbcinst/SQLPostInstallerError.c" + "${LIBRARY_DIR}/odbcinst/SQLReadFileDSN.c" + "${LIBRARY_DIR}/odbcinst/SQLRemoveDriver.c" + "${LIBRARY_DIR}/odbcinst/SQLRemoveDriverManager.c" + "${LIBRARY_DIR}/odbcinst/SQLRemoveDSNFromIni.c" + "${LIBRARY_DIR}/odbcinst/SQLRemoveTranslator.c" + "${LIBRARY_DIR}/odbcinst/SQLSetConfigMode.c" + "${LIBRARY_DIR}/odbcinst/SQLValidDSN.c" + "${LIBRARY_DIR}/odbcinst/SQLWriteDSNToIni.c" + "${LIBRARY_DIR}/odbcinst/SQLWriteFileDSN.c" + "${LIBRARY_DIR}/odbcinst/SQLWritePrivateProfileString.c" ) add_library (unixodbc ${SRCS}) @@ -280,7 +280,7 @@ target_include_directories (unixodbc linux_x86_64/private PUBLIC linux_x86_64 - ${LIBRARY_DIR}/include + "${LIBRARY_DIR}/include" ) target_compile_definitions (unixodbc PRIVATE -DHAVE_CONFIG_H) target_compile_options (unixodbc diff --git a/contrib/zlib-ng b/contrib/zlib-ng index 6fd1846c8b8..db232d30b4c 160000 --- a/contrib/zlib-ng +++ b/contrib/zlib-ng @@ -1 +1 @@ -Subproject commit 6fd1846c8b8f59436fe2dd752d0f316ddbb64df6 +Subproject commit db232d30b4c72fd58e6d7eae2d12cebf9c3d90db diff --git a/contrib/zstd b/contrib/zstd index 10f0e6993f9..a488ba114ec 160000 --- a/contrib/zstd +++ b/contrib/zstd @@ -1 +1 @@ -Subproject commit 10f0e6993f9d2f682da6d04aa2385b7d53cbb4ee +Subproject commit a488ba114ec17ea1054b9057c26a046fc122b3b6 diff --git a/contrib/zstd-cmake/CMakeLists.txt b/contrib/zstd-cmake/CMakeLists.txt index 58a827761ea..226ee1a8067 100644 --- a/contrib/zstd-cmake/CMakeLists.txt +++ b/contrib/zstd-cmake/CMakeLists.txt @@ -39,108 +39,113 @@ function(GetLibraryVersion _content _outputVar1 _outputVar2 _outputVar3) endfunction() # Define library directory, where sources and header files are located -SET(LIBRARY_DIR ${ClickHouse_SOURCE_DIR}/contrib/zstd/lib) -INCLUDE_DIRECTORIES(BEFORE ${LIBRARY_DIR} ${LIBRARY_DIR}/common) +SET(LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/zstd/lib") +INCLUDE_DIRECTORIES(BEFORE ${LIBRARY_DIR} "${LIBRARY_DIR}/common") # Read file content -FILE(READ ${LIBRARY_DIR}/zstd.h HEADER_CONTENT) +FILE(READ "${LIBRARY_DIR}/zstd.h" HEADER_CONTENT) # Parse version GetLibraryVersion("${HEADER_CONTENT}" LIBVER_MAJOR 
LIBVER_MINOR LIBVER_RELEASE) MESSAGE(STATUS "ZSTD VERSION ${LIBVER_MAJOR}.${LIBVER_MINOR}.${LIBVER_RELEASE}") # cd contrib/zstd/lib -# find . -name '*.c' | grep -vP 'deprecated|legacy' | sort | sed 's/^\./ ${LIBRARY_DIR}/' +# find . -name '*.c' | grep -vP 'deprecated|legacy' | sort | sed 's/^\./ "${LIBRARY_DIR}/"' SET(Sources - ${LIBRARY_DIR}/common/debug.c - ${LIBRARY_DIR}/common/entropy_common.c - ${LIBRARY_DIR}/common/error_private.c - ${LIBRARY_DIR}/common/fse_decompress.c - ${LIBRARY_DIR}/common/pool.c - ${LIBRARY_DIR}/common/threading.c - ${LIBRARY_DIR}/common/xxhash.c - ${LIBRARY_DIR}/common/zstd_common.c - ${LIBRARY_DIR}/compress/fse_compress.c - ${LIBRARY_DIR}/compress/hist.c - ${LIBRARY_DIR}/compress/huf_compress.c - ${LIBRARY_DIR}/compress/zstd_compress.c - ${LIBRARY_DIR}/compress/zstd_compress_literals.c - ${LIBRARY_DIR}/compress/zstd_compress_sequences.c - ${LIBRARY_DIR}/compress/zstd_double_fast.c - ${LIBRARY_DIR}/compress/zstd_fast.c - ${LIBRARY_DIR}/compress/zstd_lazy.c - ${LIBRARY_DIR}/compress/zstd_ldm.c - ${LIBRARY_DIR}/compress/zstdmt_compress.c - ${LIBRARY_DIR}/compress/zstd_opt.c - ${LIBRARY_DIR}/decompress/huf_decompress.c - ${LIBRARY_DIR}/decompress/zstd_ddict.c - ${LIBRARY_DIR}/decompress/zstd_decompress_block.c - ${LIBRARY_DIR}/decompress/zstd_decompress.c - ${LIBRARY_DIR}/dictBuilder/cover.c - ${LIBRARY_DIR}/dictBuilder/divsufsort.c - ${LIBRARY_DIR}/dictBuilder/fastcover.c - ${LIBRARY_DIR}/dictBuilder/zdict.c) + "${LIBRARY_DIR}/common/debug.c" + "${LIBRARY_DIR}/common/entropy_common.c" + "${LIBRARY_DIR}/common/error_private.c" + "${LIBRARY_DIR}/common/fse_decompress.c" + "${LIBRARY_DIR}/common/pool.c" + "${LIBRARY_DIR}/common/threading.c" + "${LIBRARY_DIR}/common/xxhash.c" + "${LIBRARY_DIR}/common/zstd_common.c" + "${LIBRARY_DIR}/compress/fse_compress.c" + "${LIBRARY_DIR}/compress/hist.c" + "${LIBRARY_DIR}/compress/huf_compress.c" + "${LIBRARY_DIR}/compress/zstd_compress.c" + "${LIBRARY_DIR}/compress/zstd_compress_literals.c" + "${LIBRARY_DIR}/compress/zstd_compress_sequences.c" + "${LIBRARY_DIR}/compress/zstd_compress_superblock.c" + "${LIBRARY_DIR}/compress/zstd_double_fast.c" + "${LIBRARY_DIR}/compress/zstd_fast.c" + "${LIBRARY_DIR}/compress/zstd_lazy.c" + "${LIBRARY_DIR}/compress/zstd_ldm.c" + "${LIBRARY_DIR}/compress/zstdmt_compress.c" + "${LIBRARY_DIR}/compress/zstd_opt.c" + "${LIBRARY_DIR}/decompress/huf_decompress.c" + "${LIBRARY_DIR}/decompress/zstd_ddict.c" + "${LIBRARY_DIR}/decompress/zstd_decompress_block.c" + "${LIBRARY_DIR}/decompress/zstd_decompress.c" + "${LIBRARY_DIR}/dictBuilder/cover.c" + "${LIBRARY_DIR}/dictBuilder/divsufsort.c" + "${LIBRARY_DIR}/dictBuilder/fastcover.c" + "${LIBRARY_DIR}/dictBuilder/zdict.c") # cd contrib/zstd/lib -# find . -name '*.h' | grep -vP 'deprecated|legacy' | sort | sed 's/^\./ ${LIBRARY_DIR}/' +# find . 
-name '*.h' | grep -vP 'deprecated|legacy' | sort | sed 's/^\./ "${LIBRARY_DIR}/"' SET(Headers - ${LIBRARY_DIR}/common/bitstream.h - ${LIBRARY_DIR}/common/compiler.h - ${LIBRARY_DIR}/common/cpu.h - ${LIBRARY_DIR}/common/debug.h - ${LIBRARY_DIR}/common/error_private.h - ${LIBRARY_DIR}/common/fse.h - ${LIBRARY_DIR}/common/huf.h - ${LIBRARY_DIR}/common/mem.h - ${LIBRARY_DIR}/common/pool.h - ${LIBRARY_DIR}/common/threading.h - ${LIBRARY_DIR}/common/xxhash.h - ${LIBRARY_DIR}/common/zstd_errors.h - ${LIBRARY_DIR}/common/zstd_internal.h - ${LIBRARY_DIR}/compress/hist.h - ${LIBRARY_DIR}/compress/zstd_compress_internal.h - ${LIBRARY_DIR}/compress/zstd_compress_literals.h - ${LIBRARY_DIR}/compress/zstd_compress_sequences.h - ${LIBRARY_DIR}/compress/zstd_cwksp.h - ${LIBRARY_DIR}/compress/zstd_double_fast.h - ${LIBRARY_DIR}/compress/zstd_fast.h - ${LIBRARY_DIR}/compress/zstd_lazy.h - ${LIBRARY_DIR}/compress/zstd_ldm.h - ${LIBRARY_DIR}/compress/zstdmt_compress.h - ${LIBRARY_DIR}/compress/zstd_opt.h - ${LIBRARY_DIR}/decompress/zstd_ddict.h - ${LIBRARY_DIR}/decompress/zstd_decompress_block.h - ${LIBRARY_DIR}/decompress/zstd_decompress_internal.h - ${LIBRARY_DIR}/dictBuilder/cover.h - ${LIBRARY_DIR}/dictBuilder/divsufsort.h - ${LIBRARY_DIR}/dictBuilder/zdict.h - ${LIBRARY_DIR}/zstd.h) + "${LIBRARY_DIR}/common/bitstream.h" + "${LIBRARY_DIR}/common/compiler.h" + "${LIBRARY_DIR}/common/cpu.h" + "${LIBRARY_DIR}/common/debug.h" + "${LIBRARY_DIR}/common/error_private.h" + "${LIBRARY_DIR}/common/fse.h" + "${LIBRARY_DIR}/common/huf.h" + "${LIBRARY_DIR}/common/mem.h" + "${LIBRARY_DIR}/common/pool.h" + "${LIBRARY_DIR}/common/threading.h" + "${LIBRARY_DIR}/common/xxhash.h" + "${LIBRARY_DIR}/common/zstd_deps.h" + "${LIBRARY_DIR}/common/zstd_internal.h" + "${LIBRARY_DIR}/common/zstd_trace.h" + "${LIBRARY_DIR}/compress/hist.h" + "${LIBRARY_DIR}/compress/zstd_compress_internal.h" + "${LIBRARY_DIR}/compress/zstd_compress_literals.h" + "${LIBRARY_DIR}/compress/zstd_compress_sequences.h" + "${LIBRARY_DIR}/compress/zstd_compress_superblock.h" + "${LIBRARY_DIR}/compress/zstd_cwksp.h" + "${LIBRARY_DIR}/compress/zstd_double_fast.h" + "${LIBRARY_DIR}/compress/zstd_fast.h" + "${LIBRARY_DIR}/compress/zstd_lazy.h" + "${LIBRARY_DIR}/compress/zstd_ldm_geartab.h" + "${LIBRARY_DIR}/compress/zstd_ldm.h" + "${LIBRARY_DIR}/compress/zstdmt_compress.h" + "${LIBRARY_DIR}/compress/zstd_opt.h" + "${LIBRARY_DIR}/decompress/zstd_ddict.h" + "${LIBRARY_DIR}/decompress/zstd_decompress_block.h" + "${LIBRARY_DIR}/decompress/zstd_decompress_internal.h" + "${LIBRARY_DIR}/dictBuilder/cover.h" + "${LIBRARY_DIR}/dictBuilder/divsufsort.h" + "${LIBRARY_DIR}/zdict.h" + "${LIBRARY_DIR}/zstd_errors.h" + "${LIBRARY_DIR}/zstd.h") SET(ZSTD_LEGACY_SUPPORT true) IF (ZSTD_LEGACY_SUPPORT) - SET(LIBRARY_LEGACY_DIR ${LIBRARY_DIR}/legacy) + SET(LIBRARY_LEGACY_DIR "${LIBRARY_DIR}/legacy") INCLUDE_DIRECTORIES(BEFORE ${LIBRARY_LEGACY_DIR}) ADD_DEFINITIONS(-D ZSTD_LEGACY_SUPPORT=1) SET(Sources ${Sources} - ${LIBRARY_LEGACY_DIR}/zstd_v01.c - ${LIBRARY_LEGACY_DIR}/zstd_v02.c - ${LIBRARY_LEGACY_DIR}/zstd_v03.c - ${LIBRARY_LEGACY_DIR}/zstd_v04.c - ${LIBRARY_LEGACY_DIR}/zstd_v05.c - ${LIBRARY_LEGACY_DIR}/zstd_v06.c - ${LIBRARY_LEGACY_DIR}/zstd_v07.c) + "${LIBRARY_LEGACY_DIR}/zstd_v01.c" + "${LIBRARY_LEGACY_DIR}/zstd_v02.c" + "${LIBRARY_LEGACY_DIR}/zstd_v03.c" + "${LIBRARY_LEGACY_DIR}/zstd_v04.c" + "${LIBRARY_LEGACY_DIR}/zstd_v05.c" + "${LIBRARY_LEGACY_DIR}/zstd_v06.c" + "${LIBRARY_LEGACY_DIR}/zstd_v07.c") SET(Headers ${Headers} - ${LIBRARY_LEGACY_DIR}/zstd_legacy.h - 
${LIBRARY_LEGACY_DIR}/zstd_v01.h - ${LIBRARY_LEGACY_DIR}/zstd_v02.h - ${LIBRARY_LEGACY_DIR}/zstd_v03.h - ${LIBRARY_LEGACY_DIR}/zstd_v04.h - ${LIBRARY_LEGACY_DIR}/zstd_v05.h - ${LIBRARY_LEGACY_DIR}/zstd_v06.h - ${LIBRARY_LEGACY_DIR}/zstd_v07.h) + "${LIBRARY_LEGACY_DIR}/zstd_legacy.h" + "${LIBRARY_LEGACY_DIR}/zstd_v01.h" + "${LIBRARY_LEGACY_DIR}/zstd_v02.h" + "${LIBRARY_LEGACY_DIR}/zstd_v03.h" + "${LIBRARY_LEGACY_DIR}/zstd_v04.h" + "${LIBRARY_LEGACY_DIR}/zstd_v05.h" + "${LIBRARY_LEGACY_DIR}/zstd_v06.h" + "${LIBRARY_LEGACY_DIR}/zstd_v07.h") ENDIF (ZSTD_LEGACY_SUPPORT) ADD_LIBRARY(zstd ${Sources} ${Headers}) diff --git a/debian/changelog b/debian/changelog index be77dfdefe9..8b6626416a9 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,5 +1,5 @@ -clickhouse (21.5.1.1) unstable; urgency=low +clickhouse (21.6.1.1) unstable; urgency=low * Modified source code - -- clickhouse-release Fri, 02 Apr 2021 18:34:26 +0300 + -- clickhouse-release Tue, 20 Apr 2021 01:48:16 +0300 diff --git a/debian/clickhouse-client.postinst b/debian/clickhouse-client.postinst deleted file mode 100644 index 480bf2f5c67..00000000000 --- a/debian/clickhouse-client.postinst +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/sh -set -e - -CLICKHOUSE_USER=${CLICKHOUSE_USER=clickhouse} - -mkdir -p /etc/clickhouse-client/conf.d - -#DEBHELPER# diff --git a/debian/clickhouse-common-static.install b/debian/clickhouse-common-static.install index bd65f17ad42..087a6dbba8f 100644 --- a/debian/clickhouse-common-static.install +++ b/debian/clickhouse-common-static.install @@ -3,4 +3,3 @@ usr/bin/clickhouse-odbc-bridge usr/bin/clickhouse-library-bridge usr/bin/clickhouse-extract-from-config usr/share/bash-completion/completions -etc/security/limits.d/clickhouse.conf diff --git a/debian/clickhouse-server.config b/debian/clickhouse-server.config deleted file mode 100644 index 636ff7f4da7..00000000000 --- a/debian/clickhouse-server.config +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/sh -e - -test -f /usr/share/debconf/confmodule && . /usr/share/debconf/confmodule - -db_fget clickhouse-server/default-password seen || true -password_seen="$RET" - -if [ "$1" = "reconfigure" ]; then - password_seen=false -fi - -if [ "$password_seen" != "true" ]; then - db_input high clickhouse-server/default-password || true - db_go || true -fi -db_go || true diff --git a/debian/clickhouse-server.postinst b/debian/clickhouse-server.postinst index dc876f45954..419c13e3daf 100644 --- a/debian/clickhouse-server.postinst +++ b/debian/clickhouse-server.postinst @@ -23,11 +23,13 @@ if [ ! -f "/etc/debian_version" ]; then fi if [ "$1" = configure ] || [ -n "$not_deb_os" ]; then + + ${CLICKHOUSE_GENERIC_PROGRAM} install --user "${CLICKHOUSE_USER}" --group "${CLICKHOUSE_GROUP}" --pid-path "${CLICKHOUSE_PIDDIR}" --config-path "${CLICKHOUSE_CONFDIR}" --binary-path "${CLICKHOUSE_BINDIR}" --log-path "${CLICKHOUSE_LOGDIR}" --data-path "${CLICKHOUSE_DATADIR}" + if [ -x "/bin/systemctl" ] && [ -f /etc/systemd/system/clickhouse-server.service ] && [ -d /run/systemd/system ]; then # if old rc.d service present - remove it if [ -x "/etc/init.d/clickhouse-server" ] && [ -x "/usr/sbin/update-rc.d" ]; then /usr/sbin/update-rc.d clickhouse-server remove - echo "ClickHouse init script has migrated to systemd. 
Please manually stop old server and restart the service: sudo killall clickhouse-server && sleep 5 && sudo service clickhouse-server restart" fi /bin/systemctl daemon-reload @@ -38,10 +40,8 @@ if [ "$1" = configure ] || [ -n "$not_deb_os" ]; then if [ -x "/usr/sbin/update-rc.d" ]; then /usr/sbin/update-rc.d clickhouse-server defaults 19 19 >/dev/null || exit $? else - echo # TODO [ "$OS" = "rhel" ] || [ "$OS" = "centos" ] || [ "$OS" = "fedora" ] + echo # Other OS fi fi fi - - ${CLICKHOUSE_GENERIC_PROGRAM} install --user "${CLICKHOUSE_USER}" --group "${CLICKHOUSE_GROUP}" --pid-path "${CLICKHOUSE_PIDDIR}" --config-path "${CLICKHOUSE_CONFDIR}" --binary-path "${CLICKHOUSE_BINDIR}" --log-path "${CLICKHOUSE_LOGDIR}" --data-path "${CLICKHOUSE_DATADIR}" fi diff --git a/debian/clickhouse-server.preinst b/debian/clickhouse-server.preinst deleted file mode 100644 index 3529aefa7da..00000000000 --- a/debian/clickhouse-server.preinst +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/sh - -if [ "$1" = "upgrade" ]; then - # Return etc/cron.d/clickhouse-server to original state - service clickhouse-server disable_cron ||: -fi - -#DEBHELPER# diff --git a/debian/clickhouse-server.prerm b/debian/clickhouse-server.prerm deleted file mode 100644 index 02e855a7125..00000000000 --- a/debian/clickhouse-server.prerm +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/sh - -if [ "$1" = "upgrade" ] || [ "$1" = "remove" ]; then - # Return etc/cron.d/clickhouse-server to original state - service clickhouse-server disable_cron ||: -fi diff --git a/debian/clickhouse-server.templates b/debian/clickhouse-server.templates deleted file mode 100644 index dd55824e15c..00000000000 --- a/debian/clickhouse-server.templates +++ /dev/null @@ -1,3 +0,0 @@ -Template: clickhouse-server/default-password -Type: password -Description: Enter password for default user: diff --git a/debian/clickhouse.limits b/debian/clickhouse.limits deleted file mode 100644 index aca44082c4e..00000000000 --- a/debian/clickhouse.limits +++ /dev/null @@ -1,2 +0,0 @@ -clickhouse soft nofile 262144 -clickhouse hard nofile 262144 diff --git a/debian/pbuilder-hooks/A00ccache b/debian/pbuilder-hooks/A00ccache deleted file mode 100755 index 575358f31eb..00000000000 --- a/debian/pbuilder-hooks/A00ccache +++ /dev/null @@ -1,22 +0,0 @@ -#!/bin/sh - -# set -x - -# CCACHEDIR - for pbuilder ; CCACHE_DIR - for ccache - -echo "CCACHEDIR=$CCACHEDIR CCACHE_DIR=$CCACHE_DIR SET_CCACHEDIR=$SET_CCACHEDIR" - -[ -z "$CCACHE_DIR" ] && export CCACHE_DIR=${CCACHEDIR:=${SET_CCACHEDIR=/var/cache/pbuilder/ccache}} - -if [ -n "$CCACHE_DIR" ]; then - mkdir -p $CCACHE_DIR $DISTCC_DIR ||: - chown -R $BUILDUSERID:$BUILDUSERID $CCACHE_DIR $DISTCC_DIR ||: - chmod -R a+rwx $CCACHE_DIR $DISTCC_DIR ||: -fi - -[ $CCACHE_PREFIX = 'distcc' ] && mkdir -p $DISTCC_DIR && echo "localhost/`nproc`" >> $DISTCC_DIR/hosts && distcc --show-hosts - -df -h -ccache --show-stats -ccache --zero-stats -ccache --max-size=${CCACHE_SIZE:=32G} diff --git a/debian/pbuilder-hooks/A01xlocale b/debian/pbuilder-hooks/A01xlocale deleted file mode 100755 index 0e90f4ee71c..00000000000 --- a/debian/pbuilder-hooks/A01xlocale +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/sh - -# https://github.com/llvm-mirror/libcxx/commit/6e02e89f65ca1ca1d6ce30fbc557563164dd327e - -touch /usr/include/xlocale.h diff --git a/debian/pbuilder-hooks/B00ccache-stat b/debian/pbuilder-hooks/B00ccache-stat deleted file mode 100755 index fdf6db1b7e7..00000000000 --- a/debian/pbuilder-hooks/B00ccache-stat +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/sh - -ccache --show-stats diff --git 
a/debian/pbuilder-hooks/B90test-server b/debian/pbuilder-hooks/B90test-server deleted file mode 100755 index e36c255f9fc..00000000000 --- a/debian/pbuilder-hooks/B90test-server +++ /dev/null @@ -1,85 +0,0 @@ -#!/usr/bin/env bash -set -e -set -x - -TEST_CONNECT=${TEST_CONNECT=1} -TEST_SSL=${TEST_SSL=1} -PACKAGE_INSTALL=${PACKAGE_INSTALL=1} -TEST_PORT_RANDOM=${TEST_PORT_RANDOM=1} - -if [ "${PACKAGE_INSTALL}" ]; then - dpkg --auto-deconfigure -i /tmp/buildd/*.deb ||: - apt install -y -f --allow-downgrades ||: - dpkg -l | grep clickhouse ||: - - # Second install to replace debian versions - dpkg --auto-deconfigure -i /tmp/buildd/*.deb ||: - dpkg -l | grep clickhouse ||: - - # Some test references uses specific timezone - ln -fs /usr/share/zoneinfo/Europe/Moscow /etc/localtime - echo 'Europe/Moscow' > /etc/timezone - dpkg-reconfigure -f noninteractive tzdata -fi - -mkdir -p /etc/clickhouse-server/config.d /etc/clickhouse-client/config.d - -if [ "${TEST_PORT_RANDOM}" ]; then - CLICKHOUSE_PORT_BASE=${CLICKHOUSE_PORT_BASE:=$(( ( RANDOM % 50000 ) + 10000 ))} - CLICKHOUSE_PORT_TCP=${CLICKHOUSE_PORT_TCP:=$(($CLICKHOUSE_PORT_BASE + 1))} - CLICKHOUSE_PORT_HTTP=${CLICKHOUSE_PORT_HTTP:=$(($CLICKHOUSE_PORT_BASE + 2))} - CLICKHOUSE_PORT_INTERSERVER=${CLICKHOUSE_PORT_INTERSERVER:=$(($CLICKHOUSE_PORT_BASE + 3))} - CLICKHOUSE_PORT_TCP_SECURE=${CLICKHOUSE_PORT_TCP_SECURE:=$(($CLICKHOUSE_PORT_BASE + 4))} - CLICKHOUSE_PORT_HTTPS=${CLICKHOUSE_PORT_HTTPS:=$(($CLICKHOUSE_PORT_BASE + 5))} -fi - -export CLICKHOUSE_PORT_TCP=${CLICKHOUSE_PORT_TCP:=9000} -export CLICKHOUSE_PORT_HTTP=${CLICKHOUSE_PORT_HTTP:=8123} -export CLICKHOUSE_PORT_INTERSERVER=${CLICKHOUSE_PORT_INTERSERVER:=9009} -export CLICKHOUSE_PORT_TCP_SECURE=${CLICKHOUSE_PORT_TCP_SECURE:=9440} -export CLICKHOUSE_PORT_HTTPS=${CLICKHOUSE_PORT_HTTPS:=8443} - -if [ "${TEST_CONNECT}" ]; then - [ "${TEST_PORT_RANDOM}" ] && echo "${CLICKHOUSE_PORT_HTTP}${CLICKHOUSE_PORT_TCP}${CLICKHOUSE_PORT_INTERSERVER}" > /etc/clickhouse-server/config.d/port.xml - - if [ "${TEST_SSL}" ]; then - CLICKHOUSE_SSL_CONFIG="noneAcceptCertificateHandler" - echo "${CLICKHOUSE_PORT_HTTPS}${CLICKHOUSE_PORT_TCP_SECURE}${CLICKHOUSE_SSL_CONFIG}" > /etc/clickhouse-server/config.d/ssl.xml - echo "${CLICKHOUSE_PORT_TCP}${CLICKHOUSE_PORT_TCP_SECURE}${CLICKHOUSE_SSL_CONFIG}" > /etc/clickhouse-client/config.xml - openssl dhparam -out /etc/clickhouse-server/dhparam.pem 256 - openssl req -subj "/CN=localhost" -new -newkey rsa:2048 -days 365 -nodes -x509 -keyout /etc/clickhouse-server/server.key -out /etc/clickhouse-server/server.crt - chmod -f a+r /etc/clickhouse-server/* /etc/clickhouse-client/* ||: - CLIENT_ADD+="--secure --port ${CLICKHOUSE_PORT_TCP_SECURE}" - else - CLIENT_ADD+="--port ${CLICKHOUSE_PORT_TCP}" - fi - - # For debug - # tail -n +1 -- /etc/clickhouse-server/*.xml /etc/clickhouse-server/config.d/*.xml ||: - - function finish { - service clickhouse-server stop - tail -n 100 /var/log/clickhouse-server/*.log ||: - sleep 1 - killall -9 clickhouse-server ||: - } - trap finish EXIT SIGINT SIGQUIT SIGTERM - - service clickhouse-server start - sleep ${TEST_SERVER_STARTUP_WAIT:=5} - service clickhouse-server status - - # TODO: remove me or make only on error: - tail -n100 /var/log/clickhouse-server/*.log ||: - - clickhouse-client --port $CLICKHOUSE_PORT_TCP -q "SELECT * from system.build_options;" - clickhouse-client ${CLIENT_ADD} -q "SELECT toDateTime(1);" - - ( [ "${TEST_RUN}" ] && clickhouse-test --queries /usr/share/clickhouse-test/queries --tmp /tmp/clickhouse-test/ ${TEST_OPT} ) || 
${TEST_TRUE:=true} - - service clickhouse-server stop - -fi - -# Test debug symbols -# gdb -ex quit --args /usr/bin/clickhouse-server diff --git a/debian/pbuilder-hooks/C99kill-make b/debian/pbuilder-hooks/C99kill-make deleted file mode 100755 index 2068e75dc40..00000000000 --- a/debian/pbuilder-hooks/C99kill-make +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/sh - -# Try stop parallel build after timeout - -killall make gcc gcc-8 g++-8 gcc-9 g++-9 clang clang-6.0 clang++-6.0 clang-7 clang++-7 ||: diff --git a/debian/rules b/debian/rules index 8eb47e95389..73d1f3d3b34 100755 --- a/debian/rules +++ b/debian/rules @@ -113,9 +113,6 @@ override_dh_install: ln -sf clickhouse-server.docs debian/clickhouse-client.docs ln -sf clickhouse-server.docs debian/clickhouse-common-static.docs - mkdir -p $(DESTDIR)/etc/security/limits.d - cp debian/clickhouse.limits $(DESTDIR)/etc/security/limits.d/clickhouse.conf - # systemd compatibility mkdir -p $(DESTDIR)/etc/systemd/system/ cp debian/clickhouse-server.service $(DESTDIR)/etc/systemd/system/ diff --git a/debian/watch b/debian/watch index 7ad4cedf713..ed3cab97ade 100644 --- a/debian/watch +++ b/debian/watch @@ -1,6 +1,6 @@ version=4 opts="filenamemangle=s%(?:.*?)?v?(\d[\d.]*)-stable\.tar\.gz%clickhouse-$1.tar.gz%" \ - https://github.com/yandex/clickhouse/tags \ + https://github.com/ClickHouse/ClickHouse/tags \ (?:.*?/)?v?(\d[\d.]*)-stable\.tar\.gz debian uupdate diff --git a/docker/client/Dockerfile b/docker/client/Dockerfile index 2efba9735ae..569025dec1c 100644 --- a/docker/client/Dockerfile +++ b/docker/client/Dockerfile @@ -1,7 +1,7 @@ FROM ubuntu:18.04 ARG repository="deb https://repo.clickhouse.tech/deb/stable/ main/" -ARG version=21.5.1.* +ARG version=21.6.1.* RUN apt-get update \ && apt-get install --yes --no-install-recommends \ diff --git a/docker/packager/README.md b/docker/packager/README.md index 9fbc2d7f8b5..a745f6225fa 100644 --- a/docker/packager/README.md +++ b/docker/packager/README.md @@ -3,10 +3,10 @@ compilers and build settings. 
Correctly configured Docker daemon is single depen Usage: -Build deb package with `gcc-9` in `debug` mode: +Build deb package with `clang-11` in `debug` mode: ``` $ mkdir deb/test_output -$ ./packager --output-dir deb/test_output/ --package-type deb --compiler=gcc-9 --build-type=debug +$ ./packager --output-dir deb/test_output/ --package-type deb --compiler=clang-11 --build-type=debug $ ls -l deb/test_output -rw-r--r-- 1 root root 3730 clickhouse-client_18.14.2+debug_all.deb -rw-r--r-- 1 root root 84221888 clickhouse-common-static_18.14.2+debug_amd64.deb @@ -18,11 +18,11 @@ $ ls -l deb/test_output ``` -Build ClickHouse binary with `clang-10` and `address` sanitizer in `relwithdebuginfo` +Build ClickHouse binary with `clang-11` and `address` sanitizer in `relwithdebuginfo` mode: ``` $ mkdir $HOME/some_clickhouse -$ ./packager --output-dir=$HOME/some_clickhouse --package-type binary --compiler=clang-10 --sanitizer=address +$ ./packager --output-dir=$HOME/some_clickhouse --package-type binary --compiler=clang-11 --sanitizer=address $ ls -l $HOME/some_clickhouse -rwxr-xr-x 1 root root 787061952 clickhouse lrwxrwxrwx 1 root root 10 clickhouse-benchmark -> clickhouse diff --git a/docker/packager/binary/Dockerfile b/docker/packager/binary/Dockerfile index 94c7f934f6e..56b2af5cf84 100644 --- a/docker/packager/binary/Dockerfile +++ b/docker/packager/binary/Dockerfile @@ -35,35 +35,27 @@ RUN apt-get update \ RUN apt-get update \ && apt-get install \ bash \ - cmake \ + build-essential \ ccache \ - curl \ - gcc-9 \ - g++-9 \ - clang-10 \ - clang-tidy-10 \ - lld-10 \ - llvm-10 \ - llvm-10-dev \ clang-11 \ clang-tidy-11 \ + cmake \ + curl \ + g++-10 \ + gcc-10 \ + gdb \ + git \ + gperf \ + libicu-dev \ + libreadline-dev \ lld-11 \ llvm-11 \ llvm-11-dev \ - libicu-dev \ - libreadline-dev \ + moreutils \ ninja-build \ - gperf \ - git \ - opencl-headers \ - ocl-icd-libopencl1 \ - intel-opencl-icd \ - tzdata \ - gperf \ - cmake \ - gdb \ + pigz \ rename \ - build-essential \ + tzdata \ --yes --no-install-recommends # This symlink required by gcc to find lld compiler @@ -111,4 +103,4 @@ RUN rm /etc/apt/sources.list.d/proposed-repositories.list && apt-get update COPY build.sh / -CMD ["/bin/bash", "/build.sh"] +CMD ["bash", "-c", "/build.sh 2>&1 | ts"] diff --git a/docker/packager/binary/build.sh b/docker/packager/binary/build.sh index d5830bf620b..cf74105fbbb 100755 --- a/docker/packager/binary/build.sh +++ b/docker/packager/binary/build.sh @@ -11,16 +11,28 @@ tar xJf gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu.tar.xz -C build/cmake/toolc mkdir -p build/cmake/toolchain/freebsd-x86_64 tar xJf freebsd-11.3-toolchain.tar.xz -C build/cmake/toolchain/freebsd-x86_64 --strip-components=1 +# Uncomment to debug ccache. Don't put ccache log in /output right away, or it +# will be confusingly packed into the "performance" package. +# export CCACHE_LOGFILE=/build/ccache.log +# export CCACHE_DEBUG=1 + mkdir -p build/build_docker cd build/build_docker -ccache --show-stats ||: -ccache --zero-stats ||: rm -f CMakeCache.txt # Read cmake arguments into array (possibly empty) read -ra CMAKE_FLAGS <<< "${CMAKE_FLAGS:-}" cmake --debug-trycompile --verbose=1 -DCMAKE_VERBOSE_MAKEFILE=1 -LA "-DCMAKE_BUILD_TYPE=$BUILD_TYPE" "-DSANITIZE=$SANITIZER" -DENABLE_CHECK_HEAVY_BUILDS=1 "${CMAKE_FLAGS[@]}" .. + +ccache --show-config ||: +ccache --show-stats ||: +ccache --zero-stats ||: + # shellcheck disable=SC2086 # No quotes because I want it to expand to nothing if empty. 
ninja $NINJA_FLAGS clickhouse-bundle + +ccache --show-config ||: +ccache --show-stats ||: + mv ./programs/clickhouse* /output mv ./src/unit_tests_dbms /output ||: # may not exist for some binary builds find . -name '*.so' -print -exec mv '{}' /output \; @@ -64,8 +76,21 @@ then cp ../programs/server/config.xml /output/config cp ../programs/server/users.xml /output/config cp -r --dereference ../programs/server/config.d /output/config - tar -czvf "$COMBINED_OUTPUT.tgz" /output + tar -cv -I pigz -f "$COMBINED_OUTPUT.tgz" /output rm -r /output/* mv "$COMBINED_OUTPUT.tgz" /output fi -ccache --show-stats ||: + +if [ "${CCACHE_DEBUG:-}" == "1" ] +then + find . -name '*.ccache-*' -print0 \ + | tar -c -I pixz -f /output/ccache-debug.txz --null -T - +fi + +if [ -n "$CCACHE_LOGFILE" ] +then + # Compress the log as well, or else the CI will try to compress all log + # files in place, and will fail because this directory is not writable. + tar -cv -I pixz -f /output/ccache.log.txz "$CCACHE_LOGFILE" +fi + diff --git a/docker/packager/deb/Dockerfile b/docker/packager/deb/Dockerfile index 8fd89d60f85..2f1d28efe61 100644 --- a/docker/packager/deb/Dockerfile +++ b/docker/packager/deb/Dockerfile @@ -34,31 +34,25 @@ RUN curl -O https://clickhouse-builds.s3.yandex.net/utils/1/dpkg-deb \ # Libraries from OS are only needed to test the "unbundled" build (this is not used in production). RUN apt-get update \ && apt-get install \ - gcc-9 \ - g++-9 \ + alien \ clang-11 \ clang-tidy-11 \ + cmake \ + debhelper \ + devscripts \ + gdb \ + git \ + gperf \ lld-11 \ llvm-11 \ llvm-11-dev \ - clang-10 \ - clang-tidy-10 \ - lld-10 \ - llvm-10 \ - llvm-10-dev \ + moreutils \ ninja-build \ perl \ - pkg-config \ - devscripts \ - debhelper \ - git \ - tzdata \ - gperf \ - alien \ - cmake \ - gdb \ - moreutils \ pigz \ + pixz \ + pkg-config \ + tzdata \ --yes --no-install-recommends # NOTE: For some reason we have outdated version of gcc-10 in ubuntu 20.04 stable. diff --git a/docker/packager/deb/build.sh b/docker/packager/deb/build.sh index 9ae80cb56b0..4e14574b738 100755 --- a/docker/packager/deb/build.sh +++ b/docker/packager/deb/build.sh @@ -2,10 +2,16 @@ set -x -e +# Uncomment to debug ccache. +# export CCACHE_LOGFILE=/build/ccache.log +# export CCACHE_DEBUG=1 + +ccache --show-config ||: ccache --show-stats ||: ccache --zero-stats ||: + read -ra ALIEN_PKGS <<< "${ALIEN_PKGS:-}" -build/release --no-pbuilder "${ALIEN_PKGS[@]}" | ts '%Y-%m-%d %H:%M:%S' +build/release "${ALIEN_PKGS[@]}" | ts '%Y-%m-%d %H:%M:%S' mv /*.deb /output mv -- *.changes /output mv -- *.buildinfo /output @@ -22,4 +28,19 @@ then mv /build/obj-*/src/unit_tests_dbms /output/binary fi fi + +ccache --show-config ||: ccache --show-stats ||: + +if [ "${CCACHE_DEBUG:-}" == "1" ] +then + find /build -name '*.ccache-*' -print0 \ + | tar -c -I pixz -f /output/ccache-debug.txz --null -T - +fi + +if [ -n "$CCACHE_LOGFILE" ] +then + # Compress the log as well, or else the CI will try to compress all log + # files in place, and will fail because this directory is not writable. 
+ tar -cv -I pixz -f /output/ccache.log.txz "$CCACHE_LOGFILE" +fi diff --git a/docker/packager/packager b/docker/packager/packager index 65c03cc10e3..836f30dec42 100755 --- a/docker/packager/packager +++ b/docker/packager/packager @@ -143,8 +143,7 @@ def parse_env_variables(build_type, compiler, sanitizer, package_type, image_typ cmake_flags.append('-DUSE_GTEST=1') if unbundled: - # TODO: fix build with ENABLE_RDKAFKA - cmake_flags.append('-DUNBUNDLED=1 -DUSE_INTERNAL_RDKAFKA_LIBRARY=1 -DENABLE_ARROW=0 -DENABLE_ORC=0 -DENABLE_PARQUET=0') + cmake_flags.append('-DUNBUNDLED=1 -DUSE_INTERNAL_RDKAFKA_LIBRARY=1 -DENABLE_ARROW=0 -DENABLE_AVRO=0 -DENABLE_ORC=0 -DENABLE_PARQUET=0') if split_binary: cmake_flags.append('-DUSE_STATIC_LIBRARIES=0 -DSPLIT_SHARED_LIBRARIES=1 -DCLICKHOUSE_SPLIT_BINARY=1') @@ -182,9 +181,8 @@ if __name__ == "__main__": parser.add_argument("--clickhouse-repo-path", default=os.path.join(os.path.dirname(os.path.abspath(__file__)), os.pardir, os.pardir)) parser.add_argument("--output-dir", required=True) parser.add_argument("--build-type", choices=("debug", ""), default="") - parser.add_argument("--compiler", choices=("clang-10", "clang-10-darwin", "clang-10-aarch64", "clang-10-freebsd", - "clang-11", "clang-11-darwin", "clang-11-aarch64", "clang-11-freebsd", - "gcc-9", "gcc-10"), default="gcc-9") + parser.add_argument("--compiler", choices=("clang-11", "clang-11-darwin", "clang-11-aarch64", "clang-11-freebsd", + "gcc-10"), default="clang-11") parser.add_argument("--sanitizer", choices=("address", "thread", "memory", "undefined", ""), default="") parser.add_argument("--unbundled", action="store_true") parser.add_argument("--split-binary", action="store_true") diff --git a/docker/packager/unbundled/Dockerfile b/docker/packager/unbundled/Dockerfile index f640c595f14..4dd6dbc61d8 100644 --- a/docker/packager/unbundled/Dockerfile +++ b/docker/packager/unbundled/Dockerfile @@ -35,9 +35,6 @@ RUN apt-get update \ libjemalloc-dev \ libmsgpack-dev \ libcurl4-openssl-dev \ - opencl-headers \ - ocl-icd-libopencl1 \ - intel-opencl-icd \ unixodbc-dev \ odbcinst \ tzdata \ diff --git a/docker/packager/unbundled/build.sh b/docker/packager/unbundled/build.sh index 99fc34fd9f3..c43c6b5071e 100755 --- a/docker/packager/unbundled/build.sh +++ b/docker/packager/unbundled/build.sh @@ -5,7 +5,7 @@ set -x -e ccache --show-stats ||: ccache --zero-stats ||: read -ra ALIEN_PKGS <<< "${ALIEN_PKGS:-}" -build/release --no-pbuilder "${ALIEN_PKGS[@]}" | ts '%Y-%m-%d %H:%M:%S' +build/release "${ALIEN_PKGS[@]}" | ts '%Y-%m-%d %H:%M:%S' mv /*.deb /output mv -- *.changes /output mv -- *.buildinfo /output diff --git a/docker/server/Dockerfile b/docker/server/Dockerfile index 05ca29f22d4..d302fec7417 100644 --- a/docker/server/Dockerfile +++ b/docker/server/Dockerfile @@ -1,7 +1,7 @@ FROM ubuntu:20.04 ARG repository="deb https://repo.clickhouse.tech/deb/stable/ main/" -ARG version=21.5.1.* +ARG version=21.6.1.* ARG gosu_ver=1.10 # set non-empty deb_location_url url to create a docker image @@ -64,6 +64,8 @@ RUN groupadd -r clickhouse --gid=101 \ clickhouse-client=$version \ clickhouse-server=$version ; \ fi \ + && wget --progress=bar:force:noscroll "https://github.com/tianon/gosu/releases/download/$gosu_ver/gosu-$(dpkg --print-architecture)" -O /bin/gosu \ + && chmod +x /bin/gosu \ && clickhouse-local -q 'SELECT * FROM system.build_options' \ && rm -rf \ /var/lib/apt/lists/* \ @@ -76,8 +78,6 @@ RUN groupadd -r clickhouse --gid=101 \ # we need to allow "others" access to clickhouse folder, because docker container 
# can be started with arbitrary uid (openshift usecase) -ADD https://github.com/tianon/gosu/releases/download/$gosu_ver/gosu-amd64 /bin/gosu - RUN locale-gen en_US.UTF-8 ENV LANG en_US.UTF-8 ENV LANGUAGE en_US:en @@ -88,10 +88,7 @@ RUN mkdir /docker-entrypoint-initdb.d COPY docker_related_config.xml /etc/clickhouse-server/config.d/ COPY entrypoint.sh /entrypoint.sh - -RUN chmod +x \ - /entrypoint.sh \ - /bin/gosu +RUN chmod +x /entrypoint.sh EXPOSE 9000 8123 9009 VOLUME /var/lib/clickhouse diff --git a/docker/test/Dockerfile b/docker/test/Dockerfile index 976c46ebe27..0e4646386ce 100644 --- a/docker/test/Dockerfile +++ b/docker/test/Dockerfile @@ -1,7 +1,7 @@ FROM ubuntu:18.04 ARG repository="deb https://repo.clickhouse.tech/deb/stable/ main/" -ARG version=21.5.1.* +ARG version=21.6.1.* RUN apt-get update && \ apt-get install -y apt-transport-https dirmngr && \ diff --git a/docker/test/base/Dockerfile b/docker/test/base/Dockerfile index e8653c2122e..44b9d42d6a1 100644 --- a/docker/test/base/Dockerfile +++ b/docker/test/base/Dockerfile @@ -51,13 +51,13 @@ RUN apt-get update \ # Sanitizer options for services (clickhouse-server) RUN echo "TSAN_OPTIONS='verbosity=1000 halt_on_error=1 history_size=7'" >> /etc/environment; \ echo "UBSAN_OPTIONS='print_stacktrace=1'" >> /etc/environment; \ - echo "MSAN_OPTIONS='abort_on_error=1'" >> /etc/environment; \ + echo "MSAN_OPTIONS='abort_on_error=1 poison_in_dtor=1'" >> /etc/environment; \ echo "LSAN_OPTIONS='suppressions=/usr/share/clickhouse-test/config/lsan_suppressions.txt'" >> /etc/environment; \ ln -s /usr/lib/llvm-${LLVM_VERSION}/bin/llvm-symbolizer /usr/bin/llvm-symbolizer; # Sanitizer options for current shell (not current, but the one that will be spawned on "docker run") # (but w/o verbosity for TSAN, otherwise test.reference will not match) ENV TSAN_OPTIONS='halt_on_error=1 history_size=7' ENV UBSAN_OPTIONS='print_stacktrace=1' -ENV MSAN_OPTIONS='abort_on_error=1' +ENV MSAN_OPTIONS='abort_on_error=1 poison_in_dtor=1' CMD sleep 1 diff --git a/docker/test/fasttest/run.sh b/docker/test/fasttest/run.sh index c21a115289d..42c720a7e63 100755 --- a/docker/test/fasttest/run.sh +++ b/docker/test/fasttest/run.sh @@ -300,6 +300,7 @@ function run_tests 01663_aes_msan # Depends on OpenSSL 01667_aes_args_check # Depends on OpenSSL 01776_decrypt_aead_size_check # Depends on OpenSSL + 01811_filter_by_null # Depends on OpenSSL 01281_unsucceeded_insert_select_queries_counter 01292_create_user 01294_lazy_database_concurrent @@ -307,10 +308,10 @@ function run_tests 01354_order_by_tuple_collate_const 01355_ilike 01411_bayesian_ab_testing - 01532_collate_in_low_cardinality - 01533_collate_in_nullable - 01542_collate_in_array - 01543_collate_in_tuple + 01798_uniq_theta_sketch + 01799_long_uniq_theta_sketch + collate + collation _orc_ arrow avro @@ -365,6 +366,16 @@ function run_tests # JSON functions 01666_blns + + # Requires postgresql-client + 01802_test_postgresql_protocol_with_row_policy + + # Depends on AWS + 01801_s3_cluster + + # Depends on LLVM JIT + 01852_jit_if + 01865_jit_comparison_constant_result ) (time clickhouse-test --hung-check -j 8 --order=random --use-skip-list --no-long --testname --shard --zookeeper --skip "${TESTS_TO_SKIP[@]}" -- "$FASTTEST_FOCUS" 2>&1 ||:) | ts '%Y-%m-%d %H:%M:%S' | tee "$FASTTEST_OUTPUT/test_log.txt" diff --git a/docker/test/fuzzer/query-fuzzer-tweaks-users.xml b/docker/test/fuzzer/query-fuzzer-tweaks-users.xml index 1e82f137961..dd6b7467afc 100644 --- a/docker/test/fuzzer/query-fuzzer-tweaks-users.xml +++ 
b/docker/test/fuzzer/query-fuzzer-tweaks-users.xml @@ -14,11 +14,6 @@ 10G - - - - - diff --git a/docker/test/fuzzer/run-fuzzer.sh b/docker/test/fuzzer/run-fuzzer.sh index 4bd3fa717a2..626bedb453c 100755 --- a/docker/test/fuzzer/run-fuzzer.sh +++ b/docker/test/fuzzer/run-fuzzer.sh @@ -198,7 +198,7 @@ case "$stage" in # Lost connection to the server. This probably means that the server died # with abort. echo "failure" > status.txt - if ! grep -ao "Received signal.*\|Logical error.*\|Assertion.*failed\|Failed assertion.*\|.*runtime error: .*\|.*is located.*\|SUMMARY: MemorySanitizer:.*\|SUMMARY: ThreadSanitizer:.*\|.*_LIBCPP_ASSERT.*" server.log > description.txt + if ! grep -ao "Received signal.*\|Logical error.*\|Assertion.*failed\|Failed assertion.*\|.*runtime error: .*\|.*is located.*\|SUMMARY: AddressSanitizer:.*\|SUMMARY: MemorySanitizer:.*\|SUMMARY: ThreadSanitizer:.*\|.*_LIBCPP_ASSERT.*" server.log > description.txt then echo "Lost connection to server. See the logs." > description.txt fi diff --git a/docker/test/integration/runner/compose/docker_compose_mysql_8_0_for_materialize_mysql.yml b/docker/test/integration/runner/compose/docker_compose_mysql_8_0_for_materialize_mysql.yml index 7c8a930c84e..93bfee35caf 100644 --- a/docker/test/integration/runner/compose/docker_compose_mysql_8_0_for_materialize_mysql.yml +++ b/docker/test/integration/runner/compose/docker_compose_mysql_8_0_for_materialize_mysql.yml @@ -6,7 +6,7 @@ services: environment: MYSQL_ROOT_PASSWORD: clickhouse ports: - - 33308:3306 + - 3309:3306 command: --server_id=100 --log-bin='mysql-bin-1.log' --default_authentication_plugin='mysql_native_password' --default-time-zone='+3:00' diff --git a/docker/test/integration/runner/dockerd-entrypoint.sh b/docker/test/integration/runner/dockerd-entrypoint.sh index bda6f5a719d..9b100d947b0 100755 --- a/docker/test/integration/runner/dockerd-entrypoint.sh +++ b/docker/test/integration/runner/dockerd-entrypoint.sh @@ -1,6 +1,17 @@ #!/bin/bash set -e +mkdir -p /etc/docker/ +cat > /etc/docker/daemon.json << EOF +{ + "ipv6": true, + "fixed-cidr-v6": "fd00::/8", + "ip-forward": true, + "insecure-registries" : ["dockerhub-proxy.sas.yp-c.yandex.net:5000"], + "registry-mirrors" : ["http://dockerhub-proxy.sas.yp-c.yandex.net:5000"] +} +EOF + dockerd --host=unix:///var/run/docker.sock --host=tcp://0.0.0.0:2375 &>/var/log/somefile & set +e diff --git a/docker/test/performance-comparison/compare.sh b/docker/test/performance-comparison/compare.sh index 4507de16492..093629e61fc 100755 --- a/docker/test/performance-comparison/compare.sh +++ b/docker/test/performance-comparison/compare.sh @@ -243,9 +243,12 @@ function run_tests profile_seconds_left=600 # Run the tests. + total_tests=$(echo "$test_files" | wc -w) + current_test=0 test_name="" for test in $test_files do + echo "$current_test of $total_tests tests complete" > status.txt # Check that both servers are alive, and restart them if they die. 
clickhouse-client --port $LEFT_SERVER_PORT --query "select 1 format Null" \ || { echo $test_name >> left-server-died.log ; restart ; } @@ -273,6 +276,7 @@ function run_tests profile_seconds_left=$(awk -F' ' \ 'BEGIN { s = '$profile_seconds_left'; } /^profile-total/ { s -= $2 } END { print s }' \ "$test_name-raw.tsv") + current_test=$((current_test + 1)) done unset TIMEFORMAT @@ -1015,6 +1019,7 @@ done wait # Create per-query flamegraphs +touch report/query-files.txt IFS=$'\n' for version in {right,left} do @@ -1149,20 +1154,21 @@ function upload_results return 0 fi - # Surprisingly, clickhouse-client doesn't understand --host 127.0.0.1:9000 - # so I have to extract host and port with clickhouse-local. I tried to use - # Poco URI parser to support this in the client, but it's broken and can't - # parse host:port. set +x # Don't show password in the log - clickhouse-client \ - $(clickhouse-local --query "with '${CHPC_DATABASE_URL}' as url select '--host ' || domain(url) || ' --port ' || toString(port(url)) format TSV") \ - --secure \ - --user "${CHPC_DATABASE_USER}" \ - --password "${CHPC_DATABASE_PASSWORD}" \ - --config "right/config/client_config.xml" \ - --database perftest \ - --date_time_input_format=best_effort \ - --query " + client=(clickhouse-client + # Surprisingly, clickhouse-client doesn't understand --host 127.0.0.1:9000 + # so I have to extract host and port with clickhouse-local. I tried to use + # Poco URI parser to support this in the client, but it's broken and can't + # parse host:port. + $(clickhouse-local --query "with '${CHPC_DATABASE_URL}' as url select '--host ' || domain(url) || ' --port ' || toString(port(url)) format TSV") + --secure + --user "${CHPC_DATABASE_USER}" + --password "${CHPC_DATABASE_PASSWORD}" + --config "right/config/client_config.xml" + --database perftest + --date_time_input_format=best_effort) + + "${client[@]}" --query " insert into query_metrics_v2 select toDate(event_time) event_date, @@ -1185,6 +1191,31 @@ function upload_results format TSV settings date_time_input_format='best_effort' " < report/all-query-metrics.tsv # Don't leave whitespace after INSERT: https://github.com/ClickHouse/ClickHouse/issues/16652 + + # Upload some run attributes. I use this weird form because it is the same + # form that can be used for historical data when you only have compare.log. + cat compare.log \ + | sed -n ' + s/.*Model name:[[:space:]]\+\(.*\)$/metric lscpu-model-name \1/p; + s/.*L1d cache:[[:space:]]\+\(.*\)$/metric lscpu-l1d-cache \1/p; + s/.*L1i cache:[[:space:]]\+\(.*\)$/metric lscpu-l1i-cache \1/p; + s/.*L2 cache:[[:space:]]\+\(.*\)$/metric lscpu-l2-cache \1/p; + s/.*L3 cache:[[:space:]]\+\(.*\)$/metric lscpu-l3-cache \1/p; + s/.*left_sha=\(.*\)$/old-sha \1/p; + s/.*right_sha=\(.*\)/new-sha \1/p' \ + | awk ' + BEGIN { FS = "\t"; OFS = "\t" } + /^old-sha/ { old_sha=$2 } + /^new-sha/ { new_sha=$2 } + /^metric/ { print old_sha, new_sha, $2, $3 }' \ + | "${client[@]}" --query "INSERT INTO run_attributes_v1 FORMAT TSV" + + # Grepping numactl results from log is too crazy, I'll just call it again. 
+ "${client[@]}" --query "INSERT INTO run_attributes_v1 FORMAT TSV" < + diff --git a/docker/test/performance-comparison/config/users.d/perf-comparison-tweaks-users.xml b/docker/test/performance-comparison/config/users.d/perf-comparison-tweaks-users.xml index 41bc7f777bf..2c06be9bb91 100644 --- a/docker/test/performance-comparison/config/users.d/perf-comparison-tweaks-users.xml +++ b/docker/test/performance-comparison/config/users.d/perf-comparison-tweaks-users.xml @@ -17,6 +17,12 @@ 12 + + + 64Mi + + + 0 diff --git a/docker/test/performance-comparison/perf.py b/docker/test/performance-comparison/perf.py index 4727f485943..8231caceca8 100755 --- a/docker/test/performance-comparison/perf.py +++ b/docker/test/performance-comparison/perf.py @@ -66,7 +66,12 @@ reportStageEnd('parse') subst_elems = root.findall('substitutions/substitution') available_parameters = {} # { 'table': ['hits_10m', 'hits_100m'], ... } for e in subst_elems: - available_parameters[e.find('name').text] = [v.text for v in e.findall('values/value')] + name = e.find('name').text + values = [v.text for v in e.findall('values/value')] + if not values: + raise Exception(f'No values given for substitution {{{name}}}') + + available_parameters[name] = values # Takes parallel lists of templates, substitutes them with all combos of # parameters. The set of parameters is determined based on the first list. @@ -76,7 +81,10 @@ def substitute_parameters(query_templates, other_templates = []): query_results = [] other_results = [[]] * (len(other_templates)) for i, q in enumerate(query_templates): - keys = set(n for _, n, _, _ in string.Formatter().parse(q) if n) + # We need stable order of keys here, so that the order of substitutions + # is always the same, and the query indexes are consistent across test + # runs. + keys = sorted(set(n for _, n, _, _ in string.Formatter().parse(q) if n)) values = [available_parameters[k] for k in keys] combos = itertools.product(*values) for c in combos: diff --git a/docker/test/performance-comparison/report.py b/docker/test/performance-comparison/report.py index 9d3ccabb788..42490971127 100755 --- a/docker/test/performance-comparison/report.py +++ b/docker/test/performance-comparison/report.py @@ -520,12 +520,13 @@ if args.report == 'main': for t in tables: print(t) - print(""" + print(f""" @@ -638,12 +639,13 @@ elif args.report == 'all-queries': for t in tables: print(t) - print(""" + print(f""" diff --git a/docker/test/pvs/Dockerfile b/docker/test/pvs/Dockerfile index 382b486dda3..2983be2305f 100644 --- a/docker/test/pvs/Dockerfile +++ b/docker/test/pvs/Dockerfile @@ -41,6 +41,6 @@ CMD echo "Running PVS version $PKG_VERSION" && cd /repo_folder && pvs-studio-ana && cmake . 
-D"ENABLE_EMBEDDED_COMPILER"=OFF -D"USE_INTERNAL_PROTOBUF_LIBRARY"=OFF -D"USE_INTERNAL_GRPC_LIBRARY"=OFF \ && ninja re2_st clickhouse_grpc_protos \ && pvs-studio-analyzer analyze -o pvs-studio.log -e contrib -j 4 -l ./licence.lic; \ + cp /repo_folder/pvs-studio.log /test_output; \ plog-converter -a GA:1,2 -t fullhtml -o /test_output/pvs-studio-html-report pvs-studio.log; \ plog-converter -a GA:1,2 -t tasklist -o /test_output/pvs-studio-task-report.txt pvs-studio.log - diff --git a/docker/test/sqlancer/Dockerfile b/docker/test/sqlancer/Dockerfile index 253ca1b729a..6bcdc3df5cd 100644 --- a/docker/test/sqlancer/Dockerfile +++ b/docker/test/sqlancer/Dockerfile @@ -2,6 +2,7 @@ FROM ubuntu:20.04 RUN apt-get update --yes && env DEBIAN_FRONTEND=noninteractive apt-get install wget unzip git openjdk-14-jdk maven python3 --yes --no-install-recommends + RUN wget https://github.com/sqlancer/sqlancer/archive/master.zip -O /sqlancer.zip RUN mkdir /sqlancer && \ cd /sqlancer && \ diff --git a/docker/test/stateful/run.sh b/docker/test/stateful/run.sh index 9e210dc92a2..8d865431570 100755 --- a/docker/test/stateful/run.sh +++ b/docker/test/stateful/run.sh @@ -21,14 +21,14 @@ function start() -- --path /var/lib/clickhouse1/ --logger.stderr /var/log/clickhouse-server/stderr1.log \ --logger.log /var/log/clickhouse-server/clickhouse-server1.log --logger.errorlog /var/log/clickhouse-server/clickhouse-server1.err.log \ --tcp_port 19000 --tcp_port_secure 19440 --http_port 18123 --https_port 18443 --interserver_http_port 19009 --tcp_with_proxy_port 19010 \ - --mysql_port 19004 \ + --mysql_port 19004 --postgresql_port 19005 \ --keeper_server.tcp_port 19181 --keeper_server.server_id 2 sudo -E -u clickhouse /usr/bin/clickhouse server --config /etc/clickhouse-server2/config.xml --daemon \ -- --path /var/lib/clickhouse2/ --logger.stderr /var/log/clickhouse-server/stderr2.log \ --logger.log /var/log/clickhouse-server/clickhouse-server2.log --logger.errorlog /var/log/clickhouse-server/clickhouse-server2.err.log \ --tcp_port 29000 --tcp_port_secure 29440 --http_port 28123 --https_port 28443 --interserver_http_port 29009 --tcp_with_proxy_port 29010 \ - --mysql_port 29004 \ + --mysql_port 29004 --postgresql_port 29005 \ --keeper_server.tcp_port 29181 --keeper_server.server_id 3 fi diff --git a/docker/test/stateless/Dockerfile b/docker/test/stateless/Dockerfile index 61d1b2f4849..658ae1f27ba 100644 --- a/docker/test/stateless/Dockerfile +++ b/docker/test/stateless/Dockerfile @@ -28,7 +28,8 @@ RUN apt-get update -y \ tree \ unixodbc \ wget \ - mysql-client=5.7* + mysql-client=5.7* \ + postgresql-client RUN pip3 install numpy scipy pandas diff --git a/docker/test/stateless/run.sh b/docker/test/stateless/run.sh index 20132eafb75..8440b1548a5 100755 --- a/docker/test/stateless/run.sh +++ b/docker/test/stateless/run.sh @@ -44,7 +44,7 @@ if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]] -- --path /var/lib/clickhouse1/ --logger.stderr /var/log/clickhouse-server/stderr1.log \ --logger.log /var/log/clickhouse-server/clickhouse-server1.log --logger.errorlog /var/log/clickhouse-server/clickhouse-server1.err.log \ --tcp_port 19000 --tcp_port_secure 19440 --http_port 18123 --https_port 18443 --interserver_http_port 19009 --tcp_with_proxy_port 19010 \ - --mysql_port 19004 \ + --mysql_port 19004 --postgresql_port 19005 \ --keeper_server.tcp_port 19181 --keeper_server.server_id 2 \ --macros.replica r2 # It doesn't work :( @@ -52,7 +52,7 @@ if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" 
-eq 1 ]] -- --path /var/lib/clickhouse2/ --logger.stderr /var/log/clickhouse-server/stderr2.log \ --logger.log /var/log/clickhouse-server/clickhouse-server2.log --logger.errorlog /var/log/clickhouse-server/clickhouse-server2.err.log \ --tcp_port 29000 --tcp_port_secure 29440 --http_port 28123 --https_port 28443 --interserver_http_port 29009 --tcp_with_proxy_port 29010 \ - --mysql_port 29004 \ + --mysql_port 29004 --postgresql_port 29005 \ --keeper_server.tcp_port 29181 --keeper_server.server_id 3 \ --macros.shard s2 # It doesn't work :( @@ -104,6 +104,29 @@ clickhouse-client -q "system flush logs" ||: pigz < /var/log/clickhouse-server/clickhouse-server.log > /test_output/clickhouse-server.log.gz & clickhouse-client -q "select * from system.query_log format TSVWithNamesAndTypes" | pigz > /test_output/query-log.tsv.gz & clickhouse-client -q "select * from system.query_thread_log format TSVWithNamesAndTypes" | pigz > /test_output/query-thread-log.tsv.gz & +clickhouse-client --allow_introspection_functions=1 -q " + WITH + arrayMap(x -> concat(demangle(addressToSymbol(x)), ':', addressToLine(x)), trace) AS trace_array, + arrayStringConcat(trace_array, '\n') AS trace_string + SELECT * EXCEPT(trace), trace_string FROM system.trace_log FORMAT TSVWithNamesAndTypes +" | pigz > /test_output/trace-log.tsv.gz & + +# Also export trace log in flamegraph-friendly format. +for trace_type in CPU Memory Real +do + clickhouse-client -q " + select + arrayStringConcat((arrayMap(x -> concat(splitByChar('/', addressToLine(x))[-1], '#', demangle(addressToSymbol(x)) ), trace)), ';') AS stack, + count(*) AS samples + from system.trace_log + where trace_type = '$trace_type' + group by trace + order by samples desc + settings allow_introspection_functions = 1 + format TabSeparated" \ + | pigz > "/test_output/trace-log-$trace_type-flamegraph.tsv.gz" & +done + wait ||: mv /var/log/clickhouse-server/stderr.log /test_output/ ||: @@ -112,10 +135,13 @@ if [[ -n "$WITH_COVERAGE" ]] && [[ "$WITH_COVERAGE" -eq 1 ]]; then fi tar -chf /test_output/text_log_dump.tar /var/lib/clickhouse/data/system/text_log ||: tar -chf /test_output/query_log_dump.tar /var/lib/clickhouse/data/system/query_log ||: +tar -chf /test_output/coordination.tar /var/lib/clickhouse/coordination ||: if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then pigz < /var/log/clickhouse-server/clickhouse-server1.log > /test_output/clickhouse-server1.log.gz ||: pigz < /var/log/clickhouse-server/clickhouse-server2.log > /test_output/clickhouse-server2.log.gz ||: mv /var/log/clickhouse-server/stderr1.log /test_output/ ||: mv /var/log/clickhouse-server/stderr2.log /test_output/ ||: + tar -chf /test_output/coordination1.tar /var/lib/clickhouse1/coordination ||: + tar -chf /test_output/coordination2.tar /var/lib/clickhouse2/coordination ||: fi diff --git a/docker/test/stateless_unbundled/Dockerfile b/docker/test/stateless_unbundled/Dockerfile index 9efe08dbf23..c5463ac447d 100644 --- a/docker/test/stateless_unbundled/Dockerfile +++ b/docker/test/stateless_unbundled/Dockerfile @@ -14,9 +14,7 @@ RUN apt-get --allow-unauthenticated update -y \ expect \ gdb \ gperf \ - gperf \ heimdal-multidev \ - intel-opencl-icd \ libboost-filesystem-dev \ libboost-iostreams-dev \ libboost-program-options-dev \ @@ -50,9 +48,7 @@ RUN apt-get --allow-unauthenticated update -y \ moreutils \ ncdu \ netcat-openbsd \ - ocl-icd-libopencl1 \ odbcinst \ - opencl-headers \ openssl \ perl \ pigz \ diff --git a/docker/test/stress/run.sh b/docker/test/stress/run.sh index 
3594eead992..43a92fdeebe 100755 --- a/docker/test/stress/run.sh +++ b/docker/test/stress/run.sh @@ -20,6 +20,14 @@ function configure() # since we run clickhouse from root sudo chown root: /var/lib/clickhouse + + # Set more frequent update period of asynchronous metrics to more frequently update information about real memory usage (less chance of OOM). + echo "1" \ + > /etc/clickhouse-server/config.d/asynchronous_metrics_update_period_s.xml + + # Set maximum memory usage as half of total memory (less chance of OOM). + echo "0.5" \ + > /etc/clickhouse-server/config.d/max_server_memory_usage_to_ram_ratio.xml } function stop() @@ -108,6 +116,11 @@ zgrep -Fav "ASan doesn't fully support makecontext/swapcontext functions" > /dev || echo -e 'No sanitizer asserts\tOK' >> /test_output/test_results.tsv rm -f /test_output/tmp +# OOM +zgrep -Fa " Application: Child process was terminated by signal 9" /var/log/clickhouse-server/clickhouse-server.log > /dev/null \ + && echo -e 'OOM killer (or signal 9) in clickhouse-server.log\tFAIL' >> /test_output/test_results.tsv \ + || echo -e 'No OOM messages in clickhouse-server.log\tOK' >> /test_output/test_results.tsv + # Logical errors zgrep -Fa "Code: 49, e.displayText() = DB::Exception:" /var/log/clickhouse-server/clickhouse-server.log > /dev/null \ && echo -e 'Logical error thrown (see clickhouse-server.log)\tFAIL' >> /test_output/test_results.tsv \ @@ -118,7 +131,7 @@ zgrep -Fa "########################################" /var/log/clickhouse-server/ && echo -e 'Killed by signal (in clickhouse-server.log)\tFAIL' >> /test_output/test_results.tsv \ || echo -e 'Not crashed\tOK' >> /test_output/test_results.tsv -# It also checks for OOM or crash without stacktrace (printed by watchdog) +# It also checks for crash without stacktrace (printed by watchdog) zgrep -Fa " " /var/log/clickhouse-server/clickhouse-server.log > /dev/null \ && echo -e 'Fatal message in clickhouse-server.log\tFAIL' >> /test_output/test_results.tsv \ || echo -e 'No fatal messages in clickhouse-server.log\tOK' >> /test_output/test_results.tsv @@ -131,6 +144,7 @@ pigz < /var/log/clickhouse-server/clickhouse-server.log > /test_output/clickhous tar -chf /test_output/coordination.tar /var/lib/clickhouse/coordination ||: mv /var/log/clickhouse-server/stderr.log /test_output/ tar -chf /test_output/query_log_dump.tar /var/lib/clickhouse/data/system/query_log ||: +tar -chf /test_output/trace_log_dump.tar /var/lib/clickhouse/data/system/trace_log ||: # Write check result into check_status.tsv clickhouse-local --structure "test String, res String" -q "SELECT 'failure', test FROM table WHERE res != 'OK' order by (lower(test) like '%hung%') LIMIT 1" < /test_output/test_results.tsv > /test_output/check_status.tsv diff --git a/docker/test/stress/stress b/docker/test/stress/stress index 25a705ecbd1..4fbedceb0b8 100755 --- a/docker/test/stress/stress +++ b/docker/test/stress/stress @@ -1,7 +1,7 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- from multiprocessing import cpu_count -from subprocess import Popen, call, STDOUT +from subprocess import Popen, call, check_output, STDOUT import os import sys import shutil @@ -85,10 +85,27 @@ def prepare_for_hung_check(): # Issue #21004, live views are experimental, so let's just suppress it call("""clickhouse client -q "KILL QUERY WHERE upper(query) LIKE 'WATCH %'" """, shell=True, stderr=STDOUT) - # Wait for last queries to finish if any, not longer than 120 seconds + # Kill other queries which known to be slow + # It's query from 
01232_preparing_sets_race_condition_long, it may take up to 1000 seconds in slow builds + call("""clickhouse client -q "KILL QUERY WHERE query LIKE 'insert into tableB select %'" """, shell=True, stderr=STDOUT) + # Long query from 00084_external_agregation + call("""clickhouse client -q "KILL QUERY WHERE query LIKE 'SELECT URL, uniq(SearchPhrase) AS u FROM test.hits GROUP BY URL ORDER BY u %'" """, shell=True, stderr=STDOUT) + + # Wait for last queries to finish if any, not longer than 300 seconds call("""clickhouse client -q "select sleepEachRow(( - select maxOrDefault(120 - elapsed) + 1 from system.processes where query not like '%from system.processes%' and elapsed < 120 - ) / 120) from numbers(120) format Null" """, shell=True, stderr=STDOUT) + select maxOrDefault(300 - elapsed) + 1 from system.processes where query not like '%from system.processes%' and elapsed < 300 + ) / 300) from numbers(300) format Null" """, shell=True, stderr=STDOUT) + + # Even if all clickhouse-test processes are finished, there are probably some sh scripts, + # which still run some new queries. Let's ignore them. + try: + query = """clickhouse client -q "SELECT count() FROM system.processes where elapsed > 300" """ + output = check_output(query, shell=True, stderr=STDOUT).decode('utf-8').strip() + if int(output) == 0: + return False + except: + pass + return True if __name__ == "__main__": logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s') @@ -119,12 +136,12 @@ if __name__ == "__main__": logging.info("All processes finished") if args.hung_check: - prepare_for_hung_check() + have_long_running_queries = prepare_for_hung_check() logging.info("Checking if some queries hung") cmd = "{} {} {}".format(args.test_cmd, "--hung-check", "00001_select_1") res = call(cmd, shell=True, stderr=STDOUT) hung_check_status = "No queries hung\tOK\n" - if res != 0: + if res != 0 and have_long_running_queries: logging.info("Hung check failed with exit code {}".format(res)) hung_check_status = "Hung check failed\tFAIL\n" open(os.path.join(args.output_folder, "test_results.tsv"), 'w+').write(hung_check_status) diff --git a/docker/test/testflows/runner/Dockerfile b/docker/test/testflows/runner/Dockerfile index bd7eee4c166..ae95c18bc14 100644 --- a/docker/test/testflows/runner/Dockerfile +++ b/docker/test/testflows/runner/Dockerfile @@ -35,10 +35,10 @@ RUN apt-get update \ ENV TZ=Europe/Moscow RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone -RUN pip3 install urllib3 testflows==1.6.74 docker-compose docker dicttoxml kazoo tzlocal +RUN pip3 install urllib3 testflows==1.6.90 docker-compose==1.29.1 docker==5.0.0 dicttoxml kazoo tzlocal python-dateutil numpy ENV DOCKER_CHANNEL stable -ENV DOCKER_VERSION 17.09.1-ce +ENV DOCKER_VERSION 20.10.6 RUN set -eux; \ \ @@ -74,4 +74,3 @@ VOLUME /var/lib/docker EXPOSE 2375 ENTRYPOINT ["dockerd-entrypoint.sh"] CMD ["sh", "-c", "python3 regression.py --no-color -o classic --local --clickhouse-binary-path ${CLICKHOUSE_TESTS_SERVER_BIN_PATH} --log test.log ${TESTFLOWS_OPTS}; cat test.log | tfs report results --format json > results.json; /usr/local/bin/process_testflows_result.py || echo -e 'failure\tCannot parse results' > check_status.tsv"] - diff --git a/docs/en/commercial/cloud.md b/docs/en/commercial/cloud.md index 953a0ab5748..5a897a77db2 100644 --- a/docs/en/commercial/cloud.md +++ b/docs/en/commercial/cloud.md @@ -31,10 +31,10 @@ toc_title: Cloud ## Alibaba Cloud {#alibaba-cloud} -Alibaba Cloud Managed Service for ClickHouse. 
[China Site](https://www.aliyun.com/product/clickhouse) (will be available at the international site in May 2021). Provides the following key features: +[Alibaba Cloud Managed Service for ClickHouse](https://www.alibabacloud.com/product/clickhouse) provides the following key features: - Highly reliable cloud disk storage engine based on [Alibaba Cloud Apsara](https://www.alibabacloud.com/product/apsara-stack) distributed system -- Expand capacity on-demand without manual data migration +- Expand capacity on demand without manual data migration - Support single-node, single-replica, multi-node, and multi-replica architectures, and support hot and cold data tiering - Support access allow-list, one-key recovery, multi-layer network security protection, cloud disk encryption - Seamless integration with cloud log systems, databases, and data application tools diff --git a/docs/en/development/adding_test_queries.md b/docs/en/development/adding_test_queries.md index 4770d48ebd4..db5355393ac 100644 --- a/docs/en/development/adding_test_queries.md +++ b/docs/en/development/adding_test_queries.md @@ -1,6 +1,6 @@ # How to add test queries to ClickHouse CI -ClickHouse has hundreds (or even thousands) of features. Every commit get checked by a complex set of tests containing many thousands of test cases. +ClickHouse has hundreds (or even thousands) of features. Every commit gets checked by a complex set of tests containing many thousands of test cases. The core functionality is very well tested, but some corner-cases and different combinations of features can be uncovered with ClickHouse CI. @@ -105,13 +105,13 @@ clickhouse-client -nmT < tests/queries/0_stateless/01521_dummy_test.sql | tee te 5) ensure everything is correct, if the test output is incorrect (due to some bug for example), adjust the reference file using text editor. -#### How create good test +#### How to create a good test - test should be - minimal - create only tables related to tested functionality, remove unrelated columns and parts of query - fast - should not take longer than few seconds (better subseconds) - correct - fails then feature is not working - - deteministic + - deterministic - isolated / stateless - don't rely on some environment things - don't rely on timing when possible @@ -124,7 +124,7 @@ clickhouse-client -nmT < tests/queries/0_stateless/01521_dummy_test.sql | tee te - clean up the created objects after test and before the test (DROP IF EXISTS) - in case of some dirty state - prefer sync mode of operations (mutations, merges, etc.) - use other SQL files in the `0_stateless` folder as an example -- ensure the feature / feature combination you want to test is not covered yet with existing tests #### Commit / push / create PR. diff --git a/docs/en/development/build-osx.md b/docs/en/development/build-osx.md index 886e85bbf86..f34107ca3d3 100644 --- a/docs/en/development/build-osx.md +++ b/docs/en/development/build-osx.md @@ -5,12 +5,13 @@ toc_title: Build on Mac OS X # How to Build ClickHouse on Mac OS X {#how-to-build-clickhouse-on-mac-os-x} -Build should work on x86_64 (Intel) based macOS 10.15 (Catalina) and higher with recent Xcode's native AppleClang, or Homebrew's vanilla Clang or GCC compilers. +Build should work on x86_64 (Intel) and arm64 (Apple Silicon) based macOS 10.15 (Catalina) and higher with recent Xcode's native AppleClang, or Homebrew's vanilla Clang or GCC compilers. 
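Before installing anything, it can help to confirm the prerequisites just described. A minimal sanity-check sketch using standard macOS tooling; the expected outputs in the comments are illustrative, not mandated by these docs:

```bash
# Sanity-check the macOS build environment described above.
sw_vers -productVersion     # expect 10.15 (Catalina) or newer
uname -m                    # x86_64 (Intel) or arm64 (Apple Silicon)
xcode-select -p             # prints the active developer directory if Xcode/CLT are set up
clang --version             # AppleClang from Xcode, or Homebrew's vanilla Clang
```
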
## Install Homebrew {#install-homebrew}

``` bash
-$ /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
+/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
+# ...and follow the printed instructions on any additional steps required to complete the installation.
```

## Install Xcode and Command Line Tools {#install-xcode-and-command-line-tools}

@@ -22,8 +23,8 @@ Open it at least once to accept the end-user license agreement and automatically
Then, make sure that the latest Command Line Tools are installed and selected in the system:

``` bash
-$ sudo rm -rf /Library/Developer/CommandLineTools
-$ sudo xcode-select --install
+sudo rm -rf /Library/Developer/CommandLineTools
+sudo xcode-select --install
```

Reboot.

@@ -31,14 +32,15 @@ Reboot.

## Install Required Compilers, Tools, and Libraries {#install-required-compilers-tools-and-libraries}

``` bash
-$ brew update
-$ brew install cmake ninja libtool gettext llvm gcc
+brew update
+brew install cmake ninja libtool gettext llvm gcc
```

## Checkout ClickHouse Sources {#checkout-clickhouse-sources}

``` bash
-$ git clone --recursive git@github.com:ClickHouse/ClickHouse.git # or https://github.com/ClickHouse/ClickHouse.git
+git clone --recursive git@github.com:ClickHouse/ClickHouse.git
+# ...alternatively, you can use https://github.com/ClickHouse/ClickHouse.git as the repo URL.
```

## Build ClickHouse {#build-clickhouse}

@@ -46,37 +48,37 @@ $ git clone --recursive git@github.com:ClickHouse/ClickHouse.git # or https://gi
To build using Xcode's native AppleClang compiler:

``` bash
-$ cd ClickHouse
-$ rm -rf build
-$ mkdir build
-$ cd build
-$ cmake -DCMAKE_BUILD_TYPE=RelWithDebInfo -DENABLE_JEMALLOC=OFF ..
-$ cmake --build . --config RelWithDebInfo
-$ cd ..
+cd ClickHouse
+rm -rf build
+mkdir build
+cd build
+cmake -DCMAKE_BUILD_TYPE=RelWithDebInfo ..
+cmake --build . --config RelWithDebInfo
+cd ..
```

To build using Homebrew's vanilla Clang compiler:

``` bash
-$ cd ClickHouse
-$ rm -rf build
-$ mkdir build
-$ cd build
-$ cmake -DCMAKE_C_COMPILER=$(brew --prefix llvm)/bin/clang -DCMAKE_CXX_COMPILER==$(brew --prefix llvm)/bin/clang++ -DCMAKE_BUILD_TYPE=RelWithDebInfo -DENABLE_JEMALLOC=OFF ..
-$ cmake --build . --config RelWithDebInfo
-$ cd ..
+cd ClickHouse
+rm -rf build
+mkdir build
+cd build
+cmake -DCMAKE_C_COMPILER=$(brew --prefix llvm)/bin/clang -DCMAKE_CXX_COMPILER=$(brew --prefix llvm)/bin/clang++ -DCMAKE_BUILD_TYPE=RelWithDebInfo ..
+cmake --build . --config RelWithDebInfo
+cd ..
```

To build using Homebrew's vanilla GCC compiler:

``` bash
-$ cd ClickHouse
-$ rm -rf build
-$ mkdir build
-$ cd build
-$ cmake -DCMAKE_C_COMPILER=$(brew --prefix gcc)/bin/gcc-10 -DCMAKE_CXX_COMPILER=$(brew --prefix gcc)/bin/g++-10 -DCMAKE_BUILD_TYPE=RelWithDebInfo -DENABLE_JEMALLOC=OFF ..
-$ cmake --build . --config RelWithDebInfo
-$ cd ..
+cd ClickHouse
+rm -rf build
+mkdir build
+cd build
+cmake -DCMAKE_C_COMPILER=$(brew --prefix gcc)/bin/gcc-10 -DCMAKE_CXX_COMPILER=$(brew --prefix gcc)/bin/g++-10 -DCMAKE_BUILD_TYPE=RelWithDebInfo ..
+cmake --build . --config RelWithDebInfo
+cd ..
```

## Caveats {#caveats}

@@ -115,11 +117,18 @@ To do so, create the `/Library/LaunchDaemons/limit.maxfiles.plist` file with the
Execute the following command:

``` bash
-$ sudo chown root:wheel /Library/LaunchDaemons/limit.maxfiles.plist
+sudo chown root:wheel /Library/LaunchDaemons/limit.maxfiles.plist
```

Reboot.

To check if it’s working, you can use the `ulimit -n` command.
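For example, a minimal verification sketch (256 is the usual stock macOS per-process default, so a much larger value indicates the override took effect):

``` bash
ulimit -n   # prints the per-process open-file limit; the stock default is typically 256
```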
+## Run ClickHouse Server {#run-clickhouse-server}
+
+``` bash
+cd ClickHouse
+./build/programs/clickhouse-server --config-file ./programs/server/config.xml
+```
+
[Original article](https://clickhouse.tech/docs/en/development/build_osx/)
diff --git a/docs/en/development/build.md b/docs/en/development/build.md
index 3181f26800d..b6cb68f7ff8 100644
--- a/docs/en/development/build.md
+++ b/docs/en/development/build.md
@@ -27,53 +27,20 @@ Or cmake3 instead of cmake on older systems.

On Ubuntu/Debian you can use the automatic installation script (check [official webpage](https://apt.llvm.org/))

-```bash
+```bash
sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)"
```

For other Linux distribution - check the availability of the [prebuild packages](https://releases.llvm.org/download.html) or build clang [from sources](https://clang.llvm.org/get_started.html).

-#### Use clang-11 for Builds {#use-gcc-10-for-builds}
+#### Use clang-11 for Builds

``` bash
$ export CC=clang-11
$ export CXX=clang++-11
```

-### Install GCC 10 {#install-gcc-10}
-
-We recommend building ClickHouse with clang-11, GCC-10 also supported, but it is not used for production builds.
-
-If you want to use GCC-10 there are several ways to install it.
-
-#### Install from Repository {#install-from-repository}
-
-On Ubuntu 19.10 or newer:
-
-    $ sudo apt-get update
-    $ sudo apt-get install gcc-10 g++-10
-
-#### Install from a PPA Package {#install-from-a-ppa-package}
-
-On older Ubuntu:
-
-``` bash
-$ sudo apt-get install software-properties-common
-$ sudo apt-add-repository ppa:ubuntu-toolchain-r/test
-$ sudo apt-get update
-$ sudo apt-get install gcc-10 g++-10
-```
-
-#### Install from Sources {#install-from-sources}
-
-See [utils/ci/build-gcc-from-sources.sh](https://github.com/ClickHouse/ClickHouse/blob/master/utils/ci/build-gcc-from-sources.sh)
-
-#### Use GCC 10 for Builds {#use-gcc-10-for-builds}
-
-``` bash
-$ export CC=gcc-10
-$ export CXX=g++-10
-```
+GCC can also be used, though it is discouraged.

### Checkout ClickHouse Sources {#checkout-clickhouse-sources}

@@ -106,9 +73,9 @@ The build requires the following components:

- Git (is used only to checkout the sources, it’s not needed for the build)
- CMake 3.10 or newer
-- Ninja (recommended) or Make
-- C++ compiler: gcc 10 or clang 8 or newer
-- Linker: lld or gold (the classic GNU ld won’t work)
+- Ninja
+- C++ compiler: clang-11 or newer
+- Linker: lld
- Python (is only used inside LLVM build and it is optional)

If all the components are installed, you may build in the same way as the steps above.
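Condensed into one runnable sequence, a sketch assuming clang-11 was installed as described earlier on this page (it mirrors the distribution examples that follow):

``` bash
export CC=clang-11 CXX=clang++-11
git clone --recursive https://github.com/ClickHouse/ClickHouse.git
mkdir build && cd build
cmake ../ClickHouse
ninja
```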
@@ -116,7 +83,7 @@ If all the components are installed, you may build in the same way as the steps
Example for Ubuntu Eoan:
``` bash
sudo apt update
-sudo apt install git cmake ninja-build g++ python
+sudo apt install git cmake ninja-build clang python
git clone --recursive https://github.com/ClickHouse/ClickHouse.git
mkdir build && cd build
cmake ../ClickHouse
@@ -125,7 +92,7 @@ ninja

Example for OpenSUSE Tumbleweed:
``` bash
-sudo zypper install git cmake ninja gcc-c++ python lld
+sudo zypper install git cmake ninja clang python lld
git clone --recursive https://github.com/ClickHouse/ClickHouse.git
mkdir build && cd build
cmake ../ClickHouse
@@ -135,7 +102,7 @@ ninja

Example for Fedora Rawhide:
``` bash
sudo yum update
-yum --nogpg install git cmake make gcc-c++ python3
+yum --nogpg install git cmake make clang python3
git clone --recursive https://github.com/ClickHouse/ClickHouse.git
mkdir build && cd build
cmake ../ClickHouse
@@ -145,11 +112,11 @@ make -j $(nproc)

## How to Build ClickHouse Debian Package {#how-to-build-clickhouse-debian-package}

-### Install Git and Pbuilder {#install-git-and-pbuilder}
+### Install Git {#install-git}

``` bash
$ sudo apt-get update
-$ sudo apt-get install git python pbuilder debhelper lsb-release fakeroot sudo debian-archive-keyring debian-keyring
+$ sudo apt-get install git python debhelper lsb-release fakeroot sudo debian-archive-keyring debian-keyring
```

### Checkout ClickHouse Sources {#checkout-clickhouse-sources-1}
diff --git a/docs/en/development/cmake-in-clickhouse.md b/docs/en/development/cmake-in-clickhouse.md
deleted file mode 100644
index 6e6ac825587..00000000000
--- a/docs/en/development/cmake-in-clickhouse.md
+++ /dev/null
@@ -1,284 +0,0 @@
-# CMake in ClickHouse
-
-## TL; DR How to make ClickHouse compile and link faster?
-
-Developer only! This command will likely fulfill most of your needs. Run before calling `ninja`.
-
-```cmake
-cmake .. \
-    -DCMAKE_C_COMPILER=/bin/clang-10 \
-    -DCMAKE_CXX_COMPILER=/bin/clang++-10 \
-    -DCMAKE_BUILD_TYPE=Debug \
-    -DENABLE_CLICKHOUSE_ALL=OFF \
-    -DENABLE_CLICKHOUSE_SERVER=ON \
-    -DENABLE_CLICKHOUSE_CLIENT=ON \
-    -DUSE_STATIC_LIBRARIES=OFF \
-    -DSPLIT_SHARED_LIBRARIES=ON \
-    -DENABLE_LIBRARIES=OFF \
-    -DUSE_UNWIND=ON \
-    -DENABLE_UTILS=OFF \
-    -DENABLE_TESTS=OFF
-```
-
-## CMake files types
-
-1. ClickHouse's source CMake files (located in the root directory and in `/src`).
-2. Arch-dependent CMake files (located in `/cmake/*os_name*`).
-3. Libraries finders (search for contrib libraries, located in `/cmake/find`).
-3. Contrib build CMake files (used instead of libraries' own CMake files, located in `/cmake/modules`)
-
-## List of CMake flags
-
-* This list is auto-generated by [this Python script](https://github.com/clickhouse/clickhouse/blob/master/docs/tools/cmake_in_clickhouse_generator.py).
-* The flag name is a link to its position in the code.
-* If an option's default value is itself an option, it's also a link to its position in this list.
-### ClickHouse modes
-
-| Name | Default value | Description | Comment |
-|------|---------------|-------------|---------|
-| [`ENABLE_CLICKHOUSE_ALL`](https://github.com/clickhouse/clickhouse/blob/master/programs/CMakeLists.txt#L8) | `ON` | Enable all ClickHouse modes by default | The `clickhouse` binary is a multi purpose tool that contains multiple execution modes (client, server, etc.), each of them may be built and linked as a separate library. If you do not know what modes you need, turn this option OFF and enable SERVER and CLIENT only.
| -| [`ENABLE_CLICKHOUSE_BENCHMARK`](https://github.com/clickhouse/clickhouse/blob/master/programs/CMakeLists.txt#L18) | `ENABLE_CLICKHOUSE_ALL` | Queries benchmarking mode | https://clickhouse.tech/docs/en/operations/utilities/clickhouse-benchmark/ | -| [`ENABLE_CLICKHOUSE_CLIENT`](https://github.com/clickhouse/clickhouse/blob/master/programs/CMakeLists.txt#L11) | `ENABLE_CLICKHOUSE_ALL` | Client mode (interactive tui/shell that connects to the server) | | -| [`ENABLE_CLICKHOUSE_COMPRESSOR`](https://github.com/clickhouse/clickhouse/blob/master/programs/CMakeLists.txt#L23) | `ENABLE_CLICKHOUSE_ALL` | Data compressor and decompressor | https://clickhouse.tech/docs/en/operations/utilities/clickhouse-compressor/ | -| [`ENABLE_CLICKHOUSE_COPIER`](https://github.com/clickhouse/clickhouse/blob/master/programs/CMakeLists.txt#L26) | `ENABLE_CLICKHOUSE_ALL` | Inter-cluster data copying mode | https://clickhouse.tech/docs/en/operations/utilities/clickhouse-copier/ | -| [`ENABLE_CLICKHOUSE_EXTRACT_FROM_CONFIG`](https://github.com/clickhouse/clickhouse/blob/master/programs/CMakeLists.txt#L20) | `ENABLE_CLICKHOUSE_ALL` | Configs processor (extract values etc.) | | -| [`ENABLE_CLICKHOUSE_FORMAT`](https://github.com/clickhouse/clickhouse/blob/master/programs/CMakeLists.txt#L28) | `ENABLE_CLICKHOUSE_ALL` | Queries pretty-printer and formatter with syntax highlighting | | -| [`ENABLE_CLICKHOUSE_GIT_IMPORT`](https://github.com/clickhouse/clickhouse/blob/master/programs/CMakeLists.txt#L40) | `ENABLE_CLICKHOUSE_ALL` | A tool to analyze Git repositories | https://presentations.clickhouse.tech/matemarketing_2020/ | -| [`ENABLE_CLICKHOUSE_INSTALL`](https://github.com/clickhouse/clickhouse/blob/master/programs/CMakeLists.txt#L44) | `OFF` | Install ClickHouse without .deb/.rpm/.tgz packages (having the binary only) | | -| [`ENABLE_CLICKHOUSE_LOCAL`](https://github.com/clickhouse/clickhouse/blob/master/programs/CMakeLists.txt#L15) | `ENABLE_CLICKHOUSE_ALL` | Local files fast processing mode | https://clickhouse.tech/docs/en/operations/utilities/clickhouse-local/ | -| [`ENABLE_CLICKHOUSE_OBFUSCATOR`](https://github.com/clickhouse/clickhouse/blob/master/programs/CMakeLists.txt#L32) | `ENABLE_CLICKHOUSE_ALL` | Table data obfuscator (convert real data to benchmark-ready one) | https://clickhouse.tech/docs/en/operations/utilities/clickhouse-obfuscator/ | -| [`ENABLE_CLICKHOUSE_ODBC_BRIDGE`](https://github.com/clickhouse/clickhouse/blob/master/programs/CMakeLists.txt#L36) | `ENABLE_CLICKHOUSE_ALL` | HTTP-server working like a proxy to ODBC driver | https://clickhouse.tech/docs/en/operations/utilities/odbc-bridge/ | -| [`ENABLE_CLICKHOUSE_SERVER`](https://github.com/clickhouse/clickhouse/blob/master/programs/CMakeLists.txt#L10) | `ENABLE_CLICKHOUSE_ALL` | Server mode (main mode) | | - -### External libraries -Note that ClickHouse uses forks of these libraries, see https://github.com/ClickHouse-Extras. 
- -| Name | Default value | Description | Comment | -|------|---------------|-------------|---------| -| [`ENABLE_AMQPCPP`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/amqpcpp.cmake#L1) | `ENABLE_LIBRARIES` | Enalbe AMQP-CPP | | -| [`ENABLE_AVRO`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/avro.cmake#L2) | `ENABLE_LIBRARIES` | Enable Avro | Needed when using Apache Avro serialization format | -| [`ENABLE_BASE64`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/base64.cmake#L1) | `ENABLE_LIBRARIES` | Enable base64 | | -| [`ENABLE_BROTLI`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/brotli.cmake#L1) | `ENABLE_LIBRARIES` | Enable brotli | | -| [`ENABLE_CAPNP`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/capnp.cmake#L1) | `ENABLE_LIBRARIES` | Enable Cap'n Proto | | -| [`ENABLE_CASSANDRA`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/cassandra.cmake#L1) | `ENABLE_LIBRARIES` | Enable Cassandra | | -| [`ENABLE_CCACHE`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/ccache.cmake#L22) | `ENABLE_CCACHE_BY_DEFAULT` | Speedup re-compilations using ccache (external tool) | https://ccache.dev/ | -| [`ENABLE_CLANG_TIDY`](https://github.com/clickhouse/clickhouse/blob/master/cmake/analysis.cmake#L2) | `OFF` | Use clang-tidy static analyzer | https://clang.llvm.org/extra/clang-tidy/ | -| [`ENABLE_CURL`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/curl.cmake#L1) | `ENABLE_LIBRARIES` | Enable curl | | -| [`ENABLE_EMBEDDED_COMPILER`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/llvm.cmake#L5) | `ENABLE_LIBRARIES` | Set to TRUE to enable support for 'compile_expressions' option for query execution | | -| [`ENABLE_FASTOPS`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/fastops.cmake#L2) | `ENABLE_LIBRARIES` | Enable fast vectorized mathematical functions library by Mikhail Parakhin | | -| [`ENABLE_GPERF`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/gperf.cmake#L5) | `ENABLE_LIBRARIES` | Use gperf function hash generator tool | | -| [`ENABLE_GRPC`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/grpc.cmake#L8) | `ENABLE_GRPC_DEFAULT` | Use gRPC | | -| [`ENABLE_GSASL_LIBRARY`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/libgsasl.cmake#L1) | `ENABLE_LIBRARIES` | Enable gsasl library | | -| [`ENABLE_H3`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/h3.cmake#L1) | `ENABLE_LIBRARIES` | Enable H3 | | -| [`ENABLE_HDFS`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/hdfs3.cmake#L2) | `ENABLE_LIBRARIES` | Enable HDFS | | -| [`ENABLE_ICU`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/icu.cmake#L2) | `ENABLE_LIBRARIES` | Enable ICU | | -| [`ENABLE_LDAP`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/ldap.cmake#L5) | `ENABLE_LIBRARIES` | Enable LDAP | | -| [`ENABLE_LIBPQXX`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/libpqxx.cmake#L1) | `ENABLE_LIBRARIES` | Enalbe libpqxx | | -| [`ENABLE_MSGPACK`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/msgpack.cmake#L1) | `ENABLE_LIBRARIES` | Enable msgpack library | | -| [`ENABLE_MYSQL`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/mysqlclient.cmake#L2) | `ENABLE_LIBRARIES` | Enable MySQL | | -| [`ENABLE_NURAFT`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/nuraft.cmake#L1) | `ENABLE_LIBRARIES` 
| Enable NuRaft | | -| [`ENABLE_ODBC`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/odbc.cmake#L1) | `ENABLE_LIBRARIES` | Enable ODBC library | | -| [`ENABLE_ORC`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/orc.cmake#L1) | `ENABLE_LIBRARIES` | Enable ORC | | -| [`ENABLE_PARQUET`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/parquet.cmake#L2) | `ENABLE_LIBRARIES` | Enable parquet | | -| [`ENABLE_PROTOBUF`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/protobuf.cmake#L1) | `ENABLE_LIBRARIES` | Enable protobuf | | -| [`ENABLE_RAPIDJSON`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/rapidjson.cmake#L1) | `ENABLE_LIBRARIES` | Use rapidjson | | -| [`ENABLE_RDKAFKA`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/rdkafka.cmake#L1) | `ENABLE_LIBRARIES` | Enable kafka | | -| [`ENABLE_ROCKSDB`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/rocksdb.cmake#L1) | `ENABLE_LIBRARIES` | Enable ROCKSDB | | -| [`ENABLE_S3`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/s3.cmake#L2) | `ENABLE_LIBRARIES` | Enable S3 | | -| [`ENABLE_SSL`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/ssl.cmake#L3) | `ENABLE_LIBRARIES` | Enable ssl | Needed when securely connecting to an external server, e.g. clickhouse-client --host ... --secure | -| [`ENABLE_STATS`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/stats.cmake#L1) | `ENABLE_LIBRARIES` | Enalbe StatsLib library | | - - -### External libraries system/bundled mode - -| Name | Default value | Description | Comment | -|------|---------------|-------------|---------| -| [`USE_INTERNAL_AVRO_LIBRARY`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/avro.cmake#L11) | `ON` | Set to FALSE to use system avro library instead of bundled | | -| [`USE_INTERNAL_AWS_S3_LIBRARY`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/s3.cmake#L14) | `ON` | Set to FALSE to use system S3 instead of bundled (experimental set to OFF on your own risk) | | -| [`USE_INTERNAL_BROTLI_LIBRARY`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/brotli.cmake#L12) | `USE_STATIC_LIBRARIES` | Set to FALSE to use system libbrotli library instead of bundled | Many system ship only dynamic brotly libraries, so we back off to bundled by default | -| [`USE_INTERNAL_CAPNP_LIBRARY`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/capnp.cmake#L10) | `NOT_UNBUNDLED` | Set to FALSE to use system capnproto library instead of bundled | | -| [`USE_INTERNAL_CURL`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/curl.cmake#L10) | `NOT_UNBUNDLED` | Use internal curl library | | -| [`USE_INTERNAL_GRPC_LIBRARY`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/grpc.cmake#L25) | `NOT_UNBUNDLED` | Set to FALSE to use system gRPC library instead of bundled. (Experimental. Set to OFF on your own risk) | Normally we use the internal gRPC framework. You can set USE_INTERNAL_GRPC_LIBRARY to OFF to force using the external gRPC framework, which should be installed in the system in this case. 
The external gRPC framework can be installed in the system by running sudo apt-get install libgrpc++-dev protobuf-compiler-grpc | -| [`USE_INTERNAL_GTEST_LIBRARY`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/gtest.cmake#L3) | `NOT_UNBUNDLED` | Set to FALSE to use system Google Test instead of bundled | | -| [`USE_INTERNAL_H3_LIBRARY`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/h3.cmake#L9) | `ON` | Set to FALSE to use system h3 library instead of bundled | | -| [`USE_INTERNAL_HDFS3_LIBRARY`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/hdfs3.cmake#L14) | `ON` | Set to FALSE to use system HDFS3 instead of bundled (experimental - set to OFF on your own risk) | | -| [`USE_INTERNAL_ICU_LIBRARY`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/icu.cmake#L15) | `NOT_UNBUNDLED` | Set to FALSE to use system ICU library instead of bundled | | -| [`USE_INTERNAL_LDAP_LIBRARY`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/ldap.cmake#L14) | `NOT_UNBUNDLED` | Set to FALSE to use system *LDAP library instead of bundled | | -| [`USE_INTERNAL_LIBCXX_LIBRARY`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/cxx.cmake#L15) | `USE_INTERNAL_LIBCXX_LIBRARY_DEFAULT` | Disable to use system libcxx and libcxxabi libraries instead of bundled | | -| [`USE_INTERNAL_LIBGSASL_LIBRARY`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/libgsasl.cmake#L12) | `USE_STATIC_LIBRARIES` | Set to FALSE to use system libgsasl library instead of bundled | when USE_STATIC_LIBRARIES we usually need to pick up hell a lot of dependencies for libgsasl | -| [`USE_INTERNAL_LIBXML2_LIBRARY`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/libxml2.cmake#L1) | `NOT_UNBUNDLED` | Set to FALSE to use system libxml2 library instead of bundled | | -| [`USE_INTERNAL_LLVM_LIBRARY`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/llvm.cmake#L8) | `NOT_UNBUNDLED` | Use bundled or system LLVM library. | | -| [`USE_INTERNAL_MSGPACK_LIBRARY`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/msgpack.cmake#L10) | `NOT_UNBUNDLED` | Set to FALSE to use system msgpack library instead of bundled | | -| [`USE_INTERNAL_MYSQL_LIBRARY`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/mysqlclient.cmake#L15) | `NOT_UNBUNDLED` | Set to FALSE to use system mysqlclient library instead of bundled | | -| [`USE_INTERNAL_ODBC_LIBRARY`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/odbc.cmake#L22) | `NOT_UNBUNDLED` | Use internal ODBC library | | -| [`USE_INTERNAL_ORC_LIBRARY`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/orc.cmake#L11) | `ON` | Set to FALSE to use system ORC instead of bundled (experimental set to OFF on your own risk) | | -| [`USE_INTERNAL_PARQUET_LIBRARY`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/parquet.cmake#L16) | `NOT_UNBUNDLED` | Set to FALSE to use system parquet library instead of bundled | | -| [`USE_INTERNAL_POCO_LIBRARY`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/poco.cmake#L1) | `ON` | Use internal Poco library | | -| [`USE_INTERNAL_PROTOBUF_LIBRARY`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/protobuf.cmake#L14) | `NOT_UNBUNDLED` | Set to FALSE to use system protobuf instead of bundled. (Experimental. Set to OFF on your own risk) | Normally we use the internal protobuf library. 
You can set USE_INTERNAL_PROTOBUF_LIBRARY to OFF to force using the external protobuf library, which should be installed in the system in this case. The external protobuf library can be installed in the system by running sudo apt-get install libprotobuf-dev protobuf-compiler libprotoc-dev | -| [`USE_INTERNAL_RAPIDJSON_LIBRARY`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/rapidjson.cmake#L9) | `NOT_UNBUNDLED` | Set to FALSE to use system rapidjson library instead of bundled | | -| [`USE_INTERNAL_RDKAFKA_LIBRARY`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/rdkafka.cmake#L10) | `NOT_UNBUNDLED` | Set to FALSE to use system librdkafka instead of the bundled | | -| [`USE_INTERNAL_RE2_LIBRARY`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/re2.cmake#L1) | `NOT_UNBUNDLED` | Set to FALSE to use system re2 library instead of bundled [slower] | | -| [`USE_INTERNAL_ROCKSDB_LIBRARY`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/rocksdb.cmake#L10) | `NOT_UNBUNDLED` | Set to FALSE to use system ROCKSDB library instead of bundled | | -| [`USE_INTERNAL_SNAPPY_LIBRARY`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/snappy.cmake#L10) | `NOT_UNBUNDLED` | Set to FALSE to use system snappy library instead of bundled | | -| [`USE_INTERNAL_SPARSEHASH_LIBRARY`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/sparsehash.cmake#L1) | `ON` | Set to FALSE to use system sparsehash library instead of bundled | | -| [`USE_INTERNAL_SSL_LIBRARY`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/ssl.cmake#L12) | `NOT_UNBUNDLED` | Set to FALSE to use system *ssl library instead of bundled | | -| [`USE_INTERNAL_ZLIB_LIBRARY`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/zlib.cmake#L1) | `NOT_UNBUNDLED` | Set to FALSE to use system zlib library instead of bundled | | -| [`USE_INTERNAL_ZSTD_LIBRARY`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/zstd.cmake#L1) | `NOT_UNBUNDLED` | Set to FALSE to use system zstd library instead of bundled | | - - -### Other flags - -| Name | Default value | Description | Comment | -|------|---------------|-------------|---------| -| [`ADD_GDB_INDEX_FOR_GOLD`](https://github.com/clickhouse/clickhouse/blob/master/CMakeLists.txt#L195) | `OFF` | Add .gdb-index to resulting binaries for gold linker. | Ignored if `lld` is used | -| [`ARCH_NATIVE`](https://github.com/clickhouse/clickhouse/blob/master/CMakeLists.txt#L248) | `OFF` | Add -march=native compiler flag | | -| [`CLICKHOUSE_SPLIT_BINARY`](https://github.com/clickhouse/clickhouse/blob/master/CMakeLists.txt#L98) | `OFF` | Make several binaries (clickhouse-server, clickhouse-client etc.) instead of one bundled | | -| [`COMPILER_PIPE`](https://github.com/clickhouse/clickhouse/blob/master/CMakeLists.txt#L235) | `ON` | -pipe compiler option | Less `/tmp` usage, more RAM usage. | -| [`ENABLE_CHECK_HEAVY_BUILDS`](https://github.com/clickhouse/clickhouse/blob/master/CMakeLists.txt#L69) | `OFF` | Don't allow C++ translation units to compile too long or to take too much memory while compiling | | -| [`ENABLE_FUZZING`](https://github.com/clickhouse/clickhouse/blob/master/CMakeLists.txt#L115) | `OFF` | Fuzzy testing using libfuzzer | Implies `WITH_COVERAGE` | -| [`ENABLE_LIBRARIES`](https://github.com/clickhouse/clickhouse/blob/master/CMakeLists.txt#L357) | `ON` | Enable all external libraries by default | Turns on all external libs like s3, kafka, ODBC, ... 
| -| [`ENABLE_MULTITARGET_CODE`](https://github.com/clickhouse/clickhouse/blob/master/src/Functions/CMakeLists.txt#L100) | `ON` | Enable platform-dependent code | ClickHouse developers may use platform-dependent code under some macro (e.g. `ifdef ENABLE_MULTITARGET`). If turned ON, this option defines such macro. See `src/Functions/TargetSpecific.h` | -| [`ENABLE_TESTS`](https://github.com/clickhouse/clickhouse/blob/master/CMakeLists.txt#L154) | `ON` | Provide unit_test_dbms target with Google.Test unit tests | If turned `ON`, assumes the user has either the system GTest library or the bundled one. | -| [`ENABLE_THINLTO`](https://github.com/clickhouse/clickhouse/blob/master/CMakeLists.txt#L313) | `ON` | Clang-specific link time optimization | https://clang.llvm.org/docs/ThinLTO.html Applies to clang only. Disabled when building with tests or sanitizers. | -| [`FAIL_ON_UNSUPPORTED_OPTIONS_COMBINATION`](https://github.com/clickhouse/clickhouse/blob/master/CMakeLists.txt#L32) | `ON` | Stop/Fail CMake configuration if some ENABLE_XXX option is defined (either ON or OFF) but is not possible to satisfy | If turned off: e.g. when ENABLE_FOO is ON, but FOO tool was not found, the CMake will continue. | -| [`GLIBC_COMPATIBILITY`](https://github.com/clickhouse/clickhouse/blob/master/CMakeLists.txt#L159) | `ON` | Enable compatibility with older glibc libraries. | Only for Linux, x86_64. Implies `ENABLE_FASTMEMCPY` | -| [`LINKER_NAME`](https://github.com/clickhouse/clickhouse/blob/master/cmake/tools.cmake#L44) | `OFF` | Linker name or full path | Example values: `lld-10`, `gold`. | -| [`LLVM_HAS_RTTI`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/llvm.cmake#L40) | `ON` | Enable if LLVM was build with RTTI enabled | | -| [`MAKE_STATIC_LIBRARIES`](https://github.com/clickhouse/clickhouse/blob/master/CMakeLists.txt#L91) | `USE_STATIC_LIBRARIES` | Disable to make shared libraries | | -| [`PARALLEL_COMPILE_JOBS`](https://github.com/clickhouse/clickhouse/blob/master/cmake/limit_jobs.cmake#L10) | `""` | Maximum number of concurrent compilation jobs | 1 if not set | -| [`PARALLEL_LINK_JOBS`](https://github.com/clickhouse/clickhouse/blob/master/cmake/limit_jobs.cmake#L13) | `""` | Maximum number of concurrent link jobs | 1 if not set | -| [`SANITIZE`](https://github.com/clickhouse/clickhouse/blob/master/cmake/sanitize.cmake#L7) | `""` | Enable one of the code sanitizers | Possible values: - `address` (ASan) - `memory` (MSan) - `thread` (TSan) - `undefined` (UBSan) - "" (no sanitizing) | -| [`SPLIT_SHARED_LIBRARIES`](https://github.com/clickhouse/clickhouse/blob/master/CMakeLists.txt#L96) | `OFF` | Keep all internal libraries as separate .so files | DEVELOPER ONLY. Faster linking if turned on. | -| [`STRIP_DEBUG_SYMBOLS_FUNCTIONS`](https://github.com/clickhouse/clickhouse/blob/master/src/Functions/CMakeLists.txt#L49) | `STRIP_DSF_DEFAULT` | Do not generate debugger info for ClickHouse functions | Provides faster linking and lower binary size. Tradeoff is the inability to debug some source files with e.g. gdb (empty stack frames and no local variables)." | -| [`UNBUNDLED`](https://github.com/clickhouse/clickhouse/blob/master/CMakeLists.txt#L363) | `OFF` | Use system libraries instead of ones in contrib/ | We recommend avoiding this mode for production builds because we can't guarantee all needed libraries exist in your system. This mode exists for enthusiastic developers who are searching for trouble. Useful for maintainers of OS packages. 
| -| [`USE_INCLUDE_WHAT_YOU_USE`](https://github.com/clickhouse/clickhouse/blob/master/CMakeLists.txt#L418) | `OFF` | Automatically reduce unneeded includes in source code (external tool) | https://github.com/include-what-you-use/include-what-you-use | -| [`USE_LIBCXX`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/cxx.cmake#L1) | `NOT_UNBUNDLED` | Use libc++ and libc++abi instead of libstdc++ | | -| [`USE_SENTRY`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/sentry.cmake#L13) | `ENABLE_LIBRARIES` | Use Sentry | | -| [`USE_SIMDJSON`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/simdjson.cmake#L1) | `ENABLE_LIBRARIES` | Use simdjson | | -| [`USE_SNAPPY`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/snappy.cmake#L1) | `ENABLE_LIBRARIES` | Enable snappy library | | -| [`USE_STATIC_LIBRARIES`](https://github.com/clickhouse/clickhouse/blob/master/CMakeLists.txt#L90) | `ON` | Disable to use shared libraries | | -| [`USE_UNWIND`](https://github.com/clickhouse/clickhouse/blob/master/cmake/find/unwind.cmake#L1) | `ENABLE_LIBRARIES` | Enable libunwind (better stacktraces) | | -| [`WERROR`](https://github.com/clickhouse/clickhouse/blob/master/CMakeLists.txt#L373) | `OFF` | Enable -Werror compiler option | Using system libs can cause a lot of warnings in includes (on macro expansion). | -| [`WEVERYTHING`](https://github.com/clickhouse/clickhouse/blob/master/cmake/warnings.cmake#L22) | `ON` | Enable -Weverything option with some exceptions. | Add some warnings that are not available even with -Wall -Wextra -Wpedantic. Intended for exploration of new compiler warnings that may be found useful. Applies to clang only | -| [`WITH_COVERAGE`](https://github.com/clickhouse/clickhouse/blob/master/CMakeLists.txt#L274) | `OFF` | Profile the resulting binary/binaries | Compiler-specific coverage flags e.g. -fcoverage-mapping for gcc | - -## Developer's guide for adding new CMake options - -### Don't be obvious. Be informative. - -Bad: -```cmake -option (ENABLE_TESTS "Enables testing" OFF) -``` - -This description is quite useless as is neither gives the viewer any additional information nor explains the option purpose. - -Better: - -```cmake -option(ENABLE_TESTS "Provide unit_test_dbms target with Google.test unit tests" OFF) -``` - -If the option's purpose can't be guessed by its name, or the purpose guess may be misleading, or option has some -pre-conditions, leave a comment above the `option()` line and explain what it does. -The best way would be linking the docs page (if it exists). -The comment is parsed into a separate column (see below). - -Even better: - -```cmake -# implies ${TESTS_ARE_ENABLED} -# see tests/CMakeLists.txt for implementation detail. -option(ENABLE_TESTS "Provide unit_test_dbms target with Google.test unit tests" OFF) -``` - -### If the option's state could produce unwanted (or unusual) result, explicitly warn the user. - -Suppose you have an option that may strip debug symbols from the ClickHouse's part. -This can speed up the linking process, but produces a binary that cannot be debugged. -In that case, prefer explicitly raising a warning telling the developer that he may be doing something wrong. -Also, such options should be disabled if applies. - -Bad: -```cmake -option(STRIP_DEBUG_SYMBOLS_FUNCTIONS - "Do not generate debugger info for ClickHouse functions. 
- ${STRIP_DSF_DEFAULT}) - -if (STRIP_DEBUG_SYMBOLS_FUNCTIONS) - target_compile_options(clickhouse_functions PRIVATE "-g0") -endif() - -``` -Better: - -```cmake -# Provides faster linking and lower binary size. -# Tradeoff is the inability to debug some source files with e.g. gdb -# (empty stack frames and no local variables)." -option(STRIP_DEBUG_SYMBOLS_FUNCTIONS - "Do not generate debugger info for ClickHouse functions." - ${STRIP_DSF_DEFAULT}) - -if (STRIP_DEBUG_SYMBOLS_FUNCTIONS) - message(WARNING "Not generating debugger info for ClickHouse functions") - target_compile_options(clickhouse_functions PRIVATE "-g0") -endif() -``` - -### In the option's description, explain WHAT the option does rather than WHY it does something. - -The WHY explanation should be placed in the comment. -You may find that the option's name is self-descriptive. - -Bad: - -```cmake -option(ENABLE_THINLTO "Enable Thin LTO. Only applicable for clang. It's also suppressed when building with tests or sanitizers." ON) -``` - -Better: - -```cmake -# Only applicable for clang. -# Turned off when building with tests or sanitizers. -option(ENABLE_THINLTO "Clang-specific link time optimisation" ON). -``` - -### Don't assume other developers know as much as you do. - -In ClickHouse, there are many tools used that an ordinary developer may not know. If you are in doubt, give a link to -the tool's docs. It won't take much of your time. - -Bad: - -```cmake -option(ENABLE_THINLTO "Enable Thin LTO. Only applicable for clang. It's also suppressed when building with tests or sanitizers." ON) -``` - -Better (combined with the above hint): - -```cmake -# https://clang.llvm.org/docs/ThinLTO.html -# Only applicable for clang. -# Turned off when building with tests or sanitizers. -option(ENABLE_THINLTO "Clang-specific link time optimisation" ON). -``` - -Other example, bad: - -```cmake -option (USE_INCLUDE_WHAT_YOU_USE "Use 'include-what-you-use' tool" OFF) -``` - -Better: - -```cmake -# https://github.com/include-what-you-use/include-what-you-use -option (USE_INCLUDE_WHAT_YOU_USE "Reduce unneeded #include s (external tool)" OFF) -``` - -### Prefer consistent default values. - -CMake allows you to pass a plethora of values representing boolean `true/false`, e.g. `1, ON, YES, ...`. -Prefer the `ON/OFF` values, if possible. 
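For reference, the flags tabulated above are ordinary CMake cache options and are toggled at configure time; a minimal sketch, with flag names taken from the TL;DR at the top of this (removed) page:

``` bash
# a stripped-down developer configuration; flag names are from the tables above
cmake .. -DENABLE_TESTS=OFF -DENABLE_CLICKHOUSE_ALL=OFF -DENABLE_CLICKHOUSE_SERVER=ON -DENABLE_CLICKHOUSE_CLIENT=ON
```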
diff --git a/docs/en/development/contrib.md b/docs/en/development/contrib.md index 76a2f647231..64ca2387029 100644 --- a/docs/en/development/contrib.md +++ b/docs/en/development/contrib.md @@ -5,36 +5,87 @@ toc_title: Third-Party Libraries Used # Third-Party Libraries Used {#third-party-libraries-used} -| Library | License | -|---------------------|----------------------------------------------------------------------------------------------------------------------------------------------| -| base64 | [BSD 2-Clause License](https://github.com/aklomp/base64/blob/a27c565d1b6c676beaf297fe503c4518185666f7/LICENSE) | -| boost | [Boost Software License 1.0](https://github.com/ClickHouse-Extras/boost-extra/blob/6883b40449f378019aec792f9983ce3afc7ff16e/LICENSE_1_0.txt) | -| brotli | [MIT](https://github.com/google/brotli/blob/master/LICENSE) | -| capnproto | [MIT](https://github.com/capnproto/capnproto/blob/master/LICENSE) | -| cctz | [Apache License 2.0](https://github.com/google/cctz/blob/4f9776a310f4952454636363def82c2bf6641d5f/LICENSE.txt) | -| double-conversion | [BSD 3-Clause License](https://github.com/google/double-conversion/blob/cf2f0f3d547dc73b4612028a155b80536902ba02/LICENSE) | -| FastMemcpy | [MIT](https://github.com/ClickHouse/ClickHouse/blob/master/libs/libmemcpy/impl/LICENSE) | -| googletest | [BSD 3-Clause License](https://github.com/google/googletest/blob/master/LICENSE) | -| h3 | [Apache License 2.0](https://github.com/uber/h3/blob/master/LICENSE) | -| hyperscan | [BSD 3-Clause License](https://github.com/intel/hyperscan/blob/master/LICENSE) | -| libcxxabi | [BSD + MIT](https://github.com/ClickHouse/ClickHouse/blob/master/libs/libglibc-compatibility/libcxxabi/LICENSE.TXT) | -| libdivide | [Zlib License](https://github.com/ClickHouse/ClickHouse/blob/master/contrib/libdivide/LICENSE.txt) | -| libgsasl | [LGPL v2.1](https://github.com/ClickHouse-Extras/libgsasl/blob/3b8948a4042e34fb00b4fb987535dc9e02e39040/LICENSE) | -| libhdfs3 | [Apache License 2.0](https://github.com/ClickHouse-Extras/libhdfs3/blob/bd6505cbb0c130b0db695305b9a38546fa880e5a/LICENSE.txt) | -| libmetrohash | [Apache License 2.0](https://github.com/ClickHouse/ClickHouse/blob/master/contrib/libmetrohash/LICENSE) | -| libpcg-random | [Apache License 2.0](https://github.com/ClickHouse/ClickHouse/blob/master/contrib/libpcg-random/LICENSE-APACHE.txt) | -| libressl | [OpenSSL License](https://github.com/ClickHouse-Extras/ssl/blob/master/COPYING) | -| librdkafka | [BSD 2-Clause License](https://github.com/edenhill/librdkafka/blob/363dcad5a23dc29381cc626620e68ae418b3af19/LICENSE) | -| libwidechar_width | [CC0 1.0 Universal](https://github.com/ClickHouse/ClickHouse/blob/master/libs/libwidechar_width/LICENSE) | -| llvm | [BSD 3-Clause License](https://github.com/ClickHouse-Extras/llvm/blob/163def217817c90fb982a6daf384744d8472b92b/llvm/LICENSE.TXT) | -| lz4 | [BSD 2-Clause License](https://github.com/lz4/lz4/blob/c10863b98e1503af90616ae99725ecd120265dfb/LICENSE) | -| mariadb-connector-c | [LGPL v2.1](https://github.com/ClickHouse-Extras/mariadb-connector-c/blob/3.1/COPYING.LIB) | -| murmurhash | [Public Domain](https://github.com/ClickHouse/ClickHouse/blob/master/contrib/murmurhash/LICENSE) | -| pdqsort | [Zlib License](https://github.com/ClickHouse/ClickHouse/blob/master/contrib/pdqsort/license.txt) | -| poco | [Boost Software License - Version 1.0](https://github.com/ClickHouse-Extras/poco/blob/fe5505e56c27b6ecb0dcbc40c49dc2caf4e9637f/LICENSE) | -| protobuf | [BSD 3-Clause 
License](https://github.com/ClickHouse-Extras/protobuf/blob/12735370922a35f03999afff478e1c6d7aa917a4/LICENSE) | -| re2 | [BSD 3-Clause License](https://github.com/google/re2/blob/7cf8b88e8f70f97fd4926b56aa87e7f53b2717e0/LICENSE) | -| sentry-native | [MIT License](https://github.com/getsentry/sentry-native/blob/master/LICENSE) | -| UnixODBC | [LGPL v2.1](https://github.com/ClickHouse-Extras/UnixODBC/tree/b0ad30f7f6289c12b76f04bfb9d466374bb32168) | -| zlib-ng | [Zlib License](https://github.com/ClickHouse-Extras/zlib-ng/blob/develop/LICENSE.md) | -| zstd | [BSD 3-Clause License](https://github.com/facebook/zstd/blob/dev/LICENSE) | +The list of third-party libraries can be obtained by the following query: + +``` +SELECT library_name, license_type, license_path FROM system.licenses ORDER BY library_name COLLATE 'en' +``` + +[Example](https://gh-api.clickhouse.tech/play?user=play#U0VMRUNUIGxpYnJhcnlfbmFtZSwgbGljZW5zZV90eXBlLCBsaWNlbnNlX3BhdGggRlJPTSBzeXN0ZW0ubGljZW5zZXMgT1JERVIgQlkgbGlicmFyeV9uYW1lIENPTExBVEUgJ2VuJw==) + +| library_name | license_type | license_path | +|:-|:-|:-| +| abseil-cpp | Apache | /contrib/abseil-cpp/LICENSE | +| AMQP-CPP | Apache | /contrib/AMQP-CPP/LICENSE | +| arrow | Apache | /contrib/arrow/LICENSE.txt | +| avro | Apache | /contrib/avro/LICENSE.txt | +| aws | Apache | /contrib/aws/LICENSE.txt | +| aws-c-common | Apache | /contrib/aws-c-common/LICENSE | +| aws-c-event-stream | Apache | /contrib/aws-c-event-stream/LICENSE | +| aws-checksums | Apache | /contrib/aws-checksums/LICENSE | +| base64 | BSD 2-clause | /contrib/base64/LICENSE | +| boost | Boost | /contrib/boost/LICENSE_1_0.txt | +| boringssl | BSD | /contrib/boringssl/LICENSE | +| brotli | MIT | /contrib/brotli/LICENSE | +| capnproto | MIT | /contrib/capnproto/LICENSE | +| cassandra | Apache | /contrib/cassandra/LICENSE.txt | +| cctz | Apache | /contrib/cctz/LICENSE.txt | +| cityhash102 | MIT | /contrib/cityhash102/COPYING | +| cppkafka | BSD 2-clause | /contrib/cppkafka/LICENSE | +| croaring | Apache | /contrib/croaring/LICENSE | +| curl | Apache | /contrib/curl/docs/LICENSE-MIXING.md | +| cyrus-sasl | BSD 2-clause | /contrib/cyrus-sasl/COPYING | +| double-conversion | BSD 3-clause | /contrib/double-conversion/LICENSE | +| dragonbox | Apache | /contrib/dragonbox/LICENSE-Apache2-LLVM | +| fast_float | Apache | /contrib/fast_float/LICENSE | +| fastops | MIT | /contrib/fastops/LICENSE | +| flatbuffers | Apache | /contrib/flatbuffers/LICENSE.txt | +| fmtlib | Unknown | /contrib/fmtlib/LICENSE.rst | +| gcem | Apache | /contrib/gcem/LICENSE | +| googletest | BSD 3-clause | /contrib/googletest/LICENSE | +| grpc | Apache | /contrib/grpc/LICENSE | +| h3 | Apache | /contrib/h3/LICENSE | +| hyperscan | Boost | /contrib/hyperscan/LICENSE | +| icu | Public Domain | /contrib/icu/icu4c/LICENSE | +| icudata | Public Domain | /contrib/icudata/LICENSE | +| jemalloc | BSD 2-clause | /contrib/jemalloc/COPYING | +| krb5 | MIT | /contrib/krb5/src/lib/gssapi/LICENSE | +| libc-headers | LGPL | /contrib/libc-headers/LICENSE | +| libcpuid | BSD 2-clause | /contrib/libcpuid/COPYING | +| libcxx | Apache | /contrib/libcxx/LICENSE.TXT | +| libcxxabi | Apache | /contrib/libcxxabi/LICENSE.TXT | +| libdivide | zLib | /contrib/libdivide/LICENSE.txt | +| libfarmhash | MIT | /contrib/libfarmhash/COPYING | +| libgsasl | LGPL | /contrib/libgsasl/LICENSE | +| libhdfs3 | Apache | /contrib/libhdfs3/LICENSE.txt | +| libmetrohash | Apache | /contrib/libmetrohash/LICENSE | +| libpq | Unknown | /contrib/libpq/COPYRIGHT | +| libpqxx | BSD 3-clause | 
/contrib/libpqxx/COPYING |
+| librdkafka | MIT | /contrib/librdkafka/LICENSE.murmur2 |
+| libunwind | Apache | /contrib/libunwind/LICENSE.TXT |
+| libuv | BSD | /contrib/libuv/LICENSE |
+| llvm | Apache | /contrib/llvm/llvm/LICENSE.TXT |
+| lz4 | BSD | /contrib/lz4/LICENSE |
+| mariadb-connector-c | LGPL | /contrib/mariadb-connector-c/COPYING.LIB |
+| miniselect | Boost | /contrib/miniselect/LICENSE_1_0.txt |
+| msgpack-c | Boost | /contrib/msgpack-c/LICENSE_1_0.txt |
+| murmurhash | Public Domain | /contrib/murmurhash/LICENSE |
+| NuRaft | Apache | /contrib/NuRaft/LICENSE |
+| openldap | Unknown | /contrib/openldap/LICENSE |
+| orc | Apache | /contrib/orc/LICENSE |
+| poco | Boost | /contrib/poco/LICENSE |
+| protobuf | BSD 3-clause | /contrib/protobuf/LICENSE |
+| rapidjson | MIT | /contrib/rapidjson/bin/jsonschema/LICENSE |
+| re2 | BSD 3-clause | /contrib/re2/LICENSE |
+| replxx | BSD 3-clause | /contrib/replxx/LICENSE.md |
+| rocksdb | BSD 3-clause | /contrib/rocksdb/LICENSE.leveldb |
+| sentry-native | MIT | /contrib/sentry-native/LICENSE |
+| simdjson | Apache | /contrib/simdjson/LICENSE |
+| snappy | Public Domain | /contrib/snappy/COPYING |
+| sparsehash-c11 | BSD 3-clause | /contrib/sparsehash-c11/LICENSE |
+| stats | Apache | /contrib/stats/LICENSE |
+| thrift | Apache | /contrib/thrift/LICENSE |
+| unixodbc | LGPL | /contrib/unixodbc/COPYING |
+| xz | Public Domain | /contrib/xz/COPYING |
+| zlib-ng | zLib | /contrib/zlib-ng/LICENSE.md |
+| zstd | BSD | /contrib/zstd/LICENSE |
diff --git a/docs/en/development/developer-instruction.md b/docs/en/development/developer-instruction.md
index 5511e8e19c7..35ca4725af8 100644
--- a/docs/en/development/developer-instruction.md
+++ b/docs/en/development/developer-instruction.md
@@ -131,17 +131,18 @@ ClickHouse uses several external libraries for building. All of them do not need

## C++ Compiler {#c-compiler}

-Compilers GCC starting from version 10 and Clang version 8 or above are supported for building ClickHouse.
+Only Clang starting from version 11 is supported for building ClickHouse.

-Official Yandex builds currently use GCC because it generates machine code of slightly better performance (yielding a difference of up to several percent according to our benchmarks). And Clang is more convenient for development usually. Though, our continuous integration (CI) platform runs checks for about a dozen of build combinations.
+Clang should be used instead of gcc. Still, our continuous integration (CI) platform runs checks for about a dozen build combinations.

-To install GCC on Ubuntu run: `sudo apt install gcc g++`
+On Ubuntu/Debian you can use the automatic installation script (check [official webpage](https://apt.llvm.org/))

-Check the version of gcc: `gcc --version`. If it is below 10, then follow the instruction here: https://clickhouse.tech/docs/en/development/build/#install-gcc-10.
+```bash
+sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)"
+```

-Mac OS X build is supported only for Clang. Just run `brew install llvm`
+Mac OS X build is also supported. Just run `brew install llvm`.

-If you decide to use Clang, you can also install `libc++` and `lld`, if you know what it is. Using `ccache` is also recommended.

## The Building Process {#the-building-process}

@@ -152,14 +153,7 @@ Now that you are ready to build ClickHouse we recommend you to create a separate
You can have several different directories (build_release, build_debug, etc.) for different types of build.
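For instance, two configurations can be kept side by side; a minimal sketch (the directory names are arbitrary, and the compiler variables follow the instructions just below):

``` bash
mkdir build_release build_debug
(cd build_release && CC=clang CXX=clang++ cmake -DCMAKE_BUILD_TYPE=RelWithDebInfo ..)
(cd build_debug && CC=clang CXX=clang++ cmake -DCMAKE_BUILD_TYPE=Debug ..)
```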
-While inside the `build` directory, configure your build by running CMake. Before the first run, you need to define environment variables that specify compiler (version 10 gcc compiler in this example).
-
-Linux:
-
-    export CC=gcc-10 CXX=g++-10
-    cmake ..
-
-Mac OS X:
+While inside the `build` directory, configure your build by running CMake. Before the first run, you need to define environment variables that specify the compiler.

    export CC=clang CXX=clang++
    cmake ..
diff --git a/docs/en/development/style.md b/docs/en/development/style.md
index 4c620c44aef..b27534d9890 100644
--- a/docs/en/development/style.md
+++ b/docs/en/development/style.md
@@ -701,7 +701,7 @@ But other things being equal, cross-platform or portable code is preferred.

**2.** Language: C++20 (see the list of available [C++20 features](https://en.cppreference.com/w/cpp/compiler_support#C.2B.2B20_features)).

-**3.** Compiler: `gcc`. At this time (August 2020), the code is compiled using version 9.3. (It can also be compiled using `clang 8`.)
+**3.** Compiler: `clang`. At this time (April 2021), the code is compiled using clang version 11. (It can also be compiled using `gcc` version 10, but it's untested and not suitable for production usage.)

The standard library is used (`libc++`).

@@ -711,7 +711,7 @@ The standard library is used (`libc++`).

The CPU instruction set is the minimum supported set among our servers. Currently, it is SSE 4.2.

-**6.** Use `-Wall -Wextra -Werror` compilation flags.
+**6.** Use `-Wall -Wextra -Werror` compilation flags. Also, `-Weverything` is used, with a few exceptions.

**7.** Use static linking with all libraries except those that are difficult to connect to statically (see the output of the `ldd` command).
diff --git a/docs/en/engines/table-engines/integrations/postgresql.md b/docs/en/engines/table-engines/integrations/postgresql.md
index ad5bebb3dea..4474b764d2e 100644
--- a/docs/en/engines/table-engines/integrations/postgresql.md
+++ b/docs/en/engines/table-engines/integrations/postgresql.md
@@ -94,10 +94,10 @@ postgres=# INSERT INTO test (int_id, str, "float") VALUES (1,'test',2);
INSERT 0 1

postgresql> SELECT * FROM test;
- int_id | int_nullable | float | str  | float_nullable
---------+--------------+-------+------+----------------
-      1 |              |     2 | test |
-(1 row)
+ int_id | int_nullable | float | str  | float_nullable
+ --------+--------------+-------+------+----------------
+      1 |              |     2 | test |
+ (1 row)
```

Table in ClickHouse, retrieving data from the PostgreSQL table created above:
diff --git a/docs/en/engines/table-engines/integrations/s3.md b/docs/en/engines/table-engines/integrations/s3.md
index 3d02aa13812..a27308b9b3f 100644
--- a/docs/en/engines/table-engines/integrations/s3.md
+++ b/docs/en/engines/table-engines/integrations/s3.md
@@ -11,34 +11,34 @@ This engine provides integration with [Amazon S3](https://aws.amazon.com/s3/) ec

``` sql
CREATE TABLE s3_engine_table (name String, value UInt32)
-ENGINE = S3(path, [aws_access_key_id, aws_secret_access_key,] format, structure, [compression])
+ENGINE = S3(path, [aws_access_key_id, aws_secret_access_key,] format, [compression])
```

**Engine parameters**

- `path` — Bucket url with path to file. Supports following wildcards in readonly mode: `*`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` — numbers, `'abc'`, `'def'` — strings. For more information see [below](#wildcards-in-path).
- `format` — The [format](../../../interfaces/formats.md#formats) of the file.
-- `structure` — Structure of the table. Format `'column1_name column1_type, column2_name column2_type, ...'`.
-- `compression` — Compression type. Supported values: none, gzip/gz, brotli/br, xz/LZMA, zstd/zst. Parameter is optional. By default, it will autodetect compression by file extension.
+- `aws_access_key_id`, `aws_secret_access_key` - Long-term credentials for the [AWS](https://aws.amazon.com/) account user. You can use these to authenticate your requests. Parameter is optional. If credentials are not specified, they are taken from the configuration file. For more information see [Using S3 for Data Storage](../mergetree-family/mergetree.md#table_engine-mergetree-s3).
+- `compression` — Compression type. Supported values: `none`, `gzip/gz`, `brotli/br`, `xz/LZMA`, `zstd/zst`. Parameter is optional. By default, it will autodetect compression by file extension.

-**Example:**
+**Example**

-**1.** Set up the `s3_engine_table` table:
+1. Set up the `s3_engine_table` table:

-```sql
-CREATE TABLE s3_engine_table (name String, value UInt32) ENGINE=S3('https://storage.yandexcloud.net/my-test-bucket-768/test-data.csv.gz', 'CSV', 'name String, value UInt32', 'gzip')
+``` sql
+CREATE TABLE s3_engine_table (name String, value UInt32) ENGINE=S3('https://storage.yandexcloud.net/my-test-bucket-768/test-data.csv.gz', 'CSV', 'gzip');
```

-**2.** Fill file:
+2. Fill the file:

-```sql
-INSERT INTO s3_engine_table VALUES ('one', 1), ('two', 2), ('three', 3)
+``` sql
+INSERT INTO s3_engine_table VALUES ('one', 1), ('two', 2), ('three', 3);
```

-**3.** Query the data:
+3. Query the data:

-```sql
-SELECT * FROM s3_engine_table LIMIT 2
+``` sql
+SELECT * FROM s3_engine_table LIMIT 2;
```

```text
@@ -73,13 +73,63 @@ For more information about virtual columns see [here](../../../engines/table-eng

Constructions with `{}` are similar to the [remote](../../../sql-reference/table-functions/remote.md) table function.

-## S3-related Settings {#s3-settings}
+**Example**
+
+1. Suppose we have several files in CSV format with the following URIs on S3:
+
+- ‘https://storage.yandexcloud.net/my-test-bucket-768/some_prefix/some_file_1.csv’
+- ‘https://storage.yandexcloud.net/my-test-bucket-768/some_prefix/some_file_2.csv’
+- ‘https://storage.yandexcloud.net/my-test-bucket-768/some_prefix/some_file_3.csv’
+- ‘https://storage.yandexcloud.net/my-test-bucket-768/another_prefix/some_file_1.csv’
+- ‘https://storage.yandexcloud.net/my-test-bucket-768/another_prefix/some_file_2.csv’
+- ‘https://storage.yandexcloud.net/my-test-bucket-768/another_prefix/some_file_3.csv’
+
+There are several ways to make a table consisting of all six files:
+
+The first way:
+
+``` sql
+CREATE TABLE table_with_range (name String, value UInt32) ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/{some,another}_prefix/some_file_{1..3}', 'CSV');
+```
+
+Another way:
+
+``` sql
+CREATE TABLE table_with_question_mark (name String, value UInt32) ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/{some,another}_prefix/some_file_?', 'CSV');
+```
+
+The table consists of all the files in both directories (all files should satisfy the format and schema described in the query):
+
+``` sql
+CREATE TABLE table_with_asterisk (name String, value UInt32) ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/{some,another}_prefix/*', 'CSV');
+```
+
+If the listing of files contains number ranges with leading zeros, use the construction with braces for each digit separately or use `?`.
+
+**Example**
+
+Create a table with files named `file-000.csv`, `file-001.csv`, … , `file-999.csv`:
+
+``` sql
+CREATE TABLE big_table (name String, value UInt32) ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/big_prefix/file-{000..999}.csv', 'CSV');
+```
+
+## Virtual Columns {#virtual-columns}
+
+- `_path` — Path to the file.
+- `_file` — Name of the file.
+
+**See Also**
+
+- [Virtual columns](../../../engines/table-engines/index.md#table_engines-virtual_columns)
+
+## S3-related settings {#settings}

The following settings can be set before query execution or placed into configuration file.

-- `s3_max_single_part_upload_size` — The maximum size of object to upload using singlepart upload to S3. Default value is `64Mb`.
+- `s3_max_single_part_upload_size` — The maximum size of object to upload using singlepart upload to S3. Default value is `64Mb`.
- `s3_min_upload_part_size` — The minimum size of part to upload during multipart upload to [S3 Multipart upload](https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html). Default value is `512Mb`.
-- `s3_max_redirects` — Max number of S3 redirects hops allowed. Default value is `10`.
+- `s3_max_redirects` — Max number of S3 redirect hops allowed. Default value is `10`.

Security consideration: if a malicious user can specify arbitrary S3 URLs, `s3_max_redirects` must be set to zero to avoid [SSRF](https://en.wikipedia.org/wiki/Server-side_request_forgery) attacks; or alternatively, `remote_host_filter` must be specified in server configuration.

@@ -89,7 +139,9 @@ The following settings can be specified in configuration file for given endpoint

- `endpoint` — Specifies prefix of an endpoint. Mandatory.
- `access_key_id` and `secret_access_key` — Specifies credentials to use with given endpoint. Optional.
-- `use_environment_credentials` — If set to `true`, S3 client will try to obtain credentials from environment variables and Amazon EC2 metadata for given endpoint. Optional, default value is `false`.
+- `use_environment_credentials` — If set to `true`, S3 client will try to obtain credentials from environment variables and [Amazon EC2](https://en.wikipedia.org/wiki/Amazon_Elastic_Compute_Cloud) metadata for given endpoint. Optional, default value is `false`.
+- `region` — Specifies S3 region name. Optional.
+- `use_insecure_imds_request` — If set to `true`, S3 client will use an insecure IMDS request while obtaining credentials from Amazon EC2 metadata. Optional, default value is `false`.
- `header` — Adds specified HTTP header to a request to given endpoint. Optional, can be specified multiple times.
- `server_side_encryption_customer_key_base64` — If specified, required headers for accessing S3 objects with SSE-C encryption will be set. Optional.
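Tying the endpoint settings together: once credentials are stored in the configuration file for an endpoint, a table can be declared without inline keys, since the engine falls back to the configured credentials. A sketch (the bucket URL is this page's placeholder and `data.csv` is a hypothetical object):

``` bash
clickhouse-client --query "
    CREATE TABLE s3_configured_credentials (name String, value UInt32)
    ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/data.csv', 'CSV')"
```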
@@ -101,12 +153,15 @@ The following settings can be specified in configuration file for given endpoint https://storage.yandexcloud.net/my-test-bucket-768/ + + ``` + ## Usage {#usage-examples} Suppose we have several files in TSV format with the following URIs on HDFS: @@ -149,8 +204,7 @@ ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/{some,another}_p CREATE TABLE big_table (name String, value UInt32) ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/big_prefix/file-{000..999}.csv', 'CSV'); ``` + ## See also - [S3 table function](../../../sql-reference/table-functions/s3.md) - -[Original article](https://clickhouse.tech/docs/en/engines/table-engines/integrations/s3/) diff --git a/docs/en/engines/table-engines/mergetree-family/mergetree.md b/docs/en/engines/table-engines/mergetree-family/mergetree.md index a24b7229d17..8743090df41 100644 --- a/docs/en/engines/table-engines/mergetree-family/mergetree.md +++ b/docs/en/engines/table-engines/mergetree-family/mergetree.md @@ -17,7 +17,7 @@ Main features: - Partitions can be used if the [partitioning key](../../../engines/table-engines/mergetree-family/custom-partitioning-key.md) is specified. - ClickHouse supports certain operations with partitions that are more effective than general operations on the same data with the same result. ClickHouse also automatically cuts off the partition data where the partitioning key is specified in the query. This also improves query performance. + ClickHouse supports certain operations with partitions that are more effective than general operations on the same data with the same result. ClickHouse also automatically cuts off the partition data where the partitioning key is specified in the query. - Data replication support. @@ -191,9 +191,7 @@ Sparse indexes allow you to work with a very large number of table rows, because ClickHouse does not require a unique primary key. You can insert multiple rows with the same primary key. -You can use `Nullable`-typed expressions in the `PRIMARY KEY` and `ORDER BY` clauses. To allow this feature, turn on the [allow_nullable_key](../../../operations/settings/settings.md#allow-nullable-key) setting. - -The [NULLS_LAST](../../../sql-reference/statements/select/order-by.md#sorting-of-special-values) principle applies for `NULL` values in the `ORDER BY` clause. +You can use `Nullable`-typed expressions in the `PRIMARY KEY` and `ORDER BY` clauses but it is strongly discouraged. To allow this feature, turn on the [allow_nullable_key](../../../operations/settings/settings.md#allow-nullable-key) setting. The [NULLS_LAST](../../../sql-reference/statements/select/order-by.md#sorting-of-special-values) principle applies for `NULL` values in the `ORDER BY` clause. ### Selecting the Primary Key {#selecting-the-primary-key} @@ -741,6 +739,7 @@ Configuration markup: https://storage.yandexcloud.net/my-bucket/root-path/ your_access_key_id your_secret_access_key + your_base64_encoded_customer_key http://proxy1 @@ -766,7 +765,9 @@ Required parameters: - `secret_access_key` — S3 secret access key. Optional parameters: +- `region` — S3 region name. - `use_environment_credentials` — Reads AWS credentials from the Environment variables AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY and AWS_SESSION_TOKEN if they exist. Default value is `false`. +- `use_insecure_imds_request` — If set to `true`, S3 client will use insecure IMDS request while obtaining credentials from Amazon EC2 metadata. Default value is `false`. - `proxy` — Proxy configuration for S3 endpoint. 
Each `uri` element inside the `proxy` block should contain a proxy URL.
- `connect_timeout_ms` — Socket connect timeout in milliseconds. Default value is `10 seconds`.
- `request_timeout_ms` — Request timeout in milliseconds. Default value is `5 seconds`.
diff --git a/docs/en/engines/table-engines/special/buffer.md b/docs/en/engines/table-engines/special/buffer.md
index bf6c08f8f6c..8245cd19e8c 100644
--- a/docs/en/engines/table-engines/special/buffer.md
+++ b/docs/en/engines/table-engines/special/buffer.md
@@ -18,11 +18,17 @@ Engine parameters:

- `num_layers` – Parallelism layer. Physically, the table will be represented as `num_layers` of independent buffers. Recommended value: 16.
- `min_time`, `max_time`, `min_rows`, `max_rows`, `min_bytes`, and `max_bytes` – Conditions for flushing data from the buffer.

+Optional engine parameters:
+
+- `flush_time`, `flush_rows`, `flush_bytes` – Conditions for flushing data from the buffer that apply only to background flushes (omitted or zero means no `flush*` conditions).
+
Data is flushed from the buffer and written to the destination table if all the `min*` conditions or at least one `max*` condition are met.

-- `min_time`, `max_time` – Condition for the time in seconds from the moment of the first write to the buffer.
-- `min_rows`, `max_rows` – Condition for the number of rows in the buffer.
-- `min_bytes`, `max_bytes` – Condition for the number of bytes in the buffer.
+Also, if at least one `flush*` condition is met, a flush is initiated in the background. This differs from `max*`, since `flush*` allows you to configure background flushes separately, avoiding added latency for `INSERT` (into `Buffer`) queries.
+
+- `min_time`, `max_time`, `flush_time` – Condition for the time in seconds from the moment of the first write to the buffer.
+- `min_rows`, `max_rows`, `flush_rows` – Condition for the number of rows in the buffer.
+- `min_bytes`, `max_bytes`, `flush_bytes` – Condition for the number of bytes in the buffer.

During the write operation, data is inserted into a `num_layers` number of random buffers. Or, if the data part to insert is large enough (greater than `max_rows` or `max_bytes`), it is written directly to the destination table, omitting the buffer.
diff --git a/docs/en/getting-started/example-datasets/cell-towers.md b/docs/en/getting-started/example-datasets/cell-towers.md
index 76effdd4c62..7028b650ad1 100644
--- a/docs/en/getting-started/example-datasets/cell-towers.md
+++ b/docs/en/getting-started/example-datasets/cell-towers.md
@@ -3,31 +3,31 @@ toc_priority: 21
toc_title: Cell Towers
---

-# Cell Towers
+# Cell Towers {#cell-towers}

This dataset is from [OpenCellid](https://www.opencellid.org/) - The world's largest Open Database of Cell Towers.

-As of 2021 it contains more than 40 million records about cell towers (GSM, LTE, UMTS, etc.) around the world with their geographical coordinates and metadata (country code, network, etc).
+As of 2021, it contains more than 40 million records about cell towers (GSM, LTE, UMTS, etc.) around the world with their geographical coordinates and metadata (country code, network, etc).

-OpenCelliD Project is licensed under a Creative Commons Attribution-ShareAlike 4.0 International License, and we redistribute a snapshot of this dataset under the terms of the same license. The up to date version of the dataset is available to download after sign in.
+OpenCelliD Project is licensed under a Creative Commons Attribution-ShareAlike 4.0 International License, and we redistribute a snapshot of this dataset under the terms of the same license. The up-to-date version of the dataset is available to download after sign in.

-## Get the Dataset
+## Get the Dataset {#get-the-dataset}

-Download the snapshot of the dataset from Feb 2021: [https://datasets.clickhouse.tech/cell_towers.csv.xz] (729 MB).
+1. Download the snapshot of the dataset from February 2021: [https://datasets.clickhouse.tech/cell_towers.csv.xz] (729 MB).

-Optionally validate the integrity:
+2. Validate the integrity (optional step):

```
md5sum cell_towers.csv.xz
8cf986f4a0d9f12c6f384a0e9192c908 cell_towers.csv.xz
```

-Decompress it with the following command:
+3. Decompress it with the following command:

```
xz -d cell_towers.csv.xz
```

-Create a table:
+4. Create a table:

```
CREATE TABLE cell_towers
@@ -50,15 +50,15 @@ CREATE TABLE cell_towers
ENGINE = MergeTree ORDER BY (radio, mcc, net, created);
```

-Insert the dataset:
+5. Insert the dataset:

```
clickhouse-client --query "INSERT INTO cell_towers FORMAT CSVWithNames" < cell_towers.csv
```

+## Examples {#examples}

-## Run some queries
+1. The number of cell towers by type:

-Number of cell towers by type:
```
SELECT radio, count() AS c FROM cell_towers GROUP BY radio ORDER BY c DESC
@@ -73,7 +73,8 @@ SELECT radio, count() AS c FROM cell_towers GROUP BY radio ORDER BY c DESC
5 rows in set. Elapsed: 0.011 sec. Processed 43.28 million rows, 43.28 MB (3.83 billion rows/s., 3.83 GB/s.)
```

-Cell towers by mobile country code (MCC):
+2. Cell towers by [mobile country code (MCC)](https://en.wikipedia.org/wiki/Mobile_country_code):
+
```
SELECT mcc, count() FROM cell_towers GROUP BY mcc ORDER BY count() DESC LIMIT 10
@@ -93,28 +94,28 @@ SELECT mcc, count() FROM cell_towers GROUP BY mcc ORDER BY count() DESC LIMIT 10
10 rows in set. Elapsed: 0.019 sec. Processed 43.28 million rows, 86.55 MB (2.33 billion rows/s., 4.65 GB/s.)
```

-See the dictionary here: [https://en.wikipedia.org/wiki/Mobile_country_code](https://en.wikipedia.org/wiki/Mobile_country_code).
+So, the top countries are the USA, Germany, and Russia.

-So, the top countries are USA, Germany and Russia.
-
-You may want to create an [External Dictionary](../../sql-reference/dictionaries/external-dictionaries/external-dicts/) in ClickHouse to decode these values.
+You may want to create an [External Dictionary](../../sql-reference/dictionaries/external-dictionaries/external-dicts.md) in ClickHouse to decode these values.

-### Example of using `pointInPolygon` function
+## Use case {#use-case}

-Create a table where we will store polygons:
+Using the `pointInPolygon` function.
+
+1. Create a table where we will store polygons:

```
CREATE TEMPORARY TABLE moscow (polygon Array(Tuple(Float64, Float64)));
```

-This is a rough shape of Moscow (without "new Moscow"):
+2. 
This is a rough shape of Moscow (without "new Moscow"): ``` INSERT INTO moscow VALUES ([(37.84172564285271, 55.78000432402266), (37.8381207618713, 55.775874525970494), (37.83979446823122, 55.775626746008065), (37.84243326983639, 55.77446586811748), (37.84262672750849, 55.771974101091104), (37.84153238623039, 55.77114545193181), (37.841124690460184, 55.76722010265554), (37.84239076983644, 55.76654891107098), (37.842283558197025, 55.76258709833121), (37.8421759312134, 55.758073999993734), (37.84198330422974, 55.75381499999371), (37.8416827275085, 55.749277102484484), (37.84157576190186, 55.74794544108413), (37.83897929098507, 55.74525257875241), (37.83739676451868, 55.74404373042019), (37.838732481460525, 55.74298009816793), (37.841183997352545, 55.743060321833575), (37.84097476190185, 55.73938799999373), (37.84048155819702, 55.73570799999372), (37.840095812164286, 55.73228210777237), (37.83983814285274, 55.73080491981639), (37.83846476321406, 55.729799917464675), (37.83835745269769, 55.72919751082619), (37.838636380279524, 55.72859509486539), (37.8395161005249, 55.727705075632784), (37.83897964285276, 55.722727886185154), (37.83862557539366, 55.72034817326636), (37.83559735744853, 55.71944437307499), (37.835370708803126, 55.71831419154461), (37.83738169402022, 55.71765218986692), (37.83823396494291, 55.71691750159089), (37.838056931213345, 55.71547311301385), (37.836812846557606, 55.71221445615604), (37.83522525396725, 55.709331054395555), (37.83269301586908, 55.70953687463627), (37.829667367706236, 55.70903403789297), (37.83311126588435, 55.70552351822608), (37.83058993121339, 55.70041317726053), (37.82983872750851, 55.69883771404813), (37.82934501586913, 55.69718947487017), (37.828926414016685, 55.69504441658371), (37.82876530422971, 55.69287499999378), (37.82894754100031, 55.690759754047335), (37.827697554878185, 55.68951421135665), (37.82447346292115, 55.68965045405069), (37.83136543914793, 55.68322046195302), (37.833554015869154, 55.67814012759211), (37.83544184655761, 55.67295011628339), (37.837480388885474, 55.6672498719639), (37.838960677246064, 55.66316274139358), (37.83926093121332, 55.66046999999383), (37.839025050262435, 55.65869897264431), (37.83670784390257, 55.65794084879904), (37.835656529083245, 55.65694309303843), (37.83704060449217, 55.65689306460552), (37.83696819873806, 55.65550363526252), (37.83760389616388, 55.65487847246661), (37.83687972750851, 55.65356745541324), (37.83515216004943, 55.65155951234079), (37.83312418518067, 55.64979413590619), (37.82801726983639, 55.64640836412121), (37.820614174591, 55.64164525405531), (37.818908190475426, 55.6421883258084), (37.81717543386075, 55.64112490388471), (37.81690987037274, 55.63916106913107), (37.815099354492155, 55.637925371757085), (37.808769150787356, 55.633798276884455), (37.80100123544311, 55.62873670012244), (37.79598013491824, 55.62554336109055), (37.78634567724606, 55.62033499605651), (37.78334147619623, 55.618768681480326), (37.77746201055901, 55.619855533402706), (37.77527329626457, 55.61909966711279), (37.77801986242668, 55.618770300976294), (37.778212973541216, 55.617257701952106), (37.77784818518065, 55.61574504433011), (37.77016867724609, 55.61148576294007), (37.760191219573976, 55.60599579539028), (37.75338926983641, 55.60227892751446), (37.746329965606634, 55.59920577639331), (37.73939925396728, 55.59631430313617), (37.73273665739439, 55.5935318803559), (37.7299954450912, 55.59350760316188), (37.7268679946899, 55.59469840523759), (37.72626726983634, 55.59229549697373), (37.7262673598022, 
55.59081598950582), (37.71897193121335, 55.5877595845419), (37.70871550793456, 55.58393177431724), (37.700497489410374, 55.580917323756644), (37.69204305026244, 55.57778089778455), (37.68544477378839, 55.57815154690915), (37.68391050793454, 55.57472945079756), (37.678803592590306, 55.57328235936491), (37.6743402539673, 55.57255251445782), (37.66813862698363, 55.57216388774464), (37.617927457672096, 55.57505691895805), (37.60443099999999, 55.5757737568051), (37.599683515869145, 55.57749105910326), (37.59754177842709, 55.57796291823627), (37.59625834786988, 55.57906686095235), (37.59501783265684, 55.57746616444403), (37.593090671936025, 55.57671634534502), (37.587018007904, 55.577944600233785), (37.578692203704804, 55.57982895000019), (37.57327546607398, 55.58116294118248), (37.57385012109279, 55.581550362779), (37.57399562266922, 55.5820107079112), (37.5735356072979, 55.58226289171689), (37.57290393054962, 55.582393529795155), (37.57037722355653, 55.581919415056234), (37.5592298306885, 55.584471614867844), (37.54189249206543, 55.58867650795186), (37.5297256269836, 55.59158133551745), (37.517837865081766, 55.59443656218868), (37.51200186508174, 55.59635625174229), (37.506808949737554, 55.59907823904434), (37.49820432275389, 55.6062944994944), (37.494406071441674, 55.60967103463367), (37.494760001358024, 55.61066689753365), (37.49397137107085, 55.61220931698269), (37.49016528606031, 55.613417718449064), (37.48773249206542, 55.61530616333343), (37.47921386508177, 55.622640129112334), (37.470652153442394, 55.62993723476164), (37.46273446298218, 55.6368075123157), (37.46350692265317, 55.64068225239439), (37.46050283203121, 55.640794546982576), (37.457627470916734, 55.64118904154646), (37.450718034393326, 55.64690488145138), (37.44239252645875, 55.65397824729769), (37.434587576721185, 55.66053543155961), (37.43582144975277, 55.661693766520735), (37.43576786245721, 55.662755031737014), (37.430982915344174, 55.664610641628116), (37.428547447097685, 55.66778515273695), (37.42945134592044, 55.668633314343566), (37.42859571562949, 55.66948145750025), (37.4262836402282, 55.670813882451405), (37.418709037048295, 55.6811141674414), (37.41922139651101, 55.68235377885389), (37.419218771842885, 55.68359335082235), (37.417196501327446, 55.684375235224735), (37.41607020370478, 55.68540557585352), (37.415640857147146, 55.68686637150793), (37.414632153442334, 55.68903015131686), (37.413344899475064, 55.690896881757396), (37.41171432275391, 55.69264232162232), (37.40948282275393, 55.69455101638112), (37.40703674603271, 55.69638690385348), (37.39607169577025, 55.70451821283731), (37.38952706878662, 55.70942491932811), (37.387778313491815, 55.71149057784176), (37.39049275399779, 55.71419814298992), (37.385557272491454, 55.7155489617061), (37.38388335714726, 55.71849856042102), (37.378368238098155, 55.7292763261685), (37.37763597123337, 55.730845879211614), (37.37890062088197, 55.73167906388319), (37.37750451918789, 55.734703664681774), (37.375610832015965, 55.734851959522246), (37.3723813571472, 55.74105626086403), (37.37014935714723, 55.746115620904355), (37.36944173016362, 55.750883999993725), (37.36975304365541, 55.76335905525834), (37.37244070571134, 55.76432079697595), (37.3724259757175, 55.76636979670426), (37.369922155757884, 55.76735417953104), (37.369892695770275, 55.76823419316575), (37.370214730163575, 55.782312184391266), (37.370493611114505, 55.78436801120489), (37.37120164550783, 55.78596427165359), (37.37284851456452, 55.7874378183096), (37.37608325135799, 55.7886695054807), (37.3764587460632, 
55.78947647305964), (37.37530000265506, 55.79146512926804), (37.38235915344241, 55.79899647809345), (37.384344043655396, 55.80113596939471), (37.38594269577028, 55.80322699999366), (37.38711208598329, 55.804919036911976), (37.3880239841309, 55.806610999993666), (37.38928977249147, 55.81001864976979), (37.39038389947512, 55.81348641242801), (37.39235781481933, 55.81983538336746), (37.393709457672124, 55.82417822811877), (37.394685720901464, 55.82792275755836), (37.39557615344238, 55.830447148154136), (37.39844478226658, 55.83167107969975), (37.40019761214057, 55.83151823557964), (37.400398790382326, 55.83264967594742), (37.39659544313046, 55.83322180909622), (37.39667059524539, 55.83402792148566), (37.39682089947515, 55.83638877400216), (37.39643489154053, 55.83861656112751), (37.3955338994751, 55.84072348043264), (37.392680272491454, 55.84502158126453), (37.39241188227847, 55.84659117913199), (37.392529730163616, 55.84816071336481), (37.39486835714723, 55.85288092980303), (37.39873052645878, 55.859893456073635), (37.40272161111449, 55.86441833633205), (37.40697072750854, 55.867579567544375), (37.410007082016016, 55.868369880337), (37.4120992989502, 55.86920843741314), (37.412668021163924, 55.87055369615854), (37.41482461111453, 55.87170587948249), (37.41862266137694, 55.873183961039565), (37.42413732540892, 55.874879126654704), (37.4312182698669, 55.875614937236705), (37.43111093783558, 55.8762723478417), (37.43332105622856, 55.87706546369396), (37.43385747619623, 55.87790681284802), (37.441303050262405, 55.88027084462084), (37.44747234260555, 55.87942070143253), (37.44716141796871, 55.88072960917233), (37.44769797085568, 55.88121221323979), (37.45204320500181, 55.882080694420715), (37.45673176190186, 55.882346110794586), (37.463383999999984, 55.88252729504517), (37.46682797486874, 55.88294937719063), (37.470014457672086, 55.88361266759345), (37.47751410450743, 55.88546991372396), (37.47860317658232, 55.88534929207307), (37.48165826025772, 55.882563306475106), (37.48316434442331, 55.8815803226785), (37.483831555817645, 55.882427612793315), (37.483182967125686, 55.88372791409729), (37.483092277908824, 55.88495581062434), (37.4855716508179, 55.8875561994203), (37.486440636245746, 55.887827444039566), (37.49014203439328, 55.88897899871799), (37.493210285705544, 55.890208937135604), (37.497512451065035, 55.891342397444696), (37.49780744510645, 55.89174030252967), (37.49940333499519, 55.89239745507079), (37.50018383334346, 55.89339220941865), (37.52421672750851, 55.903869074155224), (37.52977457672118, 55.90564076517974), (37.53503220370484, 55.90661661218259), (37.54042858064267, 55.90714113744566), (37.54320461007303, 55.905645048442985), (37.545686966066306, 55.906608607018505), (37.54743976120755, 55.90788552162358), (37.55796999999999, 55.90901557907218), (37.572711542327866, 55.91059395704873), (37.57942799999998, 55.91073854155573), (37.58502865872187, 55.91009969268444), (37.58739968913264, 55.90794809960554), (37.59131567193598, 55.908713267595054), (37.612687423278814, 55.902866854295375), (37.62348079629517, 55.90041967242986), (37.635797880950896, 55.898141151686396), (37.649487626983664, 55.89639275532968), (37.65619302513125, 55.89572360207488), (37.66294133862307, 55.895295577183965), (37.66874564418033, 55.89505457604897), (37.67375601586915, 55.89254677027454), (37.67744661901856, 55.8947775867987), (37.688347, 55.89450045676125), (37.69480554232789, 55.89422926332761), (37.70107096560668, 55.89322256101114), (37.705962965606716, 55.891763491662616), (37.711885134918205, 
55.889110234998974), (37.71682005026245, 55.886577568759876), (37.7199315476074, 55.88458159806678), (37.72234560316464, 55.882281005794134), (37.72364385977171, 55.8809452036196), (37.725371142837474, 55.8809722706006), (37.727870902099546, 55.88037213862385), (37.73394330422971, 55.877941504088696), (37.745339592590376, 55.87208120378722), (37.75525267724611, 55.86703807949492), (37.76919976190188, 55.859821640197474), (37.827835219574, 55.82962968399116), (37.83341438888553, 55.82575289922351), (37.83652584655761, 55.82188784027888), (37.83809213491821, 55.81612575504693), (37.83605359521481, 55.81460347077685), (37.83632178569025, 55.81276696067908), (37.838623105812026, 55.811486181656385), (37.83912198147584, 55.807329380532785), (37.839079078033414, 55.80510270463816), (37.83965844708251, 55.79940712529036), (37.840581150787344, 55.79131399999368), (37.84172564285271, 55.78000432402266)]); ``` -Check how many cell towers are in Moscow: +3. Check how many cell towers are in Moscow: ``` SELECT count() FROM cell_towers WHERE pointInPolygon((lon, lat), (SELECT * FROM moscow)) @@ -128,6 +129,4 @@ SELECT count() FROM cell_towers WHERE pointInPolygon((lon, lat), (SELECT * FROM The data is also available for interactive queries in the [Playground](https://gh-api.clickhouse.tech/play?user=play), [example](https://gh-api.clickhouse.tech/play?user=play#U0VMRUNUIG1jYywgY291bnQoKSBGUk9NIGNlbGxfdG93ZXJzIEdST1VQIEJZIG1jYyBPUkRFUiBCWSBjb3VudCgpIERFU0M=). -Although you cannot create temporary tables there. - -[Original article](https://clickhouse.tech/docs/en/getting_started/example_datasets/cell-towers/) +Although you cannot create temporary tables there. \ No newline at end of file diff --git a/docs/en/getting-started/example-datasets/ontime.md b/docs/en/getting-started/example-datasets/ontime.md index 83673cdceb6..f18acc6fd50 100644 --- a/docs/en/getting-started/example-datasets/ontime.md +++ b/docs/en/getting-started/example-datasets/ontime.md @@ -21,120 +21,121 @@ echo https://transtats.bts.gov/PREZIP/On_Time_Reporting_Carrier_On_Time_Performa Creating a table: ``` sql -CREATE TABLE `ontime` ( - `Year` UInt16, - `Quarter` UInt8, - `Month` UInt8, - `DayofMonth` UInt8, - `DayOfWeek` UInt8, - `FlightDate` Date, - `UniqueCarrier` FixedString(7), - `AirlineID` Int32, - `Carrier` FixedString(2), - `TailNum` String, - `FlightNum` String, - `OriginAirportID` Int32, - `OriginAirportSeqID` Int32, - `OriginCityMarketID` Int32, - `Origin` FixedString(5), - `OriginCityName` String, - `OriginState` FixedString(2), - `OriginStateFips` String, - `OriginStateName` String, - `OriginWac` Int32, - `DestAirportID` Int32, - `DestAirportSeqID` Int32, - `DestCityMarketID` Int32, - `Dest` FixedString(5), - `DestCityName` String, - `DestState` FixedString(2), - `DestStateFips` String, - `DestStateName` String, - `DestWac` Int32, - `CRSDepTime` Int32, - `DepTime` Int32, - `DepDelay` Int32, - `DepDelayMinutes` Int32, - `DepDel15` Int32, - `DepartureDelayGroups` String, - `DepTimeBlk` String, - `TaxiOut` Int32, - `WheelsOff` Int32, - `WheelsOn` Int32, - `TaxiIn` Int32, - `CRSArrTime` Int32, - `ArrTime` Int32, - `ArrDelay` Int32, - `ArrDelayMinutes` Int32, - `ArrDel15` Int32, - `ArrivalDelayGroups` Int32, - `ArrTimeBlk` String, - `Cancelled` UInt8, - `CancellationCode` FixedString(1), - `Diverted` UInt8, - `CRSElapsedTime` Int32, - `ActualElapsedTime` Int32, - `AirTime` Int32, - `Flights` Int32, - `Distance` Int32, - `DistanceGroup` UInt8, - `CarrierDelay` Int32, - `WeatherDelay` Int32, - `NASDelay` Int32, - `SecurityDelay` 
Int32, - `LateAircraftDelay` Int32, - `FirstDepTime` String, - `TotalAddGTime` String, - `LongestAddGTime` String, - `DivAirportLandings` String, - `DivReachedDest` String, - `DivActualElapsedTime` String, - `DivArrDelay` String, - `DivDistance` String, - `Div1Airport` String, - `Div1AirportID` Int32, - `Div1AirportSeqID` Int32, - `Div1WheelsOn` String, - `Div1TotalGTime` String, - `Div1LongestGTime` String, - `Div1WheelsOff` String, - `Div1TailNum` String, - `Div2Airport` String, - `Div2AirportID` Int32, - `Div2AirportSeqID` Int32, - `Div2WheelsOn` String, - `Div2TotalGTime` String, - `Div2LongestGTime` String, - `Div2WheelsOff` String, - `Div2TailNum` String, - `Div3Airport` String, - `Div3AirportID` Int32, - `Div3AirportSeqID` Int32, - `Div3WheelsOn` String, - `Div3TotalGTime` String, - `Div3LongestGTime` String, - `Div3WheelsOff` String, - `Div3TailNum` String, - `Div4Airport` String, - `Div4AirportID` Int32, - `Div4AirportSeqID` Int32, - `Div4WheelsOn` String, - `Div4TotalGTime` String, - `Div4LongestGTime` String, - `Div4WheelsOff` String, - `Div4TailNum` String, - `Div5Airport` String, - `Div5AirportID` Int32, - `Div5AirportSeqID` Int32, - `Div5WheelsOn` String, - `Div5TotalGTime` String, - `Div5LongestGTime` String, - `Div5WheelsOff` String, - `Div5TailNum` String +CREATE TABLE `ontime` +( + `Year` UInt16, + `Quarter` UInt8, + `Month` UInt8, + `DayofMonth` UInt8, + `DayOfWeek` UInt8, + `FlightDate` Date, + `Reporting_Airline` String, + `DOT_ID_Reporting_Airline` Int32, + `IATA_CODE_Reporting_Airline` String, + `Tail_Number` Int32, + `Flight_Number_Reporting_Airline` String, + `OriginAirportID` Int32, + `OriginAirportSeqID` Int32, + `OriginCityMarketID` Int32, + `Origin` FixedString(5), + `OriginCityName` String, + `OriginState` FixedString(2), + `OriginStateFips` String, + `OriginStateName` String, + `OriginWac` Int32, + `DestAirportID` Int32, + `DestAirportSeqID` Int32, + `DestCityMarketID` Int32, + `Dest` FixedString(5), + `DestCityName` String, + `DestState` FixedString(2), + `DestStateFips` String, + `DestStateName` String, + `DestWac` Int32, + `CRSDepTime` Int32, + `DepTime` Int32, + `DepDelay` Int32, + `DepDelayMinutes` Int32, + `DepDel15` Int32, + `DepartureDelayGroups` String, + `DepTimeBlk` String, + `TaxiOut` Int32, + `WheelsOff` Int32, + `WheelsOn` Int32, + `TaxiIn` Int32, + `CRSArrTime` Int32, + `ArrTime` Int32, + `ArrDelay` Int32, + `ArrDelayMinutes` Int32, + `ArrDel15` Int32, + `ArrivalDelayGroups` Int32, + `ArrTimeBlk` String, + `Cancelled` UInt8, + `CancellationCode` FixedString(1), + `Diverted` UInt8, + `CRSElapsedTime` Int32, + `ActualElapsedTime` Int32, + `AirTime` Nullable(Int32), + `Flights` Int32, + `Distance` Int32, + `DistanceGroup` UInt8, + `CarrierDelay` Int32, + `WeatherDelay` Int32, + `NASDelay` Int32, + `SecurityDelay` Int32, + `LateAircraftDelay` Int32, + `FirstDepTime` String, + `TotalAddGTime` String, + `LongestAddGTime` String, + `DivAirportLandings` String, + `DivReachedDest` String, + `DivActualElapsedTime` String, + `DivArrDelay` String, + `DivDistance` String, + `Div1Airport` String, + `Div1AirportID` Int32, + `Div1AirportSeqID` Int32, + `Div1WheelsOn` String, + `Div1TotalGTime` String, + `Div1LongestGTime` String, + `Div1WheelsOff` String, + `Div1TailNum` String, + `Div2Airport` String, + `Div2AirportID` Int32, + `Div2AirportSeqID` Int32, + `Div2WheelsOn` String, + `Div2TotalGTime` String, + `Div2LongestGTime` String, + `Div2WheelsOff` String, + `Div2TailNum` String, + `Div3Airport` String, + `Div3AirportID` Int32, + `Div3AirportSeqID` Int32, + 
`Div3WheelsOn` String, + `Div3TotalGTime` String, + `Div3LongestGTime` String, + `Div3WheelsOff` String, + `Div3TailNum` String, + `Div4Airport` String, + `Div4AirportID` Int32, + `Div4AirportSeqID` Int32, + `Div4WheelsOn` String, + `Div4TotalGTime` String, + `Div4LongestGTime` String, + `Div4WheelsOff` String, + `Div4TailNum` String, + `Div5Airport` String, + `Div5AirportID` Int32, + `Div5AirportSeqID` Int32, + `Div5WheelsOn` String, + `Div5TotalGTime` String, + `Div5LongestGTime` String, + `Div5WheelsOff` String, + `Div5TailNum` String ) ENGINE = MergeTree -PARTITION BY Year -ORDER BY (Carrier, FlightDate) -SETTINGS index_granularity = 8192; + PARTITION BY Year + ORDER BY (IATA_CODE_Reporting_Airline, FlightDate) + SETTINGS index_granularity = 8192; ``` Loading data with multiple threads: @@ -206,7 +207,7 @@ LIMIT 10; Q4. The number of delays by carrier for 2007 ``` sql -SELECT Carrier, count(*) +SELECT IATA_CODE_Reporting_Airline AS Carrier, count(*) FROM ontime WHERE DepDelay>10 AND Year=2007 GROUP BY Carrier @@ -220,29 +221,29 @@ SELECT Carrier, c, c2, c*100/c2 as c3 FROM ( SELECT - Carrier, + IATA_CODE_Reporting_Airline AS Carrier, count(*) AS c FROM ontime WHERE DepDelay>10 AND Year=2007 GROUP BY Carrier -) +) q JOIN ( SELECT - Carrier, + IATA_CODE_Reporting_Airline AS Carrier, count(*) AS c2 FROM ontime WHERE Year=2007 GROUP BY Carrier -) USING Carrier +) qq USING Carrier ORDER BY c3 DESC; ``` Better version of the same query: ``` sql -SELECT Carrier, avg(DepDelay>10)*100 AS c3 +SELECT IATA_CODE_Reporting_Airline AS Carrier, avg(DepDelay>10)*100 AS c3 FROM ontime WHERE Year=2007 GROUP BY Carrier @@ -256,29 +257,29 @@ SELECT Carrier, c, c2, c*100/c2 as c3 FROM ( SELECT - Carrier, + IATA_CODE_Reporting_Airline AS Carrier, count(*) AS c FROM ontime WHERE DepDelay>10 AND Year>=2000 AND Year<=2008 GROUP BY Carrier -) +) q JOIN ( SELECT - Carrier, + IATA_CODE_Reporting_Airline AS Carrier, count(*) AS c2 FROM ontime WHERE Year>=2000 AND Year<=2008 GROUP BY Carrier -) USING Carrier +) qq USING Carrier ORDER BY c3 DESC; ``` Better version of the same query: ``` sql -SELECT Carrier, avg(DepDelay>10)*100 AS c3 +SELECT IATA_CODE_Reporting_Airline AS Carrier, avg(DepDelay>10)*100 AS c3 FROM ontime WHERE Year>=2000 AND Year<=2008 GROUP BY Carrier @@ -297,7 +298,7 @@ FROM from ontime WHERE DepDelay>10 GROUP BY Year -) +) q JOIN ( select @@ -305,7 +306,7 @@ JOIN count(*) as c2 from ontime GROUP BY Year -) USING (Year) +) qq USING (Year) ORDER BY Year; ``` @@ -340,7 +341,7 @@ Q10. ``` sql SELECT - min(Year), max(Year), Carrier, count(*) AS cnt, + min(Year), max(Year), IATA_CODE_Reporting_Airline AS Carrier, count(*) AS cnt, sum(ArrDelayMinutes>30) AS flights_delayed, round(sum(ArrDelayMinutes>30)/count(*),2) AS rate FROM ontime diff --git a/docs/en/getting-started/install.md b/docs/en/getting-started/install.md index a8753de6abd..c444264b71f 100644 --- a/docs/en/getting-started/install.md +++ b/docs/en/getting-started/install.md @@ -102,7 +102,9 @@ For non-Linux operating systems and for AArch64 CPU arhitecture, ClickHouse buil - [FreeBSD](https://builds.clickhouse.tech/master/freebsd/clickhouse) — `curl -O 'https://builds.clickhouse.tech/master/freebsd/clickhouse' && chmod a+x ./clickhouse` - [AArch64](https://builds.clickhouse.tech/master/aarch64/clickhouse) — `curl -O 'https://builds.clickhouse.tech/master/aarch64/clickhouse' && chmod a+x ./clickhouse` -After downloading, you can use the `clickhouse client` to connect to the server, or `clickhouse local` to process local data. 
To run `clickhouse server`, you have to additionally download [server](https://github.com/ClickHouse/ClickHouse/blob/master/programs/server/config.xml) and [users](https://github.com/ClickHouse/ClickHouse/blob/master/programs/server/users.xml) configuration files from GitHub.
+After downloading, you can use the `clickhouse client` to connect to the server, or `clickhouse local` to process local data.
+
+Run `sudo ./clickhouse install` if you want to install ClickHouse system-wide (this also installs the needed configuration files and configures users, etc.). After that, run the `clickhouse start` command to start clickhouse-server and `clickhouse-client` to connect to it.

These builds are not recommended for use in production environments because they are less thoroughly tested, but you can do so at your own risk. They also have only a subset of ClickHouse features available.
diff --git a/docs/en/guides/apply-catboost-model.md b/docs/en/guides/apply-catboost-model.md
index f614b121714..7c2c8a575ec 100644
--- a/docs/en/guides/apply-catboost-model.md
+++ b/docs/en/guides/apply-catboost-model.md
@@ -159,6 +159,9 @@ The fastest way to evaluate a CatBoost model is compile `libcatboostmodel.<so|dylib>`
 <models_config>/home/catboost/models/*_model.xml</models_config>
```

+!!! note "Note"
+    You can change the path to the CatBoost model configuration later without restarting the server.
+
## 4. Run the Model Inference from SQL {#run-model-inference}

To test the model, run the ClickHouse client `$ clickhouse client`.
diff --git a/docs/en/interfaces/third-party/gui.md b/docs/en/interfaces/third-party/gui.md
index 5d14b3aa3cc..fffe0c87a53 100644
--- a/docs/en/interfaces/third-party/gui.md
+++ b/docs/en/interfaces/third-party/gui.md
@@ -169,19 +169,23 @@ Features:

### SeekTable {#seektable}

-[SeekTable](https://www.seektable.com) is a self-service BI tool for data exploration and operational reporting. SeekTable is available both as a cloud service and a self-hosted version. SeekTable reports may be embedded into any web-app.
+[SeekTable](https://www.seektable.com) is a self-service BI tool for data exploration and operational reporting. It is available both as a cloud service and a self-hosted version. Reports from SeekTable may be embedded into any web-app.

Features:

- Business users-friendly reports builder.
- Powerful report parameters for SQL filtering and report-specific query customizations.
- Can connect to ClickHouse both with a native TCP/IP endpoint and a HTTP(S) interface (2 different drivers).
-- It is possible to use all power of CH SQL dialect in dimensions/measures definitions
+- It is possible to use all the power of the ClickHouse SQL dialect in dimensions/measures definitions.
- [Web API](https://www.seektable.com/help/web-api-integration) for automated reports generation.
-- Supports reports development flow with account data [backup/restore](https://www.seektable.com/help/self-hosted-backup-restore), data models (cubes) / reports configuration is a human-readable XML and can be stored under version control.
+- Supports reports development flow with account data [backup/restore](https://www.seektable.com/help/self-hosted-backup-restore); data models (cubes) / reports configuration is a human-readable XML and can be stored under a version control system.

SeekTable is [free](https://www.seektable.com/help/cloud-pricing) for personal/individual usage.
[How to configure ClickHouse connection in SeekTable.](https://www.seektable.com/help/clickhouse-pivot-table)

+### Chadmin {#chadmin}
+
+[Chadmin](https://github.com/bun4uk/chadmin) is a simple UI that lets you visualize the queries currently running on your ClickHouse cluster, see information about them, and kill them if needed.
+
[Original article](https://clickhouse.tech/docs/en/interfaces/third-party/gui/)
diff --git a/docs/en/introduction/adopters.md b/docs/en/introduction/adopters.md
index 5c526ac7260..fa257a84173 100644
--- a/docs/en/introduction/adopters.md
+++ b/docs/en/introduction/adopters.md
@@ -48,7 +48,8 @@ toc_title: Adopters
| Diva-e | Digital consulting | Main Product | — | — | [Slides in English, September 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup29/ClickHouse-MeetUp-Unusual-Applications-sd-2019-09-17.pdf) |
| Ecwid | E-commerce SaaS | Metrics, Logging | — | — | [Slides in Russian, April 2019](https://nastachku.ru/var/files/1/presentation/backend/2_Backend_6.pdf) |
| eBay | E-commerce | Logs, Metrics and Events | — | — | [Official website, Sep 2020](https://tech.ebayinc.com/engineering/ou-online-analytical-processing/) |
-| Exness | Trading | Metrics, Logging | — | — | [Talk in Russian, May 2019](https://youtu.be/_rpU-TvSfZ8?t=3215) |
+| Exness | Trading | Metrics, Logging | — | — | [Talk in Russian, May 2019](https://youtu.be/_rpU-TvSfZ8?t=3215) |
+| EventBunker.io | Serverless Data Processing | — | — | — | [Tweet, April 2021](https://twitter.com/Halil_D_/status/1379839133472985091) |
| FastNetMon | DDoS Protection | Main Product | | — | [Official website](https://fastnetmon.com/docs-fnm-advanced/fastnetmon-advanced-traffic-persistency/) |
| Flipkart | e-Commerce | — | — | — | [Talk in English, July 2020](https://youtu.be/GMiXCMFDMow?t=239) |
| FunCorp | Games | | — | 14 bn records/day as of Jan 2021 | [Article](https://www.altinity.com/blog/migrating-from-redshift-to-clickhouse) |
@@ -76,7 +77,8 @@ toc_title: Adopters
| Marilyn | Advertising | Statistics | — | — | [Talk in Russian, June 2017](https://www.youtube.com/watch?v=iXlIgx2khwc) |
| Mello | Marketing | Analytics | 1 server | — | [Article, Oct 2020](https://vc.ru/marketing/166180-razrabotka-tipovogo-otcheta-skvoznoy-analitiki) |
| MessageBird | Telecommunications | Statistics | — | — | [Slides in English, November 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup20/messagebird.pdf) |
-| MindsDB | Machine Learning | Main Product | — | — | [Official Website](https://www.mindsdb.com/blog/machine-learning-models-as-tables-in-ch) |x
+| Microsoft | Web Analytics | Clarity (Main Product) | — | — | [A question on GitHub](https://github.com/ClickHouse/ClickHouse/issues/21556) |
+| MindsDB | Machine Learning | Main Product | — | — | [Official Website](https://www.mindsdb.com/blog/machine-learning-models-as-tables-in-ch) |
| MUX | Online Video | Video Analytics | — | — | [Talk in English, August 2019](https://altinity.com/presentations/2019/8/13/how-clickhouse-became-the-default-analytics-database-for-mux/) |
| MGID | Ad network | Web-analytics | — | — | [Blog post in Russian, April 2020](http://gs-studio.com/news-about-it/32777----clickhouse---c) |
| Netskope | Network Security | — | — | — | [Job advertisement, March 2021](https://www.mendeley.com/careers/job/senior-software-developer-backend-developer-1346348) |
diff --git a/docs/en/operations/access-rights.md b/docs/en/operations/access-rights.md
index 32f8fdcb642..9f7d2a0b95b 100644
---
a/docs/en/operations/access-rights.md
+++ b/docs/en/operations/access-rights.md
@@ -101,6 +101,9 @@ Privileges can be granted to a role by the [GRANT](../sql-reference/statements/g

Row policy is a filter that defines which of the rows are available to a user or a role. Row policy contains filters for one particular table, as well as a list of roles and/or users which should use this row policy.

+!!! note "Warning"
+    Row policies make sense only for users with read-only access. If a user can modify a table or copy partitions between tables, it defeats the restrictions of row policies.
+
Management queries:

- [CREATE ROW POLICY](../sql-reference/statements/create/row-policy.md)
diff --git a/docs/en/operations/performance-test.md b/docs/en/operations/performance-test.md
index ca805923ba9..a808ffd0a85 100644
--- a/docs/en/operations/performance-test.md
+++ b/docs/en/operations/performance-test.md
@@ -12,6 +12,7 @@ With this instruction you can run basic ClickHouse performance test on any serve

3. Copy the link to `clickhouse` binary for amd64 or aarch64.
4. ssh to the server and download it with wget:
```bash
+# These links are outdated, please obtain the fresh link from the "commits" page.
# For amd64:
wget https://clickhouse-builds.s3.yandex.net/0/e29c4c3cc47ab2a6c4516486c1b77d57e7d42643/clickhouse_build_check/gcc-10_relwithdebuginfo_none_bundled_unsplitted_disable_False_binary/clickhouse
# For aarch64:
diff --git a/docs/en/operations/server-configuration-parameters/settings.md b/docs/en/operations/server-configuration-parameters/settings.md
index 2b3bdbd51ef..19671b523e3 100644
--- a/docs/en/operations/server-configuration-parameters/settings.md
+++ b/docs/en/operations/server-configuration-parameters/settings.md
@@ -345,7 +345,8 @@ Similar to `interserver_http_host`, except that this hostname can be used by oth

The username and password used to authenticate during [replication](../../engines/table-engines/mergetree-family/replication.md) with the Replicated\* engines. These credentials are used only for communication between replicas and are unrelated to credentials for ClickHouse clients. The server checks these credentials for connecting replicas and uses the same credentials when connecting to other replicas. So, these credentials should be set the same for all replicas in a cluster.
By default, the authentication is not used.

-**Note:** These credentials are common for replication through `HTTP` and `HTTPS`.
+!!! note "Note"
+    These credentials are common for replication through `HTTP` and `HTTPS`.

This section contains the following parameters:

@@ -429,7 +430,7 @@ Keys for syslog:

    Default value: `LOG_USER` if `address` is specified, `LOG_DAEMON` otherwise.

- format – Message format. Possible values: `bsd` and `syslog`.

-## send_crash_reports {#server_configuration_parameters-logger}
+## send_crash_reports {#server_configuration_parameters-send_crash_reports}

Settings for opt-in sending crash reports to the ClickHouse core developers team via [Sentry](https://sentry.io).
Enabling it, especially in pre-production environments, is highly appreciated.
diff --git a/docs/en/operations/settings/merge-tree-settings.md b/docs/en/operations/settings/merge-tree-settings.md
index 77b68715ba9..6e3d0bc0fde 100644
--- a/docs/en/operations/settings/merge-tree-settings.md
+++ b/docs/en/operations/settings/merge-tree-settings.md
@@ -56,6 +56,26 @@ Default value: 150.
ClickHouse artificially executes `INSERT` longer (adds ‘sleep’) so that the background merge process can merge parts faster than they are added.

+## inactive_parts_to_throw_insert {#inactive-parts-to-throw-insert}
+
+If the number of inactive parts in a single partition is greater than the `inactive_parts_to_throw_insert` value, `INSERT` is interrupted with the "Too many inactive parts (N). Parts cleaning are processing significantly slower than inserts" exception.
+
+Possible values:
+
+- Any positive integer.
+
+Default value: 0 (unlimited).
+
+## inactive_parts_to_delay_insert {#inactive-parts-to-delay-insert}
+
+If the number of inactive parts in a single partition is at least the `inactive_parts_to_delay_insert` value, an `INSERT` is artificially slowed down. It is useful when a server fails to clean up parts quickly enough; see the usage sketch further below.
+
+Possible values:
+
+- Any positive integer.
+
+Default value: 0 (unlimited).
+
## max_delay_to_insert {#max-delay-to-insert}

The value in seconds, which is used to calculate the `INSERT` delay, if the number of active parts in a single partition exceeds the [parts_to_delay_insert](#parts-to-delay-insert) value.
@@ -115,6 +135,39 @@ Default value: 604800 (1 week).

Similar to [replicated_deduplication_window](#replicated-deduplication-window), `replicated_deduplication_window_seconds` specifies how long to store hash sums of blocks for insert deduplication. Hash sums older than `replicated_deduplication_window_seconds` are removed from Zookeeper, even if they are less than `replicated_deduplication_window`.

+## replicated_fetches_http_connection_timeout {#replicated_fetches_http_connection_timeout}
+
+HTTP connection timeout (in seconds) for part fetch requests. Inherited from the default profile [http_connection_timeout](./settings.md#http_connection_timeout) if not set explicitly.
+
+Possible values:
+
+- Any positive integer.
+- 0 - Use value of `http_connection_timeout`.
+
+Default value: 0.
+
+## replicated_fetches_http_send_timeout {#replicated_fetches_http_send_timeout}
+
+HTTP send timeout (in seconds) for part fetch requests. Inherited from the default profile [http_send_timeout](./settings.md#http_send_timeout) if not set explicitly.
+
+Possible values:
+
+- Any positive integer.
+- 0 - Use value of `http_send_timeout`.
+
+Default value: 0.
+
+## replicated_fetches_http_receive_timeout {#replicated_fetches_http_receive_timeout}
+
+HTTP receive timeout (in seconds) for part fetch requests. Inherited from the default profile [http_receive_timeout](./settings.md#http_receive_timeout) if not set explicitly.
+
+Possible values:
+
+- Any positive integer.
+- 0 - Use value of `http_receive_timeout`.
+
+Default value: 0.
+
## old_parts_lifetime {#old-parts-lifetime}

The time (in seconds) of storing inactive parts to protect against data loss during spontaneous server reboots.
diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md
index b64fd528f98..a5dc66cf0d6 100644
--- a/docs/en/operations/settings/settings.md
+++ b/docs/en/operations/settings/settings.md
@@ -143,6 +143,16 @@ Possible values:

Default value: 0.

+## http_max_uri_size {#http-max-uri-size}
+
+Sets the maximum URI length of an HTTP request.
+
+Possible values:
+
+- Positive integer.
+
+Default value: 1048576.
+
## send_progress_in_http_headers {#settings-send_progress_in_http_headers}

Enables or disables `X-ClickHouse-Progress` HTTP response headers in `clickhouse-server` responses.
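To illustrate the `inactive_parts_*` merge-tree settings documented above, here is a minimal sketch (the table name and threshold values are hypothetical; merge-tree settings can be set per table in the `SETTINGS` clause):

``` sql
-- Slow down INSERTs at 100 inactive parts in a partition, reject them at 300.
CREATE TABLE t_inactive_parts (x UInt64)
ENGINE = MergeTree
ORDER BY x
SETTINGS inactive_parts_to_delay_insert = 100,
         inactive_parts_to_throw_insert = 300;
```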
@@ -854,8 +864,6 @@ For example, when reading from a table, if it is possible to evaluate expression

Default value: the number of physical CPU cores.

-If less than one SELECT query is normally run on a server at a time, set this parameter to a value slightly less than the actual number of processor cores.
-
For queries that are completed quickly because of a LIMIT, you can set a lower ‘max_threads’. For example, if the necessary number of entries are located in every block and max_threads = 8, then 8 blocks are retrieved, although it would have been enough to read just one.

The smaller the `max_threads` value, the less memory is consumed.
@@ -1565,6 +1573,17 @@ Possible values:

Default value: 0

+## optimize_skip_unused_shards_rewrite_in {#optimize-skip-unused-shards-rewrite-in}
+
+Rewrites `IN` in queries for remote shards to exclude values that do not belong to the shard (requires `optimize_skip_unused_shards`).
+
+Possible values:
+
+- 0 — Disabled.
+- 1 — Enabled.
+
+Default value: 1 (since it requires `optimize_skip_unused_shards` anyway, which is `0` by default).
+
## allow_nondeterministic_optimize_skip_unused_shards {#allow-nondeterministic-optimize-skip-unused-shards}

Allow nondeterministic functions (like `rand` or `dictGet`, since the latter has some caveats with updates) in the sharding key.
@@ -2844,6 +2863,39 @@ Sets the interval in seconds after which periodically refreshed [live view](../.

Default value: `60`.

+## http_connection_timeout {#http_connection_timeout}
+
+HTTP connection timeout (in seconds).
+
+Possible values:
+
+- Any positive integer.
+- 0 - Disabled (infinite timeout).
+
+Default value: 1.
+
+## http_send_timeout {#http_send_timeout}
+
+HTTP send timeout (in seconds).
+
+Possible values:
+
+- Any positive integer.
+- 0 - Disabled (infinite timeout).
+
+Default value: 1800.
+
+## http_receive_timeout {#http_receive_timeout}
+
+HTTP receive timeout (in seconds).
+
+Possible values:
+
+- Any positive integer.
+- 0 - Disabled (infinite timeout).
+
+Default value: 1800.
+
## check_query_single_value_result {#check_query_single_value_result}

Defines the level of detail for the [CHECK TABLE](../../sql-reference/statements/check-table.md#checking-mergetree-tables) query result for `MergeTree` family engines.
@@ -2855,4 +2907,97 @@ Possible values:

Default value: `0`.

+## prefer_column_name_to_alias {#prefer-column-name-to-alias}
+
+Enables or disables using the original column names instead of aliases in query expressions and clauses. It especially matters when an alias is the same as a column name, see [Expression Aliases](../../sql-reference/syntax.md#notes-on-usage). Enable this setting to make alias syntax rules in ClickHouse more compatible with most other database engines.
+
+Possible values:
+
+- 0 — The column name is substituted with the alias.
+- 1 — The column name is not substituted with the alias.
+
+Default value: `0`.
+
+**Example**
+
+The difference between enabled and disabled:
+
+Query:
+
+```sql
+SET prefer_column_name_to_alias = 0;
+SELECT avg(number) AS number, max(number) FROM numbers(10);
+```
+
+Result:
+
+```text
+Received exception from server (version 21.5.1):
+Code: 184. DB::Exception: Received from localhost:9000. DB::Exception: Aggregate function avg(number) is found inside another aggregate function in query: While processing avg(number) AS number.
+```
+
+Query:
+
+```sql
+SET prefer_column_name_to_alias = 1;
+SELECT avg(number) AS number, max(number) FROM numbers(10);
+```
+
+Result:
+
+```text
+┌─number─┬─max(number)─┐
+│    4.5 │           9 │
+└────────┴─────────────┘
+```
+
+## limit {#limit}
+
+Sets the maximum number of rows to get from the query result. It adjusts the value set by the [LIMIT](../../sql-reference/statements/select/limit.md#limit-clause) clause, so that the limit specified in the query cannot exceed the limit set by this setting.
+
+Possible values:
+
+- 0 — The number of rows is not limited.
+- Positive integer.
+
+Default value: `0`.
+
+## offset {#offset}
+
+Sets the number of rows to skip before starting to return rows from the query. It adjusts the offset set by the [OFFSET](../../sql-reference/statements/select/offset.md#offset-fetch) clause, so that these two values are added together.
+
+Possible values:
+
+- 0 — No rows are skipped.
+- Positive integer.
+
+Default value: `0`.
+
+**Example**
+
+Input table:
+
+``` sql
+CREATE TABLE test (i UInt64) ENGINE = MergeTree() ORDER BY i;
+INSERT INTO test SELECT number FROM numbers(500);
+```
+
+Query:
+
+``` sql
+SET limit = 5;
+SET offset = 7;
+SELECT * FROM test LIMIT 10 OFFSET 100;
+```
+
+Result:
+
+``` text
+┌───i─┐
+│ 107 │
+│ 108 │
+│ 109 │
+└─────┘
+```
+
[Original article](https://clickhouse.tech/docs/en/operations/settings/settings/)
diff --git a/docs/en/operations/system-tables/clusters.md b/docs/en/operations/system-tables/clusters.md
index cba52586e93..096eca12e7d 100644
--- a/docs/en/operations/system-tables/clusters.md
+++ b/docs/en/operations/system-tables/clusters.md
@@ -4,63 +4,68 @@ Contains information about clusters available in the config file and the servers

Columns:

-- `cluster` (String) — The cluster name.
-- `shard_num` (UInt32) — The shard number in the cluster, starting from 1.
-- `shard_weight` (UInt32) — The relative weight of the shard when writing data.
-- `replica_num` (UInt32) — The replica number in the shard, starting from 1.
-- `host_name` (String) — The host name, as specified in the config.
-- `host_address` (String) — The host IP address obtained from DNS.
-- `port` (UInt16) — The port to use for connecting to the server.
-- `user` (String) — The name of the user for connecting to the server.
-- `errors_count` (UInt32) - number of times this host failed to reach replica.
-- `estimated_recovery_time` (UInt32) - seconds left until replica error count is zeroed and it is considered to be back to normal.
+- `cluster` ([String](../../sql-reference/data-types/string.md)) — The cluster name.
+- `shard_num` ([UInt32](../../sql-reference/data-types/int-uint.md)) — The shard number in the cluster, starting from 1.
+- `shard_weight` ([UInt32](../../sql-reference/data-types/int-uint.md)) — The relative weight of the shard when writing data.
+- `replica_num` ([UInt32](../../sql-reference/data-types/int-uint.md)) — The replica number in the shard, starting from 1.
+- `host_name` ([String](../../sql-reference/data-types/string.md)) — The host name, as specified in the config.
+- `host_address` ([String](../../sql-reference/data-types/string.md)) — The host IP address obtained from DNS.
+- `port` ([UInt16](../../sql-reference/data-types/int-uint.md)) — The port to use for connecting to the server.
+- `is_local` ([UInt8](../../sql-reference/data-types/int-uint.md)) — Flag that indicates whether the host is local.
+- `user` ([String](../../sql-reference/data-types/string.md)) — The name of the user for connecting to the server.
+- `default_database` ([String](../../sql-reference/data-types/string.md)) — The default database name.
+- `errors_count` ([UInt32](../../sql-reference/data-types/int-uint.md)) — The number of times this host failed to reach replica.
+- `slowdowns_count` ([UInt32](../../sql-reference/data-types/int-uint.md)) — The number of slowdowns that led to changing replica when establishing a connection with hedged requests.
+- `estimated_recovery_time` ([UInt32](../../sql-reference/data-types/int-uint.md)) — Seconds remaining until the replica error count is zeroed and it is considered to be back to normal.

-Please note that `errors_count` is updated once per query to the cluster, but `estimated_recovery_time` is recalculated on-demand. So there could be a case of non-zero `errors_count` and zero `estimated_recovery_time`, that next query will zero `errors_count` and try to use replica as if it has no errors.
+**Example**

-**See also**
+Query:
+
+```sql
+SELECT * FROM system.clusters LIMIT 2 FORMAT Vertical;
+```
+
+Result:
+
+```text
+Row 1:
+──────
+cluster: test_cluster_two_shards
+shard_num: 1
+shard_weight: 1
+replica_num: 1
+host_name: 127.0.0.1
+host_address: 127.0.0.1
+port: 9000
+is_local: 1
+user: default
+default_database:
+errors_count: 0
+slowdowns_count: 0
+estimated_recovery_time: 0
+
+Row 2:
+──────
+cluster: test_cluster_two_shards
+shard_num: 2
+shard_weight: 1
+replica_num: 1
+host_name: 127.0.0.2
+host_address: 127.0.0.2
+port: 9000
+is_local: 0
+user: default
+default_database:
+errors_count: 0
+slowdowns_count: 0
+estimated_recovery_time: 0
+```
+
+**See Also**

- [Table engine Distributed](../../engines/table-engines/special/distributed.md)
- [distributed_replica_error_cap setting](../../operations/settings/settings.md#settings-distributed_replica_error_cap)
- [distributed_replica_error_half_life setting](../../operations/settings/settings.md#settings-distributed_replica_error_half_life)

-**Example**
-
-```sql
-:) SELECT * FROM system.clusters LIMIT 2 FORMAT Vertical;
-```
-
-```text
-Row 1:
-──────
-cluster: test_cluster
-shard_num: 1
-shard_weight: 1
-replica_num: 1
-host_name: clickhouse01
-host_address: 172.23.0.11
-port: 9000
-is_local: 1
-user: default
-default_database:
-errors_count: 0
-estimated_recovery_time: 0
-
-Row 2:
-──────
-cluster: test_cluster
-shard_num: 1
-shard_weight: 1
-replica_num: 2
-host_name: clickhouse02
-host_address: 172.23.0.12
-port: 9000
-is_local: 0
-user: default
-default_database:
-errors_count: 0
-estimated_recovery_time: 0
-
-2 rows in set. Elapsed: 0.002 sec.
-```
-
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/clusters)
diff --git a/docs/en/operations/system-tables/columns.md b/docs/en/operations/system-tables/columns.md
index 92a6315d06b..9160dca9a1a 100644
--- a/docs/en/operations/system-tables/columns.md
+++ b/docs/en/operations/system-tables/columns.md
@@ -4,7 +4,9 @@ Contains information about columns in all the tables.

You can use this table to get information similar to the [DESCRIBE TABLE](../../sql-reference/statements/misc.md#misc-describe-table) query, but for multiple tables at once.

-The `system.columns` table contains the following columns (the column type is shown in brackets):
+Columns from [temporary tables](../../sql-reference/statements/create/table.md#temporary-tables) are visible in `system.columns` only in the sessions where they were created. They are shown with an empty `database` field.
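+
+A minimal sketch of this behaviour (the table name is arbitrary):
+
+```sql
+CREATE TEMPORARY TABLE tmp_example (n UInt64);
+-- The temporary table is visible only in the current session,
+-- and its database field is empty.
+SELECT database, table, name, type FROM system.columns WHERE table = 'tmp_example';
+```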
+
+Columns:

- `database` ([String](../../sql-reference/data-types/string.md)) — Database name.
- `table` ([String](../../sql-reference/data-types/string.md)) — Table name.
@@ -26,7 +28,7 @@ The `system.columns` table contains the following columns (the column type is sh

**Example**

```sql
-:) select * from system.columns LIMIT 2 FORMAT Vertical;
+SELECT * FROM system.columns LIMIT 2 FORMAT Vertical;
```

```text
@@ -65,8 +67,6 @@ is_in_sorting_key: 0
is_in_primary_key: 0
is_in_sampling_key: 0
compression_codec:
-
-2 rows in set. Elapsed: 0.002 sec.
```

[Original article](https://clickhouse.tech/docs/en/operations/system_tables/columns)
diff --git a/docs/en/operations/system-tables/dictionaries.md b/docs/en/operations/system-tables/dictionaries.md
index 3d3bbe2af4e..2bc1be51f19 100644
--- a/docs/en/operations/system-tables/dictionaries.md
+++ b/docs/en/operations/system-tables/dictionaries.md
@@ -21,6 +21,7 @@ Columns:

- `bytes_allocated` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Amount of RAM allocated for the dictionary.
- `query_count` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Number of queries since the dictionary was loaded or since the last successful reboot.
- `hit_rate` ([Float64](../../sql-reference/data-types/float.md)) — For cache dictionaries, the percentage of uses for which the value was in the cache.
+- `found_rate` ([Float64](../../sql-reference/data-types/float.md)) — The percentage of uses for which the value was found.
- `element_count` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Number of items stored in the dictionary.
- `load_factor` ([Float64](../../sql-reference/data-types/float.md)) — Percentage filled in the dictionary (for a hashed dictionary, the percentage filled in the hash table).
- `source` ([String](../../sql-reference/data-types/string.md)) — Text describing the [data source](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md) for the dictionary.
@@ -60,4 +61,4 @@ SELECT * FROM system.dictionaries
└──────────┴──────┴────────┴─────────────┴──────┴────────┴──────────────────────────────────────┴─────────────────────┴─────────────────┴─────────────┴──────────┴───────────────┴───────────────────────┴────────────────────────────┴──────────────┴──────────────┴─────────────────────┴──────────────────────────────┘

-[Original article](https://clickhouse.tech/docs/en/operations/system_tables/dictionaries)
\ No newline at end of file
+[Original article](https://clickhouse.tech/docs/en/operations/system_tables/dictionaries)
diff --git a/docs/en/operations/system-tables/distribution_queue.md b/docs/en/operations/system-tables/distribution_queue.md
index fdc6a134da2..3b09c20874c 100644
--- a/docs/en/operations/system-tables/distribution_queue.md
+++ b/docs/en/operations/system-tables/distribution_queue.md
@@ -18,6 +18,10 @@ Columns:

- `data_compressed_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Size of compressed data in local files, in bytes.

+- `broken_data_files` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Number of files that have been marked as broken (due to an error).
+
+- `broken_data_compressed_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Size of compressed data in broken files, in bytes.
+
- `last_exception` ([String](../../sql-reference/data-types/string.md)) — Text message about the last error that occurred (if any).
**Example**
diff --git a/docs/en/operations/system-tables/replication_queue.md b/docs/en/operations/system-tables/replication_queue.md
index f3e3a35f13b..539a29432ac 100644
--- a/docs/en/operations/system-tables/replication_queue.md
+++ b/docs/en/operations/system-tables/replication_queue.md
@@ -15,16 +15,16 @@ Columns:

- `node_name` ([String](../../sql-reference/data-types/string.md)) — Node name in ZooKeeper.

- `type` ([String](../../sql-reference/data-types/string.md)) — Type of the task in the queue, one of:
-    - `GET_PART` - Get the part from another replica.
-    - `ATTACH_PART` - Attach the part, possibly from our own replica (if found in `detached` folder).
-    You may think of it as a `GET_PART` with some optimisations as they're nearly identical.
-    - `MERGE_PARTS` - Merge the parts.
-    - `DROP_RANGE` - Delete the parts in the specified partition in the specified number range.
-    - `CLEAR_COLUMN` - NOTE: Deprecated. Drop specific column from specified partition.
-    - `CLEAR_INDEX` - NOTE: Deprecated. Drop specific index from specified partition.
-    - `REPLACE_RANGE` - Drop certain range of partitions and replace them by new ones
-    - `MUTATE_PART` - Apply one or several mutations to the part.
-    - `ALTER_METADATA` - Apply alter modification according to global /metadata and /columns paths
+
+    - `GET_PART` — Get the part from another replica.
+    - `ATTACH_PART` — Attach the part, possibly from our own replica (if found in the `detached` folder). You may think of it as a `GET_PART` with some optimizations as they're nearly identical.
+    - `MERGE_PARTS` — Merge the parts.
+    - `DROP_RANGE` — Delete the parts in the specified partition in the specified number range.
+    - `CLEAR_COLUMN` — NOTE: Deprecated. Drop specific column from specified partition.
+    - `CLEAR_INDEX` — NOTE: Deprecated. Drop specific index from specified partition.
+    - `REPLACE_RANGE` — Drop a certain range of parts and replace them with new ones.
+    - `MUTATE_PART` — Apply one or several mutations to the part.
+    - `ALTER_METADATA` — Apply alter modification according to global /metadata and /columns paths.

- `create_time` ([Datetime](../../sql-reference/data-types/datetime.md)) — Date and time when the task was submitted for execution.
diff --git a/docs/en/operations/system-tables/tables.md b/docs/en/operations/system-tables/tables.md
index 6ad1425e032..ccc9ab94f8b 100644
--- a/docs/en/operations/system-tables/tables.md
+++ b/docs/en/operations/system-tables/tables.md
@@ -1,59 +1,65 @@
# system.tables {#system-tables}

-Contains metadata of each table that the server knows about. Detached tables are not shown in `system.tables`.
+Contains metadata of each table that the server knows about.

-This table contains the following columns (the column type is shown in brackets):
+[Detached](../../sql-reference/statements/detach.md) tables are not shown in `system.tables`.

-- `database` (String) — The name of the database the table is in.
+[Temporary tables](../../sql-reference/statements/create/table.md#temporary-tables) are visible in `system.tables` only in the sessions where they were created. They are shown with an empty `database` field and with the `is_temporary` flag switched on.

-- `name` (String) — Table name.
+Columns:

-- `engine` (String) — Table engine name (without parameters).
+- `database` ([String](../../sql-reference/data-types/string.md)) — The name of the database the table is in.

-- `is_temporary` (UInt8) - Flag that indicates whether the table is temporary.
+- `name` ([String](../../sql-reference/data-types/string.md)) — Table name. -- `data_path` (String) - Path to the table data in the file system. +- `engine` ([String](../../sql-reference/data-types/string.md)) — Table engine name (without parameters). -- `metadata_path` (String) - Path to the table metadata in the file system. +- `is_temporary` ([UInt8](../../sql-reference/data-types/int-uint.md)) - Flag that indicates whether the table is temporary. -- `metadata_modification_time` (DateTime) - Time of latest modification of the table metadata. +- `data_path` ([String](../../sql-reference/data-types/string.md)) - Path to the table data in the file system. -- `dependencies_database` (Array(String)) - Database dependencies. +- `metadata_path` ([String](../../sql-reference/data-types/string.md)) - Path to the table metadata in the file system. -- `dependencies_table` (Array(String)) - Table dependencies ([MaterializedView](../../engines/table-engines/special/materializedview.md) tables based on the current table). +- `metadata_modification_time` ([DateTime](../../sql-reference/data-types/datetime.md)) - Time of latest modification of the table metadata. -- `create_table_query` (String) - The query that was used to create the table. +- `dependencies_database` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) - Database dependencies. -- `engine_full` (String) - Parameters of the table engine. +- `dependencies_table` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) - Table dependencies ([MaterializedView](../../engines/table-engines/special/materializedview.md) tables based on the current table). -- `partition_key` (String) - The partition key expression specified in the table. +- `create_table_query` ([String](../../sql-reference/data-types/string.md)) - The query that was used to create the table. -- `sorting_key` (String) - The sorting key expression specified in the table. +- `engine_full` ([String](../../sql-reference/data-types/string.md)) - Parameters of the table engine. -- `primary_key` (String) - The primary key expression specified in the table. +- `partition_key` ([String](../../sql-reference/data-types/string.md)) - The partition key expression specified in the table. -- `sampling_key` (String) - The sampling key expression specified in the table. +- `sorting_key` ([String](../../sql-reference/data-types/string.md)) - The sorting key expression specified in the table. -- `storage_policy` (String) - The storage policy: +- `primary_key` ([String](../../sql-reference/data-types/string.md)) - The primary key expression specified in the table. + +- `sampling_key` ([String](../../sql-reference/data-types/string.md)) - The sampling key expression specified in the table. + +- `storage_policy` ([String](../../sql-reference/data-types/string.md)) - The storage policy: - [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-multiple-volumes) - [Distributed](../../engines/table-engines/special/distributed.md#distributed) -- `total_rows` (Nullable(UInt64)) - Total number of rows, if it is possible to quickly determine exact number of rows in the table, otherwise `Null` (including underying `Buffer` table). 
+- `total_rows` ([Nullable](../../sql-reference/data-types/nullable.md)([UInt64](../../sql-reference/data-types/int-uint.md))) - Total number of rows, if it is possible to quickly determine exact number of rows in the table, otherwise `NULL` (including underlying `Buffer` table). -- `total_bytes` (Nullable(UInt64)) - Total number of bytes, if it is possible to quickly determine exact number of bytes for the table on storage, otherwise `Null` (**does not** includes any underlying storage). +- `total_bytes` ([Nullable](../../sql-reference/data-types/nullable.md)([UInt64](../../sql-reference/data-types/int-uint.md))) - Total number of bytes, if it is possible to quickly determine exact number of bytes for the table on storage, otherwise `NULL` (does not include any underlying storage). - If the table stores data on disk, returns used space on disk (i.e. compressed). - If the table stores data in memory, returns approximated number of used bytes in memory. -- `lifetime_rows` (Nullable(UInt64)) - Total number of rows INSERTed since server start (only for `Buffer` tables). +- `lifetime_rows` ([Nullable](../../sql-reference/data-types/nullable.md)([UInt64](../../sql-reference/data-types/int-uint.md))) - Total number of rows INSERTed since server start (only for `Buffer` tables). -- `lifetime_bytes` (Nullable(UInt64)) - Total number of bytes INSERTed since server start (only for `Buffer` tables). +- `lifetime_bytes` ([Nullable](../../sql-reference/data-types/nullable.md)([UInt64](../../sql-reference/data-types/int-uint.md))) - Total number of bytes INSERTed since server start (only for `Buffer` tables). The `system.tables` table is used in `SHOW TABLES` query implementation. +**Example** + ```sql -:) SELECT * FROM system.tables LIMIT 2 FORMAT Vertical; +SELECT * FROM system.tables LIMIT 2 FORMAT Vertical; ``` ```text @@ -100,8 +106,6 @@ sampling_key: storage_policy: total_rows: ᴺᵁᴸᴸ total_bytes: ᴺᵁᴸᴸ - -2 rows in set. Elapsed: 0.004 sec. ``` [Original article](https://clickhouse.tech/docs/en/operations/system_tables/tables) diff --git a/docs/en/operations/tips.md b/docs/en/operations/tips.md index e62dea0b04e..865fe58d7cd 100644 --- a/docs/en/operations/tips.md +++ b/docs/en/operations/tips.md @@ -191,8 +191,9 @@ dynamicConfigFile=/etc/zookeeper-{{ '{{' }} cluster['name'] {{ '}}' }}/conf/zoo. Java version: ``` text -Java(TM) SE Runtime Environment (build 1.8.0_25-b17) -Java HotSpot(TM) 64-Bit Server VM (build 25.25-b02, mixed mode) +openjdk 11.0.5-shenandoah 2019-10-15 +OpenJDK Runtime Environment (build 11.0.5-shenandoah+10-adhoc.heretic.src) +OpenJDK 64-Bit Server VM (build 11.0.5-shenandoah+10-adhoc.heretic.src, mixed mode) ``` JVM parameters: @@ -204,7 +205,7 @@ ZOOCFGDIR=/etc/$NAME/conf # TODO this is really ugly # How to find out, which jars are needed? 
# seems, that log4j requires the log4j.properties file to be in the classpath -CLASSPATH="$ZOOCFGDIR:/usr/build/classes:/usr/build/lib/*.jar:/usr/share/zookeeper/zookeeper-3.5.1-metrika.jar:/usr/share/zookeeper/slf4j-log4j12-1.7.5.jar:/usr/share/zookeeper/slf4j-api-1.7.5.jar:/usr/share/zookeeper/servlet-api-2.5-20081211.jar:/usr/share/zookeeper/netty-3.7.0.Final.jar:/usr/share/zookeeper/log4j-1.2.16.jar:/usr/share/zookeeper/jline-2.11.jar:/usr/share/zookeeper/jetty-util-6.1.26.jar:/usr/share/zookeeper/jetty-6.1.26.jar:/usr/share/zookeeper/javacc.jar:/usr/share/zookeeper/jackson-mapper-asl-1.9.11.jar:/usr/share/zookeeper/jackson-core-asl-1.9.11.jar:/usr/share/zookeeper/commons-cli-1.2.jar:/usr/src/java/lib/*.jar:/usr/etc/zookeeper" +CLASSPATH="$ZOOCFGDIR:/usr/build/classes:/usr/build/lib/*.jar:/usr/share/zookeeper-3.6.2/lib/audience-annotations-0.5.0.jar:/usr/share/zookeeper-3.6.2/lib/commons-cli-1.2.jar:/usr/share/zookeeper-3.6.2/lib/commons-lang-2.6.jar:/usr/share/zookeeper-3.6.2/lib/jackson-annotations-2.10.3.jar:/usr/share/zookeeper-3.6.2/lib/jackson-core-2.10.3.jar:/usr/share/zookeeper-3.6.2/lib/jackson-databind-2.10.3.jar:/usr/share/zookeeper-3.6.2/lib/javax.servlet-api-3.1.0.jar:/usr/share/zookeeper-3.6.2/lib/jetty-http-9.4.24.v20191120.jar:/usr/share/zookeeper-3.6.2/lib/jetty-io-9.4.24.v20191120.jar:/usr/share/zookeeper-3.6.2/lib/jetty-security-9.4.24.v20191120.jar:/usr/share/zookeeper-3.6.2/lib/jetty-server-9.4.24.v20191120.jar:/usr/share/zookeeper-3.6.2/lib/jetty-servlet-9.4.24.v20191120.jar:/usr/share/zookeeper-3.6.2/lib/jetty-util-9.4.24.v20191120.jar:/usr/share/zookeeper-3.6.2/lib/jline-2.14.6.jar:/usr/share/zookeeper-3.6.2/lib/json-simple-1.1.1.jar:/usr/share/zookeeper-3.6.2/lib/log4j-1.2.17.jar:/usr/share/zookeeper-3.6.2/lib/metrics-core-3.2.5.jar:/usr/share/zookeeper-3.6.2/lib/netty-buffer-4.1.50.Final.jar:/usr/share/zookeeper-3.6.2/lib/netty-codec-4.1.50.Final.jar:/usr/share/zookeeper-3.6.2/lib/netty-common-4.1.50.Final.jar:/usr/share/zookeeper-3.6.2/lib/netty-handler-4.1.50.Final.jar:/usr/share/zookeeper-3.6.2/lib/netty-resolver-4.1.50.Final.jar:/usr/share/zookeeper-3.6.2/lib/netty-transport-4.1.50.Final.jar:/usr/share/zookeeper-3.6.2/lib/netty-transport-native-epoll-4.1.50.Final.jar:/usr/share/zookeeper-3.6.2/lib/netty-transport-native-unix-common-4.1.50.Final.jar:/usr/share/zookeeper-3.6.2/lib/simpleclient-0.6.0.jar:/usr/share/zookeeper-3.6.2/lib/simpleclient_common-0.6.0.jar:/usr/share/zookeeper-3.6.2/lib/simpleclient_hotspot-0.6.0.jar:/usr/share/zookeeper-3.6.2/lib/simpleclient_servlet-0.6.0.jar:/usr/share/zookeeper-3.6.2/lib/slf4j-api-1.7.25.jar:/usr/share/zookeeper-3.6.2/lib/slf4j-log4j12-1.7.25.jar:/usr/share/zookeeper-3.6.2/lib/snappy-java-1.1.7.jar:/usr/share/zookeeper-3.6.2/lib/zookeeper-3.6.2.jar:/usr/share/zookeeper-3.6.2/lib/zookeeper-jute-3.6.2.jar:/usr/share/zookeeper-3.6.2/lib/zookeeper-prometheus-metrics-3.6.2.jar:/usr/share/zookeeper-3.6.2/etc" ZOOCFG="$ZOOCFGDIR/zoo.cfg" ZOO_LOG_DIR=/var/log/$NAME @@ -213,27 +214,17 @@ GROUP=zookeeper PIDDIR=/var/run/$NAME PIDFILE=$PIDDIR/$NAME.pid SCRIPTNAME=/etc/init.d/$NAME -JAVA=/usr/bin/java +JAVA=/usr/local/jdk-11/bin/java ZOOMAIN="org.apache.zookeeper.server.quorum.QuorumPeerMain" ZOO_LOG4J_PROP="INFO,ROLLINGFILE" JMXLOCALONLY=false JAVA_OPTS="-Xms{{ '{{' }} cluster.get('xms','128M') {{ '}}' }} \ -Xmx{{ '{{' }} cluster.get('xmx','1G') {{ '}}' }} \ - -Xloggc:/var/log/$NAME/zookeeper-gc.log \ - -XX:+UseGCLogFileRotation \ - -XX:NumberOfGCLogFiles=16 \ - -XX:GCLogFileSize=16M \ + 
-Xlog:safepoint,gc*=info,age*=debug:file=/var/log/$NAME/zookeeper-gc.log:time,level,tags:filecount=16,filesize=16M -verbose:gc \ - -XX:+PrintGCTimeStamps \ - -XX:+PrintGCDateStamps \ - -XX:+PrintGCDetails - -XX:+PrintTenuringDistribution \ - -XX:+PrintGCApplicationStoppedTime \ - -XX:+PrintGCApplicationConcurrentTime \ - -XX:+PrintSafepointStatistics \ - -XX:+UseParNewGC \ - -XX:+UseConcMarkSweepGC \ --XX:+CMSParallelRemarkEnabled" + -XX:+UseG1GC \ + -Djute.maxbuffer=8388608 \ + -XX:MaxGCPauseMillis=50" ``` Salt init: diff --git a/docs/en/operations/update.md b/docs/en/operations/update.md index 9fa9c44e130..dbcf9ae2b3e 100644 --- a/docs/en/operations/update.md +++ b/docs/en/operations/update.md @@ -15,7 +15,8 @@ $ sudo service clickhouse-server restart If you installed ClickHouse using something other than the recommended `deb` packages, use the appropriate update method. -ClickHouse does not support a distributed update. The operation should be performed consecutively on each separate server. Do not update all the servers on a cluster simultaneously, or the cluster will be unavailable for some time. +!!! note "Note"
+    You can update multiple servers at once as long as there is no moment when all replicas of one shard are offline.

The upgrade of older version of ClickHouse to specific version:

@@ -28,7 +29,3 @@ $ sudo apt-get update $ sudo apt-get install clickhouse-server=xx.yy.a.b clickhouse-client=xx.yy.a.b clickhouse-common-static=xx.yy.a.b $ sudo service clickhouse-server restart ``` - - - - diff --git a/docs/en/operations/utilities/clickhouse-format.md b/docs/en/operations/utilities/clickhouse-format.md new file mode 100644 index 00000000000..17948dce82d --- /dev/null +++ b/docs/en/operations/utilities/clickhouse-format.md @@ -0,0 +1,98 @@ +--- +toc_priority: 65 +toc_title: clickhouse-format +--- + +# clickhouse-format {#clickhouse-format} + +Allows formatting input queries. + +Keys: + +- `--help` or `-h` — Produce help message. +- `--hilite` — Add syntax highlight with ANSI terminal escape sequences. +- `--oneline` — Format in a single line. +- `--quiet` or `-q` — Just check syntax, no output on success. +- `--multiquery` or `-n` — Allow multiple queries in the same file. +- `--obfuscate` — Obfuscate instead of formatting. +- `--seed ` — An arbitrary seed string that determines the result of obfuscation. +- `--backslash` — Add a backslash at the end of each line of the formatted query. Can be useful when you copy a multi-line query from the web or another source and want to execute it in the command line. + +## Examples {#examples} + +1. Highlighting and single line: + +```bash +$ clickhouse-format --oneline --hilite <<< "SELECT sum(number) FROM numbers(5);" +``` + +Result: + +```sql +SELECT sum(number) FROM numbers(5) +``` + +2. Multiqueries: + +```bash +$ clickhouse-format -n <<< "SELECT * FROM (SELECT 1 AS x UNION ALL SELECT 1 UNION DISTINCT SELECT 3);" +``` + +Result: + +```text +SELECT * +FROM +( +    SELECT 1 AS x +    UNION ALL +    SELECT 1 +    UNION DISTINCT +    SELECT 3 +) +; +``` + +3. 
Obfuscating: + +```bash +$ clickhouse-format --seed Hello --obfuscate <<< "SELECT cost_first_screen BETWEEN a AND b, CASE WHEN x >= 123 THEN y ELSE NULL END;" +``` + +Result: + +```text +SELECT treasury_mammoth_hazelnut BETWEEN nutmeg AND span, CASE WHEN chive >= 116 THEN switching ELSE ANYTHING END; +``` + +Same query and another seed string: + +```bash +$ clickhouse-format --seed World --obfuscate <<< "SELECT cost_first_screen BETWEEN a AND b, CASE WHEN x >= 123 THEN y ELSE NULL END;" +``` + +Result: + +```text +SELECT horse_tape_summer BETWEEN folklore AND moccasins, CASE WHEN intestine >= 116 THEN nonconformist ELSE FORESTRY END; +``` + +4. Adding backslash: + +```bash +$ clickhouse-format --backslash <<< "SELECT * FROM (SELECT 1 AS x UNION ALL SELECT 1 UNION DISTINCT SELECT 3);" +``` + +Result: + +```text +SELECT * \ +FROM \ +( \ + SELECT 1 AS x \ + UNION ALL \ + SELECT 1 \ + UNION DISTINCT \ + SELECT 3 \ +) +``` diff --git a/docs/en/operations/utilities/index.md b/docs/en/operations/utilities/index.md index fe5048f7044..4adbb299b1d 100644 --- a/docs/en/operations/utilities/index.md +++ b/docs/en/operations/utilities/index.md @@ -9,5 +9,8 @@ toc_title: Overview - [clickhouse-local](../../operations/utilities/clickhouse-local.md) — Allows running SQL queries on data without stopping the ClickHouse server, similar to how `awk` does this. - [clickhouse-copier](../../operations/utilities/clickhouse-copier.md) — Copies (and reshards) data from one cluster to another cluster. - [clickhouse-benchmark](../../operations/utilities/clickhouse-benchmark.md) — Loads server with the custom queries and settings. +- [clickhouse-format](../../operations/utilities/clickhouse-format.md) — Enables formatting input queries. +- [ClickHouse obfuscator](../../operations/utilities/clickhouse-obfuscator.md) — Obfuscates data. +- [ClickHouse compressor](../../operations/utilities/clickhouse-compressor.md) — Compresses and decompresses data. +- [clickhouse-odbc-bridge](../../operations/utilities/odbc-bridge.md) — A proxy server for ODBC driver. -[Original article](https://clickhouse.tech/docs/en/operations/utils/) diff --git a/docs/en/sql-reference/aggregate-functions/combinators.md b/docs/en/sql-reference/aggregate-functions/combinators.md index cddef68d49c..259202805d3 100644 --- a/docs/en/sql-reference/aggregate-functions/combinators.md +++ b/docs/en/sql-reference/aggregate-functions/combinators.md @@ -27,7 +27,37 @@ Example 2: `uniqArray(arr)` – Counts the number of unique elements in all ‘a ## -SimpleState {#agg-functions-combinator-simplestate} -If you apply this combinator, the aggregate function returns the same value but with a different type. This is an `SimpleAggregateFunction(...)` that can be stored in a table to work with [AggregatingMergeTree](../../engines/table-engines/mergetree-family/aggregatingmergetree.md) table engines. +If you apply this combinator, the aggregate function returns the same value but with a different type. This is a [SimpleAggregateFunction(...)](../../sql-reference/data-types/simpleaggregatefunction.md) that can be stored in a table to work with [AggregatingMergeTree](../../engines/table-engines/mergetree-family/aggregatingmergetree.md) tables. + +**Syntax** + +``` sql +SimpleState(x) +``` + +**Arguments** + +- `x` — Aggregate function parameters. + +**Returned values** + +The value of an aggregate function with the `SimpleAggregateFunction(...)` type. 
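+As an illustration of the intended use, here is a minimal sketch of feeding such values into an [AggregatingMergeTree](../../engines/table-engines/mergetree-family/aggregatingmergetree.md) table (the table and column names are hypothetical):
+
+``` sql
+CREATE TABLE simple_totals
+(
+    `key` UInt32,
+    `total` SimpleAggregateFunction(sum, UInt64)
+)
+ENGINE = AggregatingMergeTree
+ORDER BY key;
+
+-- -SimpleState makes sum() return a SimpleAggregateFunction(sum, UInt64) value
+INSERT INTO simple_totals
+SELECT number % 2 AS key, sumSimpleState(number)
+FROM numbers(10)
+GROUP BY key;
+
+-- re-aggregate on read, since parts may not be fully merged yet
+SELECT key, sum(total) FROM simple_totals GROUP BY key;
+```
+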
+ +**Example** + +Query: + +``` sql +WITH anySimpleState(number) AS c SELECT toTypeName(c), c FROM numbers(1); +``` + +Result: + +``` text +┌─toTypeName(c)────────────────────────┬─c─┐ +│ SimpleAggregateFunction(any, UInt64) │ 0 │ +└──────────────────────────────────────┴───┘ +``` ## -State {#agg-functions-combinator-state} @@ -249,4 +279,3 @@ FROM people └────────┴───────────────────────────┘ ``` - diff --git a/docs/en/sql-reference/aggregate-functions/reference/argmax.md b/docs/en/sql-reference/aggregate-functions/reference/argmax.md index 72aa607a751..0630e2f585e 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/argmax.md +++ b/docs/en/sql-reference/aggregate-functions/reference/argmax.md @@ -6,20 +6,12 @@ toc_priority: 106 Calculates the `arg` value for a maximum `val` value. If there are several different values of `arg` for maximum values of `val`, returns the first of these values encountered. -Tuple version of this function will return the tuple with the maximum `val` value. It is convenient for use with [SimpleAggregateFunction](../../../sql-reference/data-types/simpleaggregatefunction.md). - **Syntax** ``` sql argMax(arg, val) ``` -or - -``` sql -argMax(tuple(arg, val)) -``` - **Arguments** - `arg` — Argument. @@ -29,13 +21,7 @@ argMax(tuple(arg, val)) - `arg` value that corresponds to maximum `val` value. -Type: matches `arg` type. - -For tuple in the input: - -- Tuple `(arg, val)`, where `val` is the maximum value and `arg` is a corresponding value. - -Type: [Tuple](../../../sql-reference/data-types/tuple.md). +Type: matches `arg` type. **Example** @@ -52,15 +38,13 @@ Input table: Query: ``` sql -SELECT argMax(user, salary), argMax(tuple(user, salary), salary), argMax(tuple(user, salary)) FROM salary; +SELECT argMax(user, salary) FROM salary; ``` Result: ``` text -┌─argMax(user, salary)─┬─argMax(tuple(user, salary), salary)─┬─argMax(tuple(user, salary))─┐ -│ director │ ('director',5000) │ ('director',5000) │ -└──────────────────────┴─────────────────────────────────────┴─────────────────────────────┘ +┌─argMax(user, salary)─┐ +│ director │ +└──────────────────────┘ ``` - -[Original article](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/argmax/) diff --git a/docs/en/sql-reference/aggregate-functions/reference/argmin.md b/docs/en/sql-reference/aggregate-functions/reference/argmin.md index 7ddc38cd28a..a259a76b7d7 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/argmin.md +++ b/docs/en/sql-reference/aggregate-functions/reference/argmin.md @@ -6,20 +6,12 @@ toc_priority: 105 Calculates the `arg` value for a minimum `val` value. If there are several different values of `arg` for minimum values of `val`, returns the first of these values encountered. -Tuple version of this function will return the tuple with the minimum `val` value. It is convenient for use with [SimpleAggregateFunction](../../../sql-reference/data-types/simpleaggregatefunction.md). - **Syntax** ``` sql argMin(arg, val) ``` -or - -``` sql -argMin(tuple(arg, val)) -``` - **Arguments** - `arg` — Argument. @@ -29,13 +21,7 @@ argMin(tuple(arg, val)) - `arg` value that corresponds to minimum `val` value. -Type: matches `arg` type. - -For tuple in the input: - -- Tuple `(arg, val)`, where `val` is the minimum value and `arg` is a corresponding value. - -Type: [Tuple](../../../sql-reference/data-types/tuple.md). +Type: matches `arg` type. 
**Example** @@ -52,15 +38,13 @@ Input table: Query: ``` sql -SELECT argMin(user, salary), argMin(tuple(user, salary)) FROM salary; +SELECT argMin(user, salary) FROM salary ``` Result: ``` text -┌─argMin(user, salary)─┬─argMin(tuple(user, salary))─┐ -│ worker │ ('worker',1000) │ -└──────────────────────┴─────────────────────────────┘ +┌─argMin(user, salary)─┐ +│ worker │ +└──────────────────────┘ ``` - -[Original article](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/argmin/) diff --git a/docs/en/sql-reference/aggregate-functions/reference/deltasum.md b/docs/en/sql-reference/aggregate-functions/reference/deltasum.md index e0c74576bb6..2945084db77 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/deltasum.md +++ b/docs/en/sql-reference/aggregate-functions/reference/deltasum.md @@ -6,6 +6,9 @@ toc_priority: 141 Sums the arithmetic difference between consecutive rows. If the difference is negative, it is ignored. +!!! info "Note" + The underlying data must be sorted for this function to work properly. If you would like to use this function in a [materialized view](../../../sql-reference/statements/create/view.md#materialized), you most likely want to use the [deltaSumTimestamp](../../../sql-reference/aggregate-functions/reference/deltasumtimestamp.md#agg_functions-deltasumtimestamp) method instead. + **Syntax** ``` sql diff --git a/docs/en/sql-reference/aggregate-functions/reference/deltasumtimestamp.md b/docs/en/sql-reference/aggregate-functions/reference/deltasumtimestamp.md new file mode 100644 index 00000000000..241010c4761 --- /dev/null +++ b/docs/en/sql-reference/aggregate-functions/reference/deltasumtimestamp.md @@ -0,0 +1,45 @@ +--- +toc_priority: 141 +--- + +# deltaSumTimestamp {#agg_functions-deltasumtimestamp} + +Adds the difference between consecutive rows. If the difference is negative, it is ignored. + +This function is primarily for [materialized views](../../../sql-reference/statements/create/view.md#materialized) that are ordered by some time bucket-aligned timestamp, for example, a `toStartOfMinute` bucket. Because the rows in such a materialized view will all have the same timestamp, it is impossible for them to be merged in the "right" order. This function keeps track of the `timestamp` of the values it's seen, so it's possible to order the states correctly during merging. + +To calculate the delta sum across an ordered collection you can simply use the [deltaSum](../../../sql-reference/aggregate-functions/reference/deltasum.md#agg_functions-deltasum) function. + +**Syntax** + +``` sql +deltaSumTimestamp(value, timestamp) +``` + +**Arguments** + +- `value` — Input values, must be some [Integer](../../data-types/int-uint.md) type or [Float](../../data-types/float.md) type or a [Date](../../data-types/date.md) or [DateTime](../../data-types/datetime.md). +- `timestamp` — The parameter for order values, must be some [Integer](../../data-types/int-uint.md) type or [Float](../../data-types/float.md) type or a [Date](../../data-types/date.md) or [DateTime](../../data-types/datetime.md). + +**Returned value** + +- Accumulated differences between consecutive values, ordered by the `timestamp` parameter. + +Type: [Integer](../../data-types/int-uint.md) or [Float](../../data-types/float.md) or [Date](../../data-types/date.md) or [DateTime](../../data-types/datetime.md). 
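+
+A minimal materialized-view sketch of the use case described above (all table and column names here are hypothetical); the generic `-State`/`-Merge` combinators are used to store and later combine the partial aggregation states:
+
+``` sql
+CREATE TABLE events (ts DateTime, value UInt64) ENGINE = MergeTree ORDER BY ts;
+
+CREATE MATERIALIZED VIEW events_per_minute
+ENGINE = AggregatingMergeTree ORDER BY minute AS
+SELECT
+    toStartOfMinute(ts) AS minute,
+    deltaSumTimestampState(value, ts) AS delta_state
+FROM events
+GROUP BY minute;
+
+-- states are merged in the order given by the timestamp argument
+SELECT minute, deltaSumTimestampMerge(delta_state) AS delta
+FROM events_per_minute
+GROUP BY minute;
+```
+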
+
+**Example**
+
+Query:
+
+```sql
+SELECT deltaSumTimestamp(value, timestamp)
+FROM (SELECT number AS timestamp, [0, 4, 8, 3, 0, 0, 0, 1, 3, 5][number] AS value FROM numbers(1, 10));
+```
+
+Result:
+
+``` text
+┌─deltaSumTimestamp(value, timestamp)─┐
+│                                  13 │
+└─────────────────────────────────────┘
+```
diff --git a/docs/en/sql-reference/aggregate-functions/reference/max.md b/docs/en/sql-reference/aggregate-functions/reference/max.md index c462dd590a6..25173a48906 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/max.md +++ b/docs/en/sql-reference/aggregate-functions/reference/max.md @@ -4,4 +4,21 @@ toc_priority: 3 # max {#agg_function-max} -Calculates the maximum. +Aggregate function that calculates the maximum across a group of values. + +Example: + +``` +SELECT max(salary) FROM employees; +``` + +``` +SELECT department, max(salary) FROM employees GROUP BY department; +``` + +If you need a non-aggregate function to choose the maximum of two values, see `greatest`: + +``` +SELECT greatest(a, b) FROM table; +``` + diff --git a/docs/en/sql-reference/aggregate-functions/reference/min.md b/docs/en/sql-reference/aggregate-functions/reference/min.md index 56b03468243..64b155857f8 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/min.md +++ b/docs/en/sql-reference/aggregate-functions/reference/min.md @@ -4,4 +4,20 @@ toc_priority: 2 ## min {#agg_function-min} -Calculates the minimum. +Aggregate function that calculates the minimum across a group of values. + +Example: + +``` +SELECT min(salary) FROM employees; +``` + +``` +SELECT department, min(salary) FROM employees GROUP BY department; +``` + +If you need a non-aggregate function to choose the minimum of two values, see `least`: + +``` +SELECT least(a, b) FROM table; +``` diff --git a/docs/en/sql-reference/aggregate-functions/reference/quantiletdigest.md b/docs/en/sql-reference/aggregate-functions/reference/quantiletdigest.md index dcc665a68af..dd0d59978d1 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/quantiletdigest.md +++ b/docs/en/sql-reference/aggregate-functions/reference/quantiletdigest.md @@ -6,7 +6,7 @@ toc_priority: 207 Computes an approximate [quantile](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence using the [t-digest](https://github.com/tdunning/t-digest/blob/master/docs/t-digest-paper/histo.pdf) algorithm. -The maximum error is 1%. Memory consumption is `log(n)`, where `n` is a number of values. The result depends on the order of running the query, and is nondeterministic. +Memory consumption is `log(n)`, where `n` is the number of values. The result depends on the order of running the query, and is nondeterministic. The performance of the function is lower than performance of [quantile](../../../sql-reference/aggregate-functions/reference/quantile.md#quantile) or [quantileTiming](../../../sql-reference/aggregate-functions/reference/quantiletiming.md#quantiletiming). In terms of the ratio of State size to precision, this function is much better than `quantile`. diff --git a/docs/en/sql-reference/aggregate-functions/reference/quantiletdigestweighted.md b/docs/en/sql-reference/aggregate-functions/reference/quantiletdigestweighted.md index 56ef598f7e7..32d174136e0 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/quantiletdigestweighted.md +++ b/docs/en/sql-reference/aggregate-functions/reference/quantiletdigestweighted.md @@ -12,6 +12,9 @@ The result depends on the order of running the query, and is nondeterministic. 
When using multiple `quantile*` functions with different levels in a query, the internal states are not combined (that is, the query works less efficiently than it could). In this case, use the [quantiles](../../../sql-reference/aggregate-functions/reference/quantiles.md#quantiles) function. +!!! note "Note"
+    Using `quantileTDigestWeighted` [is not recommended for tiny data sets](https://github.com/tdunning/t-digest/issues/167#issuecomment-828650275) and can lead to significant error. In this case, consider using [`quantileTDigest`](../../../sql-reference/aggregate-functions/reference/quantiletdigest.md) instead.
+
 **Syntax** ``` sql diff --git a/docs/en/sql-reference/aggregate-functions/reference/uniq.md b/docs/en/sql-reference/aggregate-functions/reference/uniq.md index 7ba2cdc6cb8..94771c59cc8 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/uniq.md +++ b/docs/en/sql-reference/aggregate-functions/reference/uniq.md @@ -38,3 +38,4 @@ We recommend using this function in almost all scenarios. - [uniqCombined64](../../../sql-reference/aggregate-functions/reference/uniqcombined64.md#agg_function-uniqcombined64) - [uniqHLL12](../../../sql-reference/aggregate-functions/reference/uniqhll12.md#agg_function-uniqhll12) - [uniqExact](../../../sql-reference/aggregate-functions/reference/uniqexact.md#agg_function-uniqexact) +- [uniqTheta](../../../sql-reference/aggregate-functions/reference/uniqthetasketch.md#agg_function-uniqthetasketch) diff --git a/docs/en/sql-reference/aggregate-functions/reference/uniqcombined.md b/docs/en/sql-reference/aggregate-functions/reference/uniqcombined.md index 4434686ae61..8ae916961f9 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/uniqcombined.md +++ b/docs/en/sql-reference/aggregate-functions/reference/uniqcombined.md @@ -49,3 +49,4 @@ Compared to the [uniq](../../../sql-reference/aggregate-functions/reference/uniq - [uniqCombined64](../../../sql-reference/aggregate-functions/reference/uniqcombined64.md#agg_function-uniqcombined64) - [uniqHLL12](../../../sql-reference/aggregate-functions/reference/uniqhll12.md#agg_function-uniqhll12) - [uniqExact](../../../sql-reference/aggregate-functions/reference/uniqexact.md#agg_function-uniqexact) +- [uniqTheta](../../../sql-reference/aggregate-functions/reference/uniqthetasketch.md#agg_function-uniqthetasketch) diff --git a/docs/en/sql-reference/aggregate-functions/reference/uniqexact.md b/docs/en/sql-reference/aggregate-functions/reference/uniqexact.md index eee675016ee..e446258fbf7 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/uniqexact.md +++ b/docs/en/sql-reference/aggregate-functions/reference/uniqexact.md @@ -23,3 +23,4 @@ The function takes a variable number of parameters. 
Parameters can be `Tuple`, ` - [uniq](../../../sql-reference/aggregate-functions/reference/uniq.md#agg_function-uniq) - [uniqCombined](../../../sql-reference/aggregate-functions/reference/uniq.md#agg_function-uniqcombined) - [uniqHLL12](../../../sql-reference/aggregate-functions/reference/uniq.md#agg_function-uniqhll12) +- [uniqTheta](../../../sql-reference/aggregate-functions/reference/uniqthetasketch.md#agg_function-uniqthetasketch) diff --git a/docs/en/sql-reference/aggregate-functions/reference/uniqhll12.md b/docs/en/sql-reference/aggregate-functions/reference/uniqhll12.md index 4983220ed7f..80b1e935b55 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/uniqhll12.md +++ b/docs/en/sql-reference/aggregate-functions/reference/uniqhll12.md @@ -37,3 +37,4 @@ We don’t recommend using this function. In most cases, use the [uniq](../../.. - [uniq](../../../sql-reference/aggregate-functions/reference/uniq.md#agg_function-uniq) - [uniqCombined](../../../sql-reference/aggregate-functions/reference/uniqcombined.md#agg_function-uniqcombined) - [uniqExact](../../../sql-reference/aggregate-functions/reference/uniqexact.md#agg_function-uniqexact) +- [uniqTheta](../../../sql-reference/aggregate-functions/reference/uniqthetasketch.md#agg_function-uniqthetasketch) diff --git a/docs/en/sql-reference/aggregate-functions/reference/uniqthetasketch.md b/docs/en/sql-reference/aggregate-functions/reference/uniqthetasketch.md new file mode 100644 index 00000000000..b5161462442 --- /dev/null +++ b/docs/en/sql-reference/aggregate-functions/reference/uniqthetasketch.md @@ -0,0 +1,39 @@ +--- +toc_priority: 195 +--- + +# uniqTheta {#agg_function-uniqthetasketch} + +Calculates the approximate number of different argument values, using the [Theta Sketch Framework](https://datasketches.apache.org/docs/Theta/ThetaSketchFramework.html). + +``` sql +uniqTheta(x[, ...]) +``` + +**Arguments** + +The function takes a variable number of parameters. Parameters can be `Tuple`, `Array`, `Date`, `DateTime`, `String`, or numeric types. + +**Returned value** + +- A [UInt64](../../../sql-reference/data-types/int-uint.md)-type number. + +**Implementation details** + +Function: + +- Calculates a hash for all parameters in the aggregate, then uses it in calculations. + +- Uses the [KMV](https://datasketches.apache.org/docs/Theta/InverseEstimate.html) algorithm to approximate the number of different argument values. + +    A 4096 (2^12) 64-bit sketch is used. The size of the state is about 41 KB. + +- The relative error is 3.125% (95% confidence), see the [relative error table](https://datasketches.apache.org/docs/Theta/ThetaErrorTable.html) for details. + +**See Also** + +- [uniq](../../../sql-reference/aggregate-functions/reference/uniq.md#agg_function-uniq) +- [uniqCombined](../../../sql-reference/aggregate-functions/reference/uniqcombined.md#agg_function-uniqcombined) +- [uniqCombined64](../../../sql-reference/aggregate-functions/reference/uniqcombined64.md#agg_function-uniqcombined64) +- [uniqHLL12](../../../sql-reference/aggregate-functions/reference/uniqhll12.md#agg_function-uniqhll12) +- [uniqExact](../../../sql-reference/aggregate-functions/reference/uniqexact.md#agg_function-uniqexact) diff --git a/docs/en/sql-reference/data-types/date.md b/docs/en/sql-reference/data-types/date.md index 886e93f433c..0cfac4d59fe 100644 --- a/docs/en/sql-reference/data-types/date.md +++ b/docs/en/sql-reference/data-types/date.md @@ -5,7 +5,7 @@ toc_title: Date # Date {#data_type-date} -A date. 
Stored in two bytes as the number of days since 1970-01-01 (unsigned). Allows storing values from just after the beginning of the Unix Epoch to the upper threshold defined by a constant at the compilation stage (currently, this is until the year 2106, but the final fully-supported year is 2105). +A date. Stored in two bytes as the number of days since 1970-01-01 (unsigned). Allows storing values from just after the beginning of the Unix Epoch to the upper threshold defined by a constant at the compilation stage (currently, this is until the year 2149, but the final fully-supported year is 2148). The date value is stored without the time zone. diff --git a/docs/en/sql-reference/data-types/datetime.md b/docs/en/sql-reference/data-types/datetime.md index d95abe57510..ed07f599b91 100644 --- a/docs/en/sql-reference/data-types/datetime.md +++ b/docs/en/sql-reference/data-types/datetime.md @@ -23,7 +23,7 @@ The point in time is saved as a [Unix timestamp](https://en.wikipedia.org/wiki/U Timezone agnostic unix timestamp is stored in tables, and the timezone is used to transform it to text format or back during data import/export or to make calendar calculations on the values (example: `toDate`, `toHour` functions et cetera). The time zone is not stored in the rows of the table (or in resultset), but is stored in the column metadata. -A list of supported time zones can be found in the [IANA Time Zone Database](https://www.iana.org/time-zones) and also can be queried by `SELECT * FROM system.time_zones`. +A list of supported time zones can be found in the [IANA Time Zone Database](https://www.iana.org/time-zones) and also can be queried by `SELECT * FROM system.time_zones`. [The list](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones) is also available at Wikipedia. You can explicitly set a time zone for `DateTime`-type columns when creating a table. Example: `DateTime('UTC')`. If the time zone isn’t set, ClickHouse uses the value of the [timezone](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) parameter in the server settings or the operating system settings at the moment of the ClickHouse server start. diff --git a/docs/en/sql-reference/data-types/datetime64.md b/docs/en/sql-reference/data-types/datetime64.md index 5cba8315090..1d3725b9fb3 100644 --- a/docs/en/sql-reference/data-types/datetime64.md +++ b/docs/en/sql-reference/data-types/datetime64.md @@ -9,7 +9,7 @@ Allows to store an instant in time, that can be expressed as a calendar date and Tick size (precision): 10^(-precision) seconds -Syntax: +**Syntax:** ``` sql DateTime64(precision, [timezone]) @@ -17,9 +17,11 @@ DateTime64(precision, [timezone]) Internally, stores data as a number of ‘ticks’ since epoch start (1970-01-01 00:00:00 UTC) as Int64. The tick resolution is determined by the precision parameter. Additionally, the `DateTime64` type can store time zone that is the same for the entire column, that affects how the values of the `DateTime64` type values are displayed in text format and how the values specified as strings are parsed (‘2020-01-01 05:00:01.000’). The time zone is not stored in the rows of the table (or in resultset), but is stored in the column metadata. See details in [DateTime](../../sql-reference/data-types/datetime.md). +The supported range is from January 1, 1925 to December 31, 2283. + ## Examples {#examples} -**1.** Creating a table with `DateTime64`-type column and inserting data into it: +1. 
Creating a table with `DateTime64`-type column and inserting data into it: ``` sql CREATE TABLE dt @@ -27,15 +29,15 @@ CREATE TABLE dt `timestamp` DateTime64(3, 'Europe/Moscow'), `event_id` UInt8 ) -ENGINE = TinyLog +ENGINE = TinyLog; ``` ``` sql -INSERT INTO dt Values (1546300800000, 1), ('2019-01-01 00:00:00', 2) +INSERT INTO dt Values (1546300800000, 1), ('2019-01-01 00:00:00', 2); ``` ``` sql -SELECT * FROM dt +SELECT * FROM dt; ``` ``` text @@ -45,13 +47,13 @@ SELECT * FROM dt └─────────────────────────┴──────────┘ ``` -- When inserting datetime as an integer, it is treated as an appropriately scaled Unix Timestamp (UTC). `1546300800000` (with precision 3) represents `'2019-01-01 00:00:00'` UTC. However, as `timestamp` column has `Europe/Moscow` (UTC+3) timezone specified, when outputting as a string the value will be shown as `'2019-01-01 03:00:00'` +- When inserting datetime as an integer, it is treated as an appropriately scaled Unix Timestamp (UTC). `1546300800000` (with precision 3) represents `'2019-01-01 00:00:00'` UTC. However, as `timestamp` column has `Europe/Moscow` (UTC+3) timezone specified, when outputting as a string the value will be shown as `'2019-01-01 03:00:00'`. - When inserting string value as datetime, it is treated as being in column timezone. `'2019-01-01 00:00:00'` will be treated as being in `Europe/Moscow` timezone and stored as `1546290000000`. -**2.** Filtering on `DateTime64` values +2. Filtering on `DateTime64` values ``` sql -SELECT * FROM dt WHERE timestamp = toDateTime64('2019-01-01 00:00:00', 3, 'Europe/Moscow') +SELECT * FROM dt WHERE timestamp = toDateTime64('2019-01-01 00:00:00', 3, 'Europe/Moscow'); ``` ``` text @@ -60,12 +62,12 @@ SELECT * FROM dt WHERE timestamp = toDateTime64('2019-01-01 00:00:00', 3, 'Europ └─────────────────────────┴──────────┘ ``` -Unlike `DateTime`, `DateTime64` values are not converted from `String` automatically +Unlike `DateTime`, `DateTime64` values are not converted from `String` automatically. -**3.** Getting a time zone for a `DateTime64`-type value: +3. Getting a time zone for a `DateTime64`-type value: ``` sql -SELECT toDateTime64(now(), 3, 'Europe/Moscow') AS column, toTypeName(column) AS x +SELECT toDateTime64(now(), 3, 'Europe/Moscow') AS column, toTypeName(column) AS x; ``` ``` text @@ -74,13 +76,13 @@ SELECT toDateTime64(now(), 3, 'Europe/Moscow') AS column, toTypeName(column) AS └─────────────────────────┴────────────────────────────────┘ ``` -**4.** Timezone conversion +4. Timezone conversion ``` sql SELECT toDateTime64(timestamp, 3, 'Europe/London') as lon_time, toDateTime64(timestamp, 3, 'Europe/Moscow') as mos_time -FROM dt +FROM dt; ``` ``` text @@ -90,7 +92,7 @@ FROM dt └─────────────────────────┴─────────────────────────┘ ``` -## See Also {#see-also} +**See Also** - [Type conversion functions](../../sql-reference/functions/type-conversion-functions.md) - [Functions for working with dates and times](../../sql-reference/functions/date-time-functions.md) diff --git a/docs/en/sql-reference/data-types/decimal.md b/docs/en/sql-reference/data-types/decimal.md index b268f747165..af2655cd0c2 100644 --- a/docs/en/sql-reference/data-types/decimal.md +++ b/docs/en/sql-reference/data-types/decimal.md @@ -31,7 +31,7 @@ For example, Decimal32(4) can contain numbers from -99999.9999 to 99999.9999 wit Internally data is represented as normal signed integers with respective bit width. 
Real value ranges that can be stored in memory are a bit larger than specified above, which are checked only on conversion from a string. -Because modern CPU’s do not support 128-bit integers natively, operations on Decimal128 are emulated. Because of this Decimal128 works significantly slower than Decimal32/Decimal64. +Because modern CPUs do not support 128-bit integers natively, operations on Decimal128 are emulated. Because of this Decimal128 works significantly slower than Decimal32/Decimal64. ## Operations and Result Type {#operations-and-result-type} diff --git a/docs/en/sql-reference/data-types/simpleaggregatefunction.md b/docs/en/sql-reference/data-types/simpleaggregatefunction.md index 244779c5ca8..f3a245e9627 100644 --- a/docs/en/sql-reference/data-types/simpleaggregatefunction.md +++ b/docs/en/sql-reference/data-types/simpleaggregatefunction.md @@ -2,6 +2,8 @@ `SimpleAggregateFunction(name, types_of_arguments…)` data type stores current value of the aggregate function, and does not store its full state as [`AggregateFunction`](../../sql-reference/data-types/aggregatefunction.md) does. This optimization can be applied to functions for which the following property holds: the result of applying a function `f` to a row set `S1 UNION ALL S2` can be obtained by applying `f` to parts of the row set separately, and then again applying `f` to the results: `f(S1 UNION ALL S2) = f(f(S1) UNION ALL f(S2))`. This property guarantees that partial aggregation results are enough to compute the combined one, so we don’t have to store and process any extra data. +The common way to produce an aggregate function value is by calling the aggregate function with the [-SimpleState](../../sql-reference/aggregate-functions/combinators.md#agg-functions-combinator-simplestate) suffix. + The following aggregate functions are supported: - [`any`](../../sql-reference/aggregate-functions/reference/any.md#agg_function-any) @@ -18,8 +20,6 @@ The following aggregate functions are supported: - [`sumMap`](../../sql-reference/aggregate-functions/reference/summap.md#agg_functions-summap) - [`minMap`](../../sql-reference/aggregate-functions/reference/minmap.md#agg_functions-minmap) - [`maxMap`](../../sql-reference/aggregate-functions/reference/maxmap.md#agg_functions-maxmap) -- [`argMin`](../../sql-reference/aggregate-functions/reference/argmin.md) -- [`argMax`](../../sql-reference/aggregate-functions/reference/argmax.md) !!! note "Note" diff --git a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md index de6a780235f..84ac45d2f35 100644 --- a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md +++ b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md @@ -7,9 +7,9 @@ toc_title: Storing Dictionaries in Memory There are a variety of ways to store dictionaries in memory. -We recommend [flat](#flat), [hashed](#dicts-external_dicts_dict_layout-hashed) and [complex_key_hashed](#complex-key-hashed). which provide optimal processing speed. +We recommend [flat](#flat), [hashed](#dicts-external_dicts_dict_layout-hashed) and [complex_key_hashed](#complex-key-hashed), which provide optimal processing speed. -Caching is not recommended because of potentially poor performance and difficulties in selecting optimal parameters. Read more in the section “[cache](#cache)”. 
+Caching is not recommended because of potentially poor performance and difficulties in selecting optimal parameters. Read more in the section [cache](#cache).

There are several ways to improve dictionary performance:

@@ -68,9 +68,9 @@ LAYOUT(LAYOUT_TYPE(param value)) -- layout settings

The dictionary is completely stored in memory in the form of flat arrays. How much memory does the dictionary use? The amount is proportional to the size of the largest key (in space used).

-The dictionary key has the `UInt64` type and the value is limited to 500,000. If a larger key is discovered when creating the dictionary, ClickHouse throws an exception and does not create the dictionary. +The dictionary key has the [UInt64](../../../sql-reference/data-types/int-uint.md) type and the value is limited to `max_array_size` (by default — 500,000). If a larger key is discovered when creating the dictionary, ClickHouse throws an exception and does not create the dictionary. The initial size of dictionary flat arrays is controlled by the `initial_array_size` setting (by default — 1024).

-All types of sources are supported. When updating, data (from a file or from a table) is read in its entirety. +All types of sources are supported. When updating, data (from a file or from a table) is read in its entirety.

This method provides the best performance among all available methods of storing the dictionary.

@@ -78,21 +78,27 @@ Configuration example:

``` xml
-<flat />
+<flat>
+  <initial_array_size>50000</initial_array_size>
+  <max_array_size>5000000</max_array_size>
+</flat>
```

or

``` sql
-LAYOUT(FLAT())
+LAYOUT(FLAT(INITIAL_ARRAY_SIZE 50000 MAX_ARRAY_SIZE 5000000))
```

### hashed {#dicts-external_dicts_dict_layout-hashed}

The dictionary is completely stored in memory in the form of a hash table. The dictionary can contain any number of elements with any identifiers In practice, the number of keys can reach tens of millions of items.

-The hash table will be preallocated (this will make dictionary load faster), if the is approx number of total rows is known, this is supported only if the source is `clickhouse` without any `<where>` (since in case of `<where>` you can filter out too much rows and the dictionary will allocate too much memory, that will not be used eventually). +If `preallocate` is `true` (default is `false`), the hash table will be preallocated (this will make the dictionary load faster). But note that you should use it only if:
+
+- The source supports an approximate number of elements (for now this is supported only by the `ClickHouse` source).
+- There are no duplicates in the data (otherwise it may increase memory usage for the hashtable).

All types of sources are supported. When updating, data (from a file or from a table) is read in its entirety.

@@ -100,21 +106,23 @@ Configuration example:

``` xml
-<hashed />
+<hashed>
+  <preallocate>0</preallocate>
+</hashed>
```

or

``` sql
-LAYOUT(HASHED())
+LAYOUT(HASHED(PREALLOCATE 0))
```

### sparse_hashed {#dicts-external_dicts_dict_layout-sparse_hashed}

Similar to `hashed`, but uses less memory in favor more CPU usage.

-It will be also preallocated so as `hashed`, note that it is even more significant for `sparse_hashed`. +It will also be preallocated like `hashed` (with `preallocate` set to `true`); note that this is even more significant for `sparse_hashed`. 
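+For orientation, a complete `CREATE DICTIONARY` sketch using this layout might look as follows (the dictionary, table, and attribute names are hypothetical; the layout fragment itself is shown in the configuration example below):
+
+``` sql
+CREATE DICTIONARY sparse_dict
+(
+    id UInt64,
+    value String DEFAULT ''
+)
+PRIMARY KEY id
+SOURCE(CLICKHOUSE(TABLE 'source_table'))
+LIFETIME(MIN 300 MAX 360)
+LAYOUT(SPARSE_HASHED(PREALLOCATE 1));
+```
+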
Configuration example:

@@ -124,8 +132,10 @@ Configuration example:

```
<sparse_hashed />
```

+or
+
``` sql
-LAYOUT(SPARSE_HASHED())
+LAYOUT(SPARSE_HASHED([PREALLOCATE 0]))
```

### complex_key_hashed {#complex-key-hashed} diff --git a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md index 081cc5b0b69..04901c1ad57 100644 --- a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md +++ b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md @@ -30,7 +30,7 @@ LIFETIME(300) Setting `0` (`LIFETIME(0)`) prevents dictionaries from updating. -You can set a time interval for upgrades, and ClickHouse will choose a uniformly random time within this range. This is necessary in order to distribute the load on the dictionary source when upgrading on a large number of servers. +You can set a time interval for updates, and ClickHouse will choose a uniformly random time within this range. This is necessary in order to distribute the load on the dictionary source when updating on a large number of servers. Example of settings: @@ -54,7 +54,7 @@ LIFETIME(MIN 300 MAX 360) If `<min>0</min>` and `<max>0</max>`, ClickHouse does not reload the dictionary by timeout. In this case, ClickHouse can reload the dictionary earlier if the dictionary configuration file was changed or the `SYSTEM RELOAD DICTIONARY` command was executed. -When upgrading the dictionaries, the ClickHouse server applies different logic depending on the type of [source](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md): +When updating the dictionaries, the ClickHouse server applies different logic depending on the type of [source](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md): - For a text file, it checks the time of modification. If the time differs from the previously recorded time, the dictionary is updated. - For MySQL source, the time of modification is checked using a `SHOW TABLE STATUS` query (in case of MySQL 8 you need to disable meta-information caching in MySQL by `set global information_schema_stats_expiry=0`). @@ -86,3 +86,4 @@ SOURCE(ODBC(... invalidate_query 'SELECT update_time FROM dictionary_source wher ... ``` +For `Cache`, `ComplexKeyCache`, `SSDCache`, and `SSDComplexKeyCache` dictionaries both synchronous and asynchronous updates are supported. diff --git a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md index dbf2fa67ac5..c6770b531f4 100644 --- a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md +++ b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md @@ -159,14 +159,14 @@ Configuration fields: | Tag | Description | Required | |------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------| | `name` | Column name. | Yes | -| `type` | ClickHouse data type.
ClickHouse tries to cast value from dictionary to the specified data type. For example, for MySQL, the field might be `TEXT`, `VARCHAR`, or `BLOB` in the MySQL source table, but it can be uploaded as `String` in ClickHouse.
[Nullable](../../../sql-reference/data-types/nullable.md) is not supported. | Yes | -| `null_value` | Default value for a non-existing element.
In the example, it is an empty string. You cannot use `NULL` in this field. | Yes | +| `type` | ClickHouse data type.
ClickHouse tries to cast value from dictionary to the specified data type. For example, for MySQL, the field might be `TEXT`, `VARCHAR`, or `BLOB` in the MySQL source table, but it can be uploaded as `String` in ClickHouse.
[Nullable](../../../sql-reference/data-types/nullable.md) is currently supported for [Flat](external-dicts-dict-layout.md#flat), [Hashed](external-dicts-dict-layout.md#dicts-external_dicts_dict_layout-hashed), [ComplexKeyHashed](external-dicts-dict-layout.md#complex-key-hashed), [Direct](external-dicts-dict-layout.md#direct), [ComplexKeyDirect](external-dicts-dict-layout.md#complex-key-direct), [RangeHashed](external-dicts-dict-layout.md#range-hashed), [Polygon](external-dicts-dict-polygon.md), [Cache](external-dicts-dict-layout.md#cache), [ComplexKeyCache](external-dicts-dict-layout.md#complex-key-cache), [SSDCache](external-dicts-dict-layout.md#ssd-cache), [SSDComplexKeyCache](external-dicts-dict-layout.md#complex-key-ssd-cache) dictionaries. In [IPTrie](external-dicts-dict-layout.md#ip-trie) dictionaries `Nullable` types are not supported. | Yes | +| `null_value` | Default value for a non-existing element.
In the example, it is an empty string. A [NULL](../../syntax.md#null-literal) value can be used only for `Nullable` types (see the type description in the previous row). | Yes | | `expression` | [Expression](../../../sql-reference/syntax.md#syntax-expressions) that ClickHouse executes on the value.
The expression can be a column name in the remote SQL database. Thus, you can use it to create an alias for the remote column.

Default value: no expression. | No | | `hierarchical` | If `true`, the attribute contains the value of a parent key for the current key. See [Hierarchical Dictionaries](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-hierarchical.md).

Default value: `false`. | No | | `injective` | Flag that shows whether the `id -> attribute` image is [injective](https://en.wikipedia.org/wiki/Injective_function).
If `true`, ClickHouse can automatically place after the `GROUP BY` clause the requests to dictionaries with injection. Usually it significantly reduces the amount of such requests.

Default value: `false`. | No | | `is_object_id` | Flag that shows whether the query is executed for a MongoDB document by `ObjectID`.

Default value: `false`. | No | -## See Also {#see-also} +**See Also** - [Functions for working with external dictionaries](../../../sql-reference/functions/ext-dict-functions.md). diff --git a/docs/en/sql-reference/dictionaries/index.md b/docs/en/sql-reference/dictionaries/index.md index fa127dab103..22f4182a1c0 100644 --- a/docs/en/sql-reference/dictionaries/index.md +++ b/docs/en/sql-reference/dictionaries/index.md @@ -10,8 +10,6 @@ A dictionary is a mapping (`key -> attributes`) that is convenient for various t ClickHouse supports special functions for working with dictionaries that can be used in queries. It is easier and more efficient to use dictionaries with functions than a `JOIN` with reference tables. -[NULL](../../sql-reference/syntax.md#null-literal) values can’t be stored in a dictionary. - ClickHouse supports: - [Built-in dictionaries](../../sql-reference/dictionaries/internal-dicts.md#internal_dicts) with a specific [set of functions](../../sql-reference/functions/ym-dict-functions.md). diff --git a/docs/en/sql-reference/functions/array-functions.md b/docs/en/sql-reference/functions/array-functions.md index 499376a70d4..7d4fcf29476 100644 --- a/docs/en/sql-reference/functions/array-functions.md +++ b/docs/en/sql-reference/functions/array-functions.md @@ -1544,3 +1544,52 @@ SELECT arrayCumSumNonNegative([1, 1, -4, 1]) AS res ``` Note that the `arraySumNonNegative` is a [higher-order function](../../sql-reference/functions/index.md#higher-order-functions). You can pass a lambda function to it as the first argument. +## arrayProduct {#arrayproduct} + +Multiplies elements of an [array](../../sql-reference/data-types/array.md). + +**Syntax** + +``` sql +arrayProduct(arr) +``` + +**Arguments** + +- `arr` — [Array](../../sql-reference/data-types/array.md) of numeric values. + +**Returned value** + +- A product of array's elements. + +Type: [Float64](../../sql-reference/data-types/float.md). + +**Examples** + +Query: + +``` sql +SELECT arrayProduct([1,2,3,4,5,6]) as res; +``` + +Result: + +``` text +┌─res───┐ +│ 720 │ +└───────┘ +``` + +Query: + +``` sql +SELECT arrayProduct([toDecimal64(1,8), toDecimal64(2,8), toDecimal64(3,8)]) as res, toTypeName(res); +``` + +Return value type is always [Float64](../../sql-reference/data-types/float.md). 
Result: + +``` text +┌─res─┬─toTypeName(arrayProduct(array(toDecimal64(1, 8), toDecimal64(2, 8), toDecimal64(3, 8))))─┐ +│ 6 │ Float64 │ +└─────┴──────────────────────────────────────────────────────────────────────────────────────────┘ +``` diff --git a/docs/en/sql-reference/functions/bitmap-functions.md b/docs/en/sql-reference/functions/bitmap-functions.md index 7ec400949e9..4875532605e 100644 --- a/docs/en/sql-reference/functions/bitmap-functions.md +++ b/docs/en/sql-reference/functions/bitmap-functions.md @@ -33,7 +33,7 @@ SELECT bitmapBuild([1, 2, 3, 4, 5]) AS res, toTypeName(res); ``` text ┌─res─┬─toTypeName(bitmapBuild([1, 2, 3, 4, 5]))─────┐ -│  │ AggregateFunction(groupBitmap, UInt8) │ +│ │ AggregateFunction(groupBitmap, UInt8) │ └─────┴──────────────────────────────────────────────┘ ``` diff --git a/docs/en/sql-reference/functions/date-time-functions.md b/docs/en/sql-reference/functions/date-time-functions.md index 6b26dae4546..69baf64ef55 100644 --- a/docs/en/sql-reference/functions/date-time-functions.md +++ b/docs/en/sql-reference/functions/date-time-functions.md @@ -5,7 +5,7 @@ toc_title: Dates and Times # Functions for Working with Dates and Times {#functions-for-working-with-dates-and-times} -Support for time zones +Support for time zones. All functions for working with the date and time that have a logical use for the time zone can accept a second optional time zone argument. Example: Asia/Yekaterinburg. In this case, they use the specified time zone instead of the local (default) one. @@ -23,13 +23,53 @@ SELECT └─────────────────────┴────────────┴────────────┴─────────────────────┘ ``` +## timeZone {#timezone} + +Returns the timezone of the server. + +**Syntax** + +``` sql +timeZone() +``` + +Alias: `timezone`. + +**Returned value** + +- Timezone. + +Type: [String](../../sql-reference/data-types/string.md). + ## toTimeZone {#totimezone} -Convert time or date and time to the specified time zone. The time zone is an attribute of the Date/DateTime types. The internal value (number of seconds) of the table field or of the resultset's column does not change, the column's type changes and its string representation changes accordingly. +Converts time or date and time to the specified time zone. The time zone is an attribute of the `Date` and `DateTime` data types. The internal value (number of seconds) of the table field or of the resultset's column does not change, the column's type changes and its string representation changes accordingly. + +**Syntax** + +``` sql +toTimezone(value, timezone) +``` + +Alias: `toTimezone`. + +**Arguments** + +- `value` — Time or date and time. [DateTime64](../../sql-reference/data-types/datetime64.md). +- `timezone` — Timezone for the returned value. [String](../../sql-reference/data-types/string.md). + +**Returned value** + +- Date and time. + +Type: [DateTime](../../sql-reference/data-types/datetime.md). + +**Example** + +Query: ```sql -SELECT - toDateTime('2019-01-01 00:00:00', 'UTC') AS time_utc, +SELECT toDateTime('2019-01-01 00:00:00', 'UTC') AS time_utc, toTypeName(time_utc) AS type_utc, toInt32(time_utc) AS int32utc, toTimeZone(time_utc, 'Asia/Yekaterinburg') AS time_yekat, @@ -40,6 +80,7 @@ SELECT toInt32(time_samoa) AS int32samoa FORMAT Vertical; ``` +Result: ```text Row 1: @@ -57,6 +98,82 @@ int32samoa: 1546300800 `toTimeZone(time_utc, 'Asia/Yekaterinburg')` changes the `DateTime('UTC')` type to `DateTime('Asia/Yekaterinburg')`. 
The value (Unixtimestamp) 1546300800 stays the same, but the string representation (the result of the toString() function) changes from `time_utc: 2019-01-01 00:00:00` to `time_yekat: 2019-01-01 05:00:00`. +## timeZoneOf {#timezoneof} + +Returns the timezone name of [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md) data types. + +**Syntax** + +``` sql +timeZoneOf(value) +``` + +Alias: `timezoneOf`. + +**Arguments** + +- `value` — Date and time. [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md). + +**Returned value** + +- Timezone name. + +Type: [String](../../sql-reference/data-types/string.md). + +**Example** + +Query: +``` sql +SELECT timezoneOf(now()); +``` + +Result: +``` text +┌─timezoneOf(now())─┐ +│ Etc/UTC │ +└───────────────────┘ +``` + +## timeZoneOffset {#timezoneoffset} + +Returns a timezone offset in seconds from [UTC](https://en.wikipedia.org/wiki/Coordinated_Universal_Time). The function takes into account [daylight saving time](https://en.wikipedia.org/wiki/Daylight_saving_time) and historical timezone changes at the specified date and time. +[IANA timezone database](https://www.iana.org/time-zones) is used to calculate the offset. + +**Syntax** + +``` sql +timeZoneOffset(value) +``` + +Alias: `timezoneOffset`. + +**Arguments** + +- `value` — Date and time. [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md). + +**Returned value** + +- Offset from UTC in seconds. + +Type: [Int32](../../sql-reference/data-types/int-uint.md). + +**Example** + +Query: + +``` sql +SELECT toDateTime('2021-04-21 10:20:30', 'America/New_York') AS Time, toTypeName(Time) AS Type, + timeZoneOffset(Time) AS Offset_in_seconds, (Offset_in_seconds / 3600) AS Offset_in_hours; +``` + +Result: + +``` text +┌────────────────Time─┬─Type─────────────────────────┬─Offset_in_seconds─┬─Offset_in_hours─┐ +│ 2021-04-21 10:20:30 │ DateTime('America/New_York') │ -14400 │ -4 │ +└─────────────────────┴──────────────────────────────┴───────────────────┴─────────────────┘ +``` + ## toYear {#toyear} Converts a date or date with time to a UInt16 number containing the year number (AD). @@ -147,6 +264,9 @@ Result: └────────────────┘ ``` +!!! attention "Attention" + The return type `toStartOf*` functions described below is `Date` or `DateTime`. Though these functions can take `DateTime64` as an argument, passing them a `DateTime64` that is out of normal range (years 1970 - 2105) will give incorrect result. + ## toStartOfYear {#tostartofyear} Rounds down a date or date with time to the first day of the year. @@ -388,13 +508,13 @@ SELECT toDate('2016-12-27') AS date, toYearWeek(date) AS yearWeek0, toYearWeek(d Truncates date and time data to the specified part of date. -**Syntax** +**Syntax** ``` sql date_trunc(unit, value[, timezone]) ``` -Alias: `dateTrunc`. +Alias: `dateTrunc`. **Arguments** @@ -457,13 +577,13 @@ Result: Adds the time interval or date interval to the provided date or date with time. -**Syntax** +**Syntax** ``` sql date_add(unit, value, date) ``` -Aliases: `dateAdd`, `DATE_ADD`. +Aliases: `dateAdd`, `DATE_ADD`. **Arguments** @@ -478,7 +598,7 @@ Aliases: `dateAdd`, `DATE_ADD`. - `month` - `quarter` - `year` - + - `value` — Value of interval to add. [Int](../../sql-reference/data-types/int-uint.md). - `date` — The date or date with time to which `value` is added. 
[Date](../../sql-reference/data-types/date.md) or [DateTime](../../sql-reference/data-types/datetime.md). @@ -583,7 +703,7 @@ Aliases: `dateSub`, `DATE_SUB`. - `month` - `quarter` - `year` - + - `value` — Value of interval to subtract. [Int](../../sql-reference/data-types/int-uint.md). - `date` — The date or date with time from which `value` is subtracted. [Date](../../sql-reference/data-types/date.md) or [DateTime](../../sql-reference/data-types/datetime.md). @@ -613,16 +733,16 @@ Result: Adds the specified time value with the provided date or date time value. -**Syntax** +**Syntax** ``` sql timestamp_add(date, INTERVAL value unit) ``` -Aliases: `timeStampAdd`, `TIMESTAMP_ADD`. +Aliases: `timeStampAdd`, `TIMESTAMP_ADD`. **Arguments** - + - `date` — Date or date with time. [Date](../../sql-reference/data-types/date.md) or [DateTime](../../sql-reference/data-types/datetime.md). - `value` — Value of interval to add. [Int](../../sql-reference/data-types/int-uint.md). - `unit` — The type of interval to add. [String](../../sql-reference/data-types/string.md). @@ -642,7 +762,7 @@ Aliases: `timeStampAdd`, `TIMESTAMP_ADD`. Date or date with time with the specified `value` expressed in `unit` added to `date`. Type: [Date](../../sql-reference/data-types/date.md) or [DateTime](../../sql-reference/data-types/datetime.md). - + **Example** Query: @@ -663,13 +783,13 @@ Result: Subtracts the time interval from the provided date or date with time. -**Syntax** +**Syntax** ``` sql timestamp_sub(unit, value, date) ``` -Aliases: `timeStampSub`, `TIMESTAMP_SUB`. +Aliases: `timeStampSub`, `TIMESTAMP_SUB`. **Arguments** @@ -684,7 +804,7 @@ Aliases: `timeStampSub`, `TIMESTAMP_SUB`. - `month` - `quarter` - `year` - + - `value` — Value of interval to subtract. [Int](../../sql-reference/data-types/int-uint.md). - `date` — Date or date with time. [Date](../../sql-reference/data-types/date.md) or [DateTime](../../sql-reference/data-types/datetime.md). @@ -709,12 +829,12 @@ Result: │ 2018-07-18 01:02:03 │ └──────────────────────────────────────────────────────────────┘ ``` - + ## now {#now} -Returns the current date and time. +Returns the current date and time. -**Syntax** +**Syntax** ``` sql now([timezone]) @@ -1069,4 +1189,3 @@ Result: │ 2020-01-01 │ └────────────────────────────────────┘ ``` - diff --git a/docs/en/sql-reference/functions/ext-dict-functions.md b/docs/en/sql-reference/functions/ext-dict-functions.md index 5fc146f603f..8eb10bd0208 100644 --- a/docs/en/sql-reference/functions/ext-dict-functions.md +++ b/docs/en/sql-reference/functions/ext-dict-functions.md @@ -10,21 +10,22 @@ toc_title: External Dictionaries For information on connecting and configuring external dictionaries, see [External dictionaries](../../sql-reference/dictionaries/external-dictionaries/external-dicts.md). -## dictGet {#dictget} +## dictGet, dictGetOrDefault, dictGetOrNull {#dictget} -Retrieves a value from an external dictionary. +Retrieves values from an external dictionary. ``` sql -dictGet('dict_name', 'attr_name', id_expr) -dictGetOrDefault('dict_name', 'attr_name', id_expr, default_value_expr) +dictGet('dict_name', attr_names, id_expr) +dictGetOrDefault('dict_name', attr_names, id_expr, default_value_expr) +dictGetOrNull('dict_name', attr_name, id_expr) ``` **Arguments** - `dict_name` — Name of the dictionary. [String literal](../../sql-reference/syntax.md#syntax-string-literal). -- `attr_name` — Name of the column of the dictionary. [String literal](../../sql-reference/syntax.md#syntax-string-literal). 
+- `attr_names` — Name of the column of the dictionary, [String literal](../../sql-reference/syntax.md#syntax-string-literal), or tuple of column names, [Tuple](../../sql-reference/data-types/tuple.md)([String literal](../../sql-reference/syntax.md#syntax-string-literal)). - `id_expr` — Key value. [Expression](../../sql-reference/syntax.md#syntax-expressions) returning a [UInt64](../../sql-reference/data-types/int-uint.md) or [Tuple](../../sql-reference/data-types/tuple.md)-type value depending on the dictionary configuration. -- `default_value_expr` — Value returned if the dictionary doesn’t contain a row with the `id_expr` key. [Expression](../../sql-reference/syntax.md#syntax-expressions) returning the value in the data type configured for the `attr_name` attribute. +- `default_value_expr` — Values returned if the dictionary doesn’t contain a row with the `id_expr` key. [Expression](../../sql-reference/syntax.md#syntax-expressions) or [Tuple](../../sql-reference/data-types/tuple.md)([Expression](../../sql-reference/syntax.md#syntax-expressions)), returning the value (or values) in the data types configured for the `attr_names` attribute. **Returned value** @@ -34,12 +35,13 @@ dictGetOrDefault('dict_name', 'attr_name', id_expr, default_value_expr) - `dictGet` returns the content of the `` element specified for the attribute in the dictionary configuration. - `dictGetOrDefault` returns the value passed as the `default_value_expr` parameter. + - `dictGetOrNull` returns `NULL` in case key was not found in dictionary. ClickHouse throws an exception if it cannot parse the value of the attribute or the value doesn’t match the attribute data type. -**Example** +**Example for simple key dictionary** -Create a text file `ext-dict-text.csv` containing the following: +Create a text file `ext-dict-test.csv` containing the following: ``` text 1,1 @@ -96,6 +98,130 @@ LIMIT 3 └─────┴────────┘ ``` +**Example for complex key dictionary** + +Create a text file `ext-dict-mult.csv` containing the following: + +``` text +1,1,'1' +2,2,'2' +3,3,'3' +``` + +The first column is `id`, the second is `c1`, the third is `c2`. 
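+
+As a side note, the same dictionary could also be declared with DDL instead of the XML configuration shown below. This is only a sketch: the underscore name `ext_dict_mult` and the file path are illustrative assumptions, and the server must be allowed to read the source file.
+
+``` sql
+CREATE DICTIONARY ext_dict_mult
+(
+    id UInt64,
+    c1 UInt32,
+    c2 String
+)
+PRIMARY KEY id
+SOURCE(FILE(PATH '/path-to/ext-dict-mult.csv' FORMAT 'CSV')) -- assumed path, as in the XML example below
+LAYOUT(FLAT())
+LIFETIME(0);
+```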
+
+Configure the external dictionary:
+
+``` xml
+<yandex>
+    <dictionary>
+        <name>ext-dict-mult</name>
+        <source>
+            <file>
+                <path>/path-to/ext-dict-mult.csv</path>
+                <format>CSV</format>
+            </file>
+        </source>
+        <layout>
+            <flat />
+        </layout>
+        <structure>
+            <id>
+                <name>id</name>
+            </id>
+            <attribute>
+                <name>c1</name>
+                <type>UInt32</type>
+                <null_value></null_value>
+            </attribute>
+            <attribute>
+                <name>c2</name>
+                <type>String</type>
+                <null_value></null_value>
+            </attribute>
+        </structure>
+        <lifetime>0</lifetime>
+    </dictionary>
+</yandex>
+```
+
+Perform the query:
+
+``` sql
+SELECT
+    dictGet('ext-dict-mult', ('c1','c2'), number) AS val,
+    toTypeName(val) AS type
+FROM system.numbers
+LIMIT 3;
+```
+
+``` text
+┌─val─────┬─type──────────────────┐
+│ (1,'1') │ Tuple(UInt8, String)  │
+│ (2,'2') │ Tuple(UInt8, String)  │
+│ (3,'3') │ Tuple(UInt8, String)  │
+└─────────┴───────────────────────┘
+```
+
+**Example for range key dictionary**
+
+Input table:
+
+```sql
+CREATE TABLE range_key_dictionary_source_table
+(
+    key UInt64,
+    start_date Date,
+    end_date Date,
+    value String,
+    value_nullable Nullable(String)
+)
+ENGINE = TinyLog();
+
+INSERT INTO range_key_dictionary_source_table VALUES(1, toDate('2019-05-20'), toDate('2019-05-20'), 'First', 'First');
+INSERT INTO range_key_dictionary_source_table VALUES(2, toDate('2019-05-20'), toDate('2019-05-20'), 'Second', NULL);
+INSERT INTO range_key_dictionary_source_table VALUES(3, toDate('2019-05-20'), toDate('2019-05-20'), 'Third', 'Third');
+```
+
+Create the external dictionary:
+
+```sql
+CREATE DICTIONARY range_key_dictionary
+(
+    key UInt64,
+    start_date Date,
+    end_date Date,
+    value String,
+    value_nullable Nullable(String)
+)
+PRIMARY KEY key
+SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() TABLE 'range_key_dictionary_source_table'))
+LIFETIME(MIN 1 MAX 1000)
+LAYOUT(RANGE_HASHED())
+RANGE(MIN start_date MAX end_date);
+```
+
+Perform the query:
+
+``` sql
+SELECT
+    (number, toDate('2019-05-20')),
+    dictHas('range_key_dictionary', number, toDate('2019-05-20')),
+    dictGetOrNull('range_key_dictionary', 'value', number, toDate('2019-05-20')),
+    dictGetOrNull('range_key_dictionary', 'value_nullable', number, toDate('2019-05-20')),
+    dictGetOrNull('range_key_dictionary', ('value', 'value_nullable'), number, toDate('2019-05-20'))
+FROM system.numbers LIMIT 5 FORMAT TabSeparated;
+```
+
+Result:
+
+``` text
+(0,'2019-05-20')	0	\N	\N	(NULL,NULL)
+(1,'2019-05-20')	1	First	First	('First','First')
+(2,'2019-05-20')	0	\N	\N	(NULL,NULL)
+(3,'2019-05-20')	0	\N	\N	(NULL,NULL)
+(4,'2019-05-20')	0	\N	\N	(NULL,NULL)
+```
+
 **See Also**
 
 - [External Dictionaries](../../../sql-reference/dictionaries/external-dictionaries/external-dicts.md)
@@ -202,4 +328,3 @@ dictGet[Type]OrDefault('dict_name', 'attr_name', id_expr, default_value_expr)
 - `dictGet[Type]OrDefault` returns the value passed as the `default_value_expr` parameter.
 
 ClickHouse throws an exception if it cannot parse the value of the attribute or the value doesn’t match the attribute data type.
-
diff --git a/docs/en/sql-reference/functions/functions-for-nulls.md b/docs/en/sql-reference/functions/functions-for-nulls.md
index 5cc95fe298a..c06711b3cd2 100644
--- a/docs/en/sql-reference/functions/functions-for-nulls.md
+++ b/docs/en/sql-reference/functions/functions-for-nulls.md
@@ -224,7 +224,7 @@ assumeNotNull(x)
 
 **Returned values**
 
 - The original value from the non-`Nullable` type, if it is not `NULL`.
-- The default value for the non-`Nullable` type if the original value was `NULL`.
+- An implementation-specific result if the original value was `NULL`.
**Example** diff --git a/docs/en/sql-reference/functions/hash-functions.md b/docs/en/sql-reference/functions/hash-functions.md index c60067b06af..0ea4cfd6fbe 100644 --- a/docs/en/sql-reference/functions/hash-functions.md +++ b/docs/en/sql-reference/functions/hash-functions.md @@ -437,13 +437,13 @@ A [FixedString(16)](../../sql-reference/data-types/fixedstring.md) data type has **Example** ``` sql -SELECT murmurHash3_128('example_string') AS MurmurHash3, toTypeName(MurmurHash3) AS type; +SELECT hex(murmurHash3_128('example_string')) AS MurmurHash3, toTypeName(MurmurHash3) AS type; ``` ``` text -┌─MurmurHash3──────┬─type────────────┐ -│ 6�1�4"S5KT�~~q │ FixedString(16) │ -└──────────────────┴─────────────────┘ +┌─MurmurHash3──────────────────────┬─type───┐ +│ 368A1A311CB7342253354B548E7E7E71 │ String │ +└──────────────────────────────────┴────────┘ ``` ## xxHash32, xxHash64 {#hash-functions-xxhash32} diff --git a/docs/en/sql-reference/functions/json-functions.md b/docs/en/sql-reference/functions/json-functions.md index ca6ef684faf..d545a0ae4e6 100644 --- a/docs/en/sql-reference/functions/json-functions.md +++ b/docs/en/sql-reference/functions/json-functions.md @@ -16,46 +16,60 @@ The following assumptions are made: ## visitParamHas(params, name) {#visitparamhasparams-name} -Checks whether there is a field with the ‘name’ name. +Checks whether there is a field with the `name` name. + +Alias: `simpleJSONHas`. ## visitParamExtractUInt(params, name) {#visitparamextractuintparams-name} -Parses UInt64 from the value of the field named ‘name’. If this is a string field, it tries to parse a number from the beginning of the string. If the field doesn’t exist, or it exists but doesn’t contain a number, it returns 0. +Parses UInt64 from the value of the field named `name`. If this is a string field, it tries to parse a number from the beginning of the string. If the field doesn’t exist, or it exists but doesn’t contain a number, it returns 0. + +Alias: `simpleJSONExtractUInt`. ## visitParamExtractInt(params, name) {#visitparamextractintparams-name} The same as for Int64. +Alias: `simpleJSONExtractInt`. + ## visitParamExtractFloat(params, name) {#visitparamextractfloatparams-name} The same as for Float64. +Alias: `simpleJSONExtractFloat`. + ## visitParamExtractBool(params, name) {#visitparamextractboolparams-name} Parses a true/false value. The result is UInt8. +Alias: `simpleJSONExtractBool`. + ## visitParamExtractRaw(params, name) {#visitparamextractrawparams-name} Returns the value of a field, including separators. +Alias: `simpleJSONExtractRaw`. + Examples: ``` sql -visitParamExtractRaw('{"abc":"\\n\\u0000"}', 'abc') = '"\\n\\u0000"' -visitParamExtractRaw('{"abc":{"def":[1,2,3]}}', 'abc') = '{"def":[1,2,3]}' +visitParamExtractRaw('{"abc":"\\n\\u0000"}', 'abc') = '"\\n\\u0000"'; +visitParamExtractRaw('{"abc":{"def":[1,2,3]}}', 'abc') = '{"def":[1,2,3]}'; ``` ## visitParamExtractString(params, name) {#visitparamextractstringparams-name} Parses the string in double quotes. The value is unescaped. If unescaping failed, it returns an empty string. +Alias: `simpleJSONExtractString`. 
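+
+Since the `simpleJSON*` names are aliases, they can be used interchangeably with the `visitParam*` form. A minimal sketch (the JSON literal is illustrative):
+
+``` sql
+SELECT simpleJSONExtractString('{"abc":"hello"}', 'abc'); -- returns 'hello', same as visitParamExtractString
+```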
+
+Examples:
+
+``` sql
+visitParamExtractString('{"abc":"\\n\\u0000"}', 'abc') = '\n\0';
+visitParamExtractString('{"abc":"\\u263a"}', 'abc') = '☺';
+visitParamExtractString('{"abc":"\\u263"}', 'abc') = '';
+visitParamExtractString('{"abc":"hello}', 'abc') = '';
+```
 
 There is currently no support for code points in the format `\uXXXX\uYYYY` that are not from the basic multilingual plane (they are converted to CESU-8 instead of UTF-8).
diff --git a/docs/en/sql-reference/functions/other-functions.md b/docs/en/sql-reference/functions/other-functions.md
index c40aa3d1eae..ecb7b982157 100644
--- a/docs/en/sql-reference/functions/other-functions.md
+++ b/docs/en/sql-reference/functions/other-functions.md
@@ -696,10 +696,6 @@ Returns the server’s uptime in seconds.
 
 Returns the version of the server as a string.
 
-## timezone() {#timezone}
-
-Returns the timezone of the server.
-
 ## blockNumber {#blocknumber}
 
 Returns the sequence number of the data block where the row is located.
@@ -1192,6 +1188,109 @@ SELECT defaultValueOfTypeName('Nullable(Int8)')
 └──────────────────────────────────────────┘
 ```
 
+## indexHint {#indexhint}
+
+The function is intended for debugging and introspection purposes. It ignores its argument and always returns 1. The arguments are not even evaluated.
+
+But for the purpose of index analysis, the argument of this function is analyzed as if it were present directly without being wrapped inside the `indexHint` function. This allows selecting data in index ranges by the corresponding condition but without further filtering by this condition. The index in ClickHouse is sparse and using `indexHint` will yield more data than specifying the same condition directly.
+
+**Syntax**
+
+```sql
+SELECT * FROM table WHERE indexHint(<expression>)
+```
+
+**Returned value**
+
+- 1. Type: [UInt8](../../sql-reference/data-types/int-uint.md).
+
+**Example**
+
+Here is an example using test data from the table [ontime](../../getting-started/example-datasets/ontime.md).
+
+Input table:
+
+```sql
+SELECT count() FROM ontime
+```
+
+```text
+┌─count()─┐
+│ 4276457 │
+└─────────┘
+```
+
+The table has indexes on the fields `(FlightDate, (Year, FlightDate))`.
+
+Create a query where the index is not used.
+
+Query:
+
+```sql
+SELECT FlightDate AS k, count() FROM ontime GROUP BY k ORDER BY k
+```
+
+ClickHouse processed the entire table (`Processed 4.28 million rows`).
+
+Result:
+
+```text
+┌──────────k─┬─count()─┐
+│ 2017-01-01 │   13970 │
+│ 2017-01-02 │   15882 │
+........................
+│ 2017-09-28 │   16411 │
+│ 2017-09-29 │   16384 │
+│ 2017-09-30 │   12520 │
+└────────────┴─────────┘
+```
+
+To apply the index, select a specific date.
+
+Query:
+
+```sql
+SELECT FlightDate AS k, count() FROM ontime WHERE k = '2017-09-15' GROUP BY k ORDER BY k
+```
+
+By using the index, ClickHouse processed a significantly smaller number of rows (`Processed 32.74 thousand rows`).
+
+Result:
+
+```text
+┌──────────k─┬─count()─┐
+│ 2017-09-15 │   16428 │
+└────────────┴─────────┘
+```
+
+Now wrap the expression `k = '2017-09-15'` into the `indexHint` function.
+
+Query:
+
+```sql
+SELECT
+    FlightDate AS k,
+    count()
+FROM ontime
+WHERE indexHint(k = '2017-09-15')
+GROUP BY k
+ORDER BY k ASC
+```
+
+ClickHouse used the index in the same way as the previous time (`Processed 32.74 thousand rows`).
+The expression `k = '2017-09-15'` was not used when generating the result.
+In this example, the `indexHint` function allows seeing adjacent dates.
+
+Result:
+
+```text
+┌──────────k─┬─count()─┐
+│ 2017-09-14 │    7071 │
+│ 2017-09-15 │   16428 │
+│ 2017-09-16 │    1077 │
+│ 2017-09-30 │    8167 │
+└────────────┴─────────┘
+```
+
 ## replicate {#other-functions-replicate}
 
 Creates an array with a single value.
diff --git a/docs/en/sql-reference/functions/splitting-merging-functions.md b/docs/en/sql-reference/functions/splitting-merging-functions.md
index bd7e209549c..d61896b6d98 100644
--- a/docs/en/sql-reference/functions/splitting-merging-functions.md
+++ b/docs/en/sql-reference/functions/splitting-merging-functions.md
@@ -90,6 +90,53 @@ SELECT splitByString('', 'abcde')
 └────────────────────────────┘
 ```
 
+## splitByRegexp(regexp, s) {#splitbyregexpseparator-s}
+
+Splits a string into substrings separated by a regular expression. It uses a regular expression string `regexp` as the separator. If `regexp` is empty, it will split the string `s` into an array of single characters. If no match is found for this regular expression, the string `s` won't be split.
+
+**Syntax**
+
+``` sql
+splitByRegexp(regexp, s)
+```
+
+**Arguments**
+
+- `regexp` — Regular expression. Constant. [String](../data-types/string.md) or [FixedString](../data-types/fixedstring.md).
+- `s` — The string to split. [String](../../sql-reference/data-types/string.md).
+
+**Returned value(s)**
+
+Returns an array of selected substrings. Empty substrings may be selected when:
+
+- A non-empty regular expression match occurs at the beginning or end of the string;
+- There are multiple consecutive non-empty regular expression matches;
+- The original string `s` is empty while the regular expression is not empty.
+
+Type: [Array](../../sql-reference/data-types/array.md) of [String](../../sql-reference/data-types/string.md).
+
+**Example**
+
+``` sql
+SELECT splitByRegexp('\\d+', 'a12bc23de345f')
+```
+
+``` text
+┌─splitByRegexp('\\d+', 'a12bc23de345f')─┐
+│ ['a','bc','de','f']                    │
+└────────────────────────────────────────┘
+```
+
+``` sql
+SELECT splitByRegexp('', 'abcde')
+```
+
+``` text
+┌─splitByRegexp('', 'abcde')─┐
+│ ['a','b','c','d','e']      │
+└────────────────────────────┘
+```
+
 ## arrayStringConcat(arr\[, separator\]) {#arraystringconcatarr-separator}
 
 Concatenates the strings listed in the array with the separator. `separator` is an optional parameter: a constant string, set to an empty string by default.
@@ -149,4 +196,3 @@ Result:
 │ [['abc','123'],['8','"hkl"']] │
 └───────────────────────────────────────────────────────────────────────┘
 ```
-
diff --git a/docs/en/sql-reference/functions/string-functions.md b/docs/en/sql-reference/functions/string-functions.md
index 3d3caaf6e23..85570cb408d 100644
--- a/docs/en/sql-reference/functions/string-functions.md
+++ b/docs/en/sql-reference/functions/string-functions.md
@@ -649,3 +649,65 @@ Result:
 
 - [List of XML and HTML character entity references](https://en.wikipedia.org/wiki/List_of_XML_and_HTML_character_entity_references)
+
+## extractTextFromHTML {#extracttextfromhtml}
+
+A function to extract text from HTML or XHTML.
+It does not necessarily 100% conform to any of the HTML, XML or XHTML standards, but the implementation is reasonably accurate and it is fast.
The rules are the following:
+
+1. Comments are skipped. Example: `<!-- test -->`. Comment must end with `-->`. Nested comments are not possible.
+Note: constructions like `<!-->` and `<!->` are not valid comments in HTML but they are skipped by other rules.
+2. CDATA is pasted verbatim. Note: CDATA is XML/XHTML specific. But it is processed for "best-effort" approach.
+3. `script` and `style` elements are removed with all their content. Note: it is assumed that the closing tag cannot appear inside content. For example, in JS a string literal has to be escaped like `"<\/script>"`.
+Note: comments and CDATA are possible inside `script` or `style` - then closing tags are not searched inside CDATA. Example: `<script><![CDATA[</script>]]></script>`. But they are still searched inside comments. Sometimes it becomes complicated: `<script>var x = "<!--"; </script> var y = "-->"; alert(x + y);</script>`
+Note: `script` and `style` can be the names of XML namespaces - then they are not treated like usual `script` or `style` elements. Example: `<script:a>Hello</script:a>`.
+Note: whitespaces are possible after the closing tag name: `</script >` but not before: `< / script>`.
+4. Other tags or tag-like elements are skipped without inner content. Example: `<a>.</a>`
+Note: it is expected that this HTML is illegal: `<a test=">"></a>`
+Note: it also skips something like tags: `<>`, `<!>`, etc.
+Note: a tag without an end is skipped to the end of input: `<hello `
+5. HTML and XML entities are not decoded. They must be processed by a separate function.
+6. Whitespaces in the text are collapsed or inserted by specific rules. Whitespaces at the beginning and at the end are removed, and consecutive whitespaces are collapsed. But if the text is separated by other elements and there is no whitespace, it is inserted. It may cause unnatural examples: `Hello<b>world</b>`, `Hello<!-- -->world` - there is no whitespace in HTML, but the function inserts it. Also consider: `Hello<p>world</p>`, `Hello<br>world`. This behavior is reasonable for data analysis, e.g. to convert HTML to a bag of words.
+7. Also note that correct handling of whitespaces requires the support of `<pre></pre>` and CSS `display` and `white-space` properties.
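+
+For instance, the whitespace insertion described in rule 6 can be checked directly. A minimal sketch (grounded in the rule above: a space is inserted although the source HTML contains none):
+
+``` sql
+SELECT extractTextFromHTML('Hello<b>world</b>'); -- returns 'Hello world'
+```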
+
+**Syntax**
+
+``` sql
+extractTextFromHTML(x)
+```
+
+**Arguments**
+
+-   `x` — Input text. [String](../../sql-reference/data-types/string.md).
+
+**Returned value**
+
+-   Extracted text.
+
+Type: [String](../../sql-reference/data-types/string.md).
+
+**Example**
+
+The first example contains several tags and a comment and also shows whitespace processing.
+The second example shows `CDATA` and `script` tag processing.
+In the third example text is extracted from the full HTML response received by the [url](../../sql-reference/table-functions/url.md) function.
+
+Query:
+
+``` sql
+SELECT extractTextFromHTML(' <p> A text <i>with</i><b>tags</b>. <!-- comments --> </p> ');
+SELECT extractTextFromHTML('<![CDATA[The content within <b>CDATA</b>]]> <script>alert("Script");</script>');
+SELECT extractTextFromHTML(html) FROM url('http://www.donothingfor2minutes.com/', RawBLOB, 'html String');
+```
+
+Result:
+
+``` text
+A text with tags .
+The content within <b>CDATA</b>
+Do Nothing for 2 Minutes 2:00  
+```
diff --git a/docs/en/sql-reference/statements/alter/column.md b/docs/en/sql-reference/statements/alter/column.md
index 3ece30be5b8..d661bd4cd59 100644
--- a/docs/en/sql-reference/statements/alter/column.md
+++ b/docs/en/sql-reference/statements/alter/column.md
@@ -74,6 +74,9 @@ Deletes the column with the name `name`. If the `IF EXISTS` clause is specified,
 
 Deletes data from the file system. Since this deletes entire files, the query is completed almost instantly.
 
+!!! warning "Warning"
+    You can’t delete a column if it is referenced by [materialized view](../../../sql-reference/statements/create/view.md#materialized). Otherwise, it returns an error.
+
 Example:
 
 ``` sql
@@ -180,7 +183,7 @@ ALTER TABLE table_name MODIFY column_name REMOVE property;
 ALTER TABLE table_with_ttl MODIFY COLUMN column_ttl REMOVE TTL;
 ```
 
-## See Also
+**See Also**
 
 - [REMOVE TTL](ttl.md).
 
diff --git a/docs/en/sql-reference/statements/alter/partition.md b/docs/en/sql-reference/statements/alter/partition.md
index f7183ba525c..b22f89928b9 100644
--- a/docs/en/sql-reference/statements/alter/partition.md
+++ b/docs/en/sql-reference/statements/alter/partition.md
@@ -16,7 +16,7 @@ The following operations with [partitions](../../../engines/table-engines/merget
 - [CLEAR COLUMN IN PARTITION](#alter_clear-column-partition) — Resets the value of a specified column in a partition.
 - [CLEAR INDEX IN PARTITION](#alter_clear-index-partition) — Resets the specified secondary index in a partition.
 - [FREEZE PARTITION](#alter_freeze-partition) — Creates a backup of a partition.
-- [FETCH PARTITION](#alter_fetch-partition) — Downloads a partition from another server.
+- [FETCH PARTITION\|PART](#alter_fetch-partition) — Downloads a part or partition from another server.
 - [MOVE PARTITION\|PART](#alter_move-partition) — Move partition/data part to another disk or volume.
 
@@ -88,12 +88,10 @@ Read more about setting the partition expression in a section [How to specify th
 
 This query is replicated. The replica-initiator checks whether there is data in the `detached` directory.
 If data exists, the query checks its integrity. If everything is correct, the query adds the data to the table.
 
-If the non-initiator replica, receiving the attach command, finds the part with the correct checksums in its own
-`detached` folder, it attaches the data without fetching it from other replicas.
+If the non-initiator replica, receiving the attach command, finds the part with the correct checksums in its own `detached` folder, it attaches the data without fetching it from other replicas.
 If there is no part with the correct checksums, the data is downloaded from any replica having the part.
 
-You can put data to the `detached` directory on one replica and use the `ALTER ... ATTACH` query to add it to the
-table on all replicas.
+You can put data to the `detached` directory on one replica and use the `ALTER ... ATTACH` query to add it to the table on all replicas.
 
 ## ATTACH PARTITION FROM {#alter_attach-partition-from}
 
 ``` sql
 ALTER TABLE table2 ATTACH PARTITION partition_expr FROM table1
 ```
 
-This query copies the data partition from the `table1` to `table2`.
-Note that data won't be deleted neither from `table1` nor from `table2`.
+This query copies the data partition from `table1` to `table2`.
+Note that data will not be deleted from either `table1` or `table2`.
 
 For the query to run successfully, the following conditions must be met:
 
@@ -198,29 +196,35 @@ ALTER TABLE table_name CLEAR INDEX index_name IN PARTITION partition_expr
 
 The query works similarly to `CLEAR COLUMN`, but it resets an index instead of a column data.
 
-## FETCH PARTITION {#alter_fetch-partition}
+## FETCH PARTITION|PART {#alter_fetch-partition}
 
 ``` sql
-ALTER TABLE table_name FETCH PARTITION partition_expr FROM 'path-in-zookeeper'
+ALTER TABLE table_name FETCH PARTITION|PART partition_expr FROM 'path-in-zookeeper'
 ```
 
 Downloads a part or partition from another server. This query works only for replicated tables.
 
 The query does the following:
 
-1. Downloads the partition from the specified shard. In ‘path-in-zookeeper’ you must specify a path to the shard in ZooKeeper.
+1. Downloads the partition|part from the specified shard. In ‘path-in-zookeeper’ you must specify a path to the shard in ZooKeeper.
 2. Then the query puts the downloaded data to the `detached` directory of the `table_name` table. Use the [ATTACH PARTITION\|PART](#alter_attach-partition) query to add the data to the table.
 
 For example:
 
+1. FETCH PARTITION
 ``` sql
 ALTER TABLE users FETCH PARTITION 201902 FROM '/clickhouse/tables/01-01/visits';
 ALTER TABLE users ATTACH PARTITION 201902;
 ```
+2. FETCH PART
+``` sql
+ALTER TABLE users FETCH PART 201901_2_2_0 FROM '/clickhouse/tables/01-01/visits';
+ALTER TABLE users ATTACH PART 201901_2_2_0;
+```
 
 Note that:
 
-- The `ALTER ... FETCH PARTITION` query isn’t replicated. It places the partition to the `detached` directory only on the local server.
+- The `ALTER ... FETCH PARTITION|PART` query isn’t replicated. It places the part or partition to the `detached` directory only on the local server.
 - The `ALTER TABLE ... ATTACH` query is replicated. It adds the data to all replicas. The data is added to one of the replicas from the `detached` directory, and to the others - from neighboring replicas.
 
 Before downloading, the system checks if the partition exists and the table structure matches. The most appropriate replica is selected automatically from the healthy replicas.
diff --git a/docs/en/sql-reference/statements/alter/ttl.md b/docs/en/sql-reference/statements/alter/ttl.md
index aa7ee838e10..9cd63d3b8fe 100644
--- a/docs/en/sql-reference/statements/alter/ttl.md
+++ b/docs/en/sql-reference/statements/alter/ttl.md
@@ -79,7 +79,7 @@ The `TTL` is no longer there, so the second row is not deleted:
 └───────────────────────┴─────────┴──────────────┘
 ```
 
-### See Also
+**See Also**
 
 - More about the [TTL-expression](../../../sql-reference/statements/create/table.md#ttl-expression).
 - Modify column [with TTL](../../../sql-reference/statements/alter/column.md#alter_modify-column).
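+
+For completeness, a table-level TTL can be changed in a similar way. A sketch only, reusing the `table_with_ttl` name from the examples above (the column name `event_time` and the interval are assumptions):
+
+``` sql
+ALTER TABLE table_with_ttl MODIFY TTL event_time + INTERVAL 30 DAY;
+```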
diff --git a/docs/en/sql-reference/statements/alter/user.md b/docs/en/sql-reference/statements/alter/user.md
index b590bf4887d..73081bc8619 100644
--- a/docs/en/sql-reference/statements/alter/user.md
+++ b/docs/en/sql-reference/statements/alter/user.md
@@ -15,11 +15,23 @@ ALTER USER [IF EXISTS] name1 [ON CLUSTER cluster_name1]
     [RENAME TO new_name1]
     [NOT IDENTIFIED | IDENTIFIED {[WITH {no_password | plaintext_password | sha256_password | sha256_hash | double_sha1_password | double_sha1_hash}] BY {'password' | 'hash'}} | {WITH ldap SERVER 'server_name'} | {WITH kerberos [REALM 'realm']}]
     [[ADD | DROP] HOST {LOCAL | NAME 'name' | REGEXP 'name_regexp' | IP 'address' | LIKE 'pattern'} [,...] | ANY | NONE]
     [DEFAULT ROLE role [,...] | ALL | ALL EXCEPT role [,...] ]
+    [GRANTEES {user | role | ANY | NONE} [,...] [EXCEPT {user | role} [,...]]]
     [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY | WRITABLE] | PROFILE 'profile_name'] [,...]
 ```
 
 To use `ALTER USER` you must have the [ALTER USER](../../../sql-reference/statements/grant.md#grant-access-management) privilege.
 
+## GRANTEES Clause {#grantees}
+
+Specifies users or roles which are allowed to receive [privileges](../../../sql-reference/statements/grant.md#grant-privileges) from this user, provided that this user also has all the required access granted with [GRANT OPTION](../../../sql-reference/statements/grant.md#grant-privigele-syntax). Options of the `GRANTEES` clause:
+
+- `user` — Specifies a user this user can grant privileges to.
+- `role` — Specifies a role this user can grant privileges to.
+- `ANY` — This user can grant privileges to anyone. It's the default setting.
+- `NONE` — This user cannot grant privileges to anyone.
+
+You can exclude any user or role by using the `EXCEPT` expression. For example, `ALTER USER user1 GRANTEES ANY EXCEPT user2`. It means that if `user1` has some privileges granted with `GRANT OPTION`, it will be able to grant those privileges to anyone except `user2`.
+
 ## Examples {#alter-user-examples}
 
 Set assigned roles as default:
@@ -43,3 +55,9 @@ Set all the assigned roles to default, excepting `role1` and `role2`:
 ``` sql
 ALTER USER user DEFAULT ROLE ALL EXCEPT role1, role2
 ```
+
+Allow the user with the `john` account to grant his privileges to the user with the `jack` account:
+
+``` sql
+ALTER USER john GRANTEES jack;
+```
diff --git a/docs/en/sql-reference/statements/create/row-policy.md b/docs/en/sql-reference/statements/create/row-policy.md
index cbe639c6fc5..1df7cc36995 100644
--- a/docs/en/sql-reference/statements/create/row-policy.md
+++ b/docs/en/sql-reference/statements/create/row-policy.md
@@ -5,39 +5,84 @@ toc_title: ROW POLICY
 
 # CREATE ROW POLICY {#create-row-policy-statement}
 
-Creates [filters for rows](../../../operations/access-rights.md#row-policy-management), which a user can read from a table.
+Creates a [row policy](../../../operations/access-rights.md#row-policy-management), i.e. a filter used to determine which rows a user can read from a table.
+
+!!! note "Warning"
+    Row policies make sense only for users with read-only access. If a user can modify a table or copy partitions between tables, it defeats the restrictions of row policies.
 
 Syntax:
 
 ``` sql
CREATE [ROW] POLICY [IF NOT EXISTS | OR REPLACE] policy_name1 [ON CLUSTER cluster_name1] ON [db1.]table1
     [, policy_name2 [ON CLUSTER cluster_name2] ON [db2.]table2 ...]
+    [FOR SELECT] USING condition
     [AS {PERMISSIVE | RESTRICTIVE}]
-    [FOR SELECT]
-    [USING condition]
     [TO {role1 [, role2 ...]
| ALL | ALL EXCEPT role1 [, role2 ...]}]
 ```
 
-`ON CLUSTER` clause allows creating row policies on a cluster, see [Distributed DDL](../../../sql-reference/distributed-ddl.md).
+## USING Clause {#create-row-policy-using}
 
-## AS Clause {#create-row-policy-as}
-
-Using this section you can create permissive or restrictive policies.
-
-Permissive policy grants access to rows. Permissive policies which apply to the same table are combined together using the boolean `OR` operator. Policies are permissive by default.
-
-Restrictive policy restricts access to rows. Restrictive policies which apply to the same table are combined together using the boolean `AND` operator.
-
-Restrictive policies apply to rows that passed the permissive filters. If you set restrictive policies but no permissive policies, the user can’t get any row from the table.
+Allows specifying a condition to filter rows. A user will see a row if the condition evaluates to non-zero for that row.
 
 ## TO Clause {#create-row-policy-to}
 
-In the section `TO` you can provide a mixed list of roles and users, for example, `CREATE ROW POLICY ... TO accountant, john@localhost`.
+In the section `TO` you can provide a list of users and roles this policy should work for. For example, `CREATE ROW POLICY ... TO accountant, john@localhost`.
 
-Keyword `ALL` means all the ClickHouse users including current user. Keywords `ALL EXCEPT` allow to exclude some users from the all users list, for example, `CREATE ROW POLICY ... TO ALL EXCEPT accountant, john@localhost`
+Keyword `ALL` means all the ClickHouse users, including the current user. Keyword `ALL EXCEPT` allows excluding some users from the all-users list, for example, `CREATE ROW POLICY ... TO ALL EXCEPT accountant, john@localhost`
 
-## Examples {#examples}
+!!! note "Note"
+    If there are no row policies defined for a table, then any user can `SELECT` all the rows from the table. Defining one or more row policies for the table makes access to the table depend on the row policies, no matter if those row policies are defined for the current user or not. For example, the following policy
+
+    `CREATE ROW POLICY pol1 ON mydb.table1 USING b=1 TO mira, peter`
 
-`CREATE ROW POLICY filter ON mydb.mytable FOR SELECT USING a<1000 TO accountant, john@localhost`
+    forbids the users `mira` and `peter` from seeing the rows with `b != 1`, and any non-mentioned user (e.g., the user `paul`) will see no rows from `mydb.table1` at all.
+
+    If that's not desirable, it can be fixed by adding one more row policy, like the following:
 
-`CREATE ROW POLICY filter ON mydb.mytable FOR SELECT USING a<1000 TO ALL EXCEPT mira`
+    `CREATE ROW POLICY pol2 ON mydb.table1 USING 1 TO ALL EXCEPT mira, peter`
+
+## AS Clause {#create-row-policy-as}
+
+It's allowed to have more than one policy enabled on the same table for the same user at one time. So we need a way to combine the conditions from multiple policies.
+
+By default, policies are combined using the boolean `OR` operator. For example, the following policies
+
+``` sql
+CREATE ROW POLICY pol1 ON mydb.table1 USING b=1 TO mira, peter
+CREATE ROW POLICY pol2 ON mydb.table1 USING c=2 TO peter, antonio
+```
+
+enable the user `peter` to see rows with either `b=1` or `c=2`.
+
+The `AS` clause specifies how policies should be combined with other policies. Policies can be either permissive or restrictive. By default, policies are permissive, which means they are combined using the boolean `OR` operator.
+
+A policy can be defined as restrictive as an alternative.
Restrictive policies are combined using the boolean `AND` operator.
+
+Here is the general formula:
+
+```
+row_is_visible = (one or more of the permissive policies' conditions are non-zero) AND
+                 (all of the restrictive policies' conditions are non-zero)
+```
+
+For example, the following policies
+
+``` sql
+CREATE ROW POLICY pol1 ON mydb.table1 USING b=1 TO mira, peter
+CREATE ROW POLICY pol2 ON mydb.table1 USING c=2 AS RESTRICTIVE TO peter, antonio
+```
+
+enable the user `peter` to see rows only if both `b=1` AND `c=2`.
+
+## ON CLUSTER Clause {#create-row-policy-on-cluster}
+
+Allows creating row policies on a cluster, see [Distributed DDL](../../../sql-reference/distributed-ddl.md).
+
+## Examples
+
+`CREATE ROW POLICY filter1 ON mydb.mytable USING a<1000 TO accountant, john@localhost`
+
+`CREATE ROW POLICY filter2 ON mydb.mytable USING a<1000 AND b=5 TO ALL EXCEPT mira`
+
+`CREATE ROW POLICY filter3 ON mydb.mytable USING 1 TO admin`
diff --git a/docs/en/sql-reference/statements/create/table.md b/docs/en/sql-reference/statements/create/table.md
index bad99980191..5f1f0151350 100644
--- a/docs/en/sql-reference/statements/create/table.md
+++ b/docs/en/sql-reference/statements/create/table.md
@@ -50,15 +50,32 @@ Creates a table with the same result as that of the [table function](../../../sq
 
 ### From SELECT query {#from-select-query}
 
 ``` sql
-CREATE TABLE [IF NOT EXISTS] [db.]table_name ENGINE = engine AS SELECT ...
+CREATE TABLE [IF NOT EXISTS] [db.]table_name[(name1 [type1], name2 [type2], ...)] ENGINE = engine AS SELECT ...
 ```
 
-Creates a table with a structure like the result of the `SELECT` query, with the `engine` engine, and fills it with data from SELECT.
+Creates a table with a structure like the result of the `SELECT` query, with the `engine` engine, and fills it with data from `SELECT`. You can also explicitly specify a description of the columns.
 
-In all cases, if `IF NOT EXISTS` is specified, the query won’t return an error if the table already exists. In this case, the query won’t do anything.
+If the table already exists and `IF NOT EXISTS` is specified, the query won’t do anything.
 
 There can be other clauses after the `ENGINE` clause in the query. See detailed documentation on how to create tables in the descriptions of [table engines](../../../engines/table-engines/index.md#table_engines).
 
+**Example**
+
+Query:
+
+``` sql
+CREATE TABLE t1 (x String) ENGINE = Memory AS SELECT 1;
+SELECT x, toTypeName(x) FROM t1;
+```
+
+Result:
+
+```text
+┌─x─┬─toTypeName(x)─┐
+│ 1 │ String        │
+└───┴───────────────┘
+```
+
 ## NULL Or NOT NULL Modifiers {#null-modifiers}
 
 `NULL` and `NOT NULL` modifiers after data type in column definition allow or do not allow it to be [Nullable](../../../sql-reference/data-types/nullable.md#data_type-nullable).
diff --git a/docs/en/sql-reference/statements/create/user.md b/docs/en/sql-reference/statements/create/user.md
index 49a4e3813a1..456adc4bb13 100644
--- a/docs/en/sql-reference/statements/create/user.md
+++ b/docs/en/sql-reference/statements/create/user.md
@@ -15,6 +15,7 @@ CREATE USER [IF NOT EXISTS | OR REPLACE] name1 [ON CLUSTER cluster_name1]
     [NOT IDENTIFIED | IDENTIFIED {[WITH {no_password | plaintext_password | sha256_password | sha256_hash | double_sha1_password | double_sha1_hash}] BY {'password' | 'hash'}} | {WITH ldap SERVER 'server_name'} | {WITH kerberos [REALM 'realm']}]
     [HOST {LOCAL | NAME 'name' | REGEXP 'name_regexp' | IP 'address' | LIKE 'pattern'} [,...]
| ANY | NONE]
     [DEFAULT ROLE role [,...]]
+    [GRANTEES {user | role | ANY | NONE} [,...] [EXCEPT {user | role} [,...]]]
     [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY | WRITABLE] | PROFILE 'profile_name'] [,...]
 ```
 
@@ -53,12 +54,24 @@ Another way of specifying host is to use `@` syntax following the username. Exam
 !!! info "Warning"
     ClickHouse treats `user_name@'address'` as a username as a whole. Thus, technically you can create multiple users with the same `user_name` and different constructions after `@`. However, we don’t recommend to do so.
 
+## GRANTEES Clause {#grantees}
+
+Specifies users or roles which are allowed to receive [privileges](../../../sql-reference/statements/grant.md#grant-privileges) from this user, provided that this user also has all the required access granted with [GRANT OPTION](../../../sql-reference/statements/grant.md#grant-privigele-syntax). Options of the `GRANTEES` clause:
+
+- `user` — Specifies a user this user can grant privileges to.
+- `role` — Specifies a role this user can grant privileges to.
+- `ANY` — This user can grant privileges to anyone. It's the default setting.
+- `NONE` — This user cannot grant privileges to anyone.
+
+You can exclude any user or role by using the `EXCEPT` expression. For example, `CREATE USER user1 GRANTEES ANY EXCEPT user2`. It means that if `user1` has some privileges granted with `GRANT OPTION`, it will be able to grant those privileges to anyone except `user2`.
+
 ## Examples {#create-user-examples}
 
 Create the user account `mira` protected by the password `qwerty`:
 
 ``` sql
-CREATE USER mira HOST IP '127.0.0.1' IDENTIFIED WITH sha256_password BY 'qwerty'
+CREATE USER mira HOST IP '127.0.0.1' IDENTIFIED WITH sha256_password BY 'qwerty';
 ```
 
 `mira` should start the client app at the host where the ClickHouse server runs.
 
 Create the user account `john`, assign roles to it and make these roles default:
 
 ``` sql
-CREATE USER john DEFAULT ROLE role1, role2
+CREATE USER john DEFAULT ROLE role1, role2;
 ```
 
 Create the user account `john` and make all his future roles default:
 
 ``` sql
-CREATE USER user DEFAULT ROLE ALL
+CREATE USER john DEFAULT ROLE ALL;
 ```
 
 When some role is assigned to `john` in the future, it will become default automatically.
 
 Create the user account `john` and make all his future roles default excepting `role1` and `role2`:
 
 ``` sql
-CREATE USER john DEFAULT ROLE ALL EXCEPT role1, role2
+CREATE USER john DEFAULT ROLE ALL EXCEPT role1, role2;
 ```
+
+Create the user account `john` and allow him to grant his privileges to the user with the `jack` account:
+
+``` sql
+CREATE USER john GRANTEES jack;
+```
diff --git a/docs/en/sql-reference/statements/detach.md b/docs/en/sql-reference/statements/detach.md
index e9c9ed3693c..59f5b297ece 100644
--- a/docs/en/sql-reference/statements/detach.md
+++ b/docs/en/sql-reference/statements/detach.md
@@ -10,7 +10,7 @@ Makes the server "forget" about the existence of the table or materialized view.
 
 Syntax:
 
 ``` sql
-DETACH TABLE|VIEW [IF EXISTS] [db.]name [PERMANENTLY] [ON CLUSTER cluster]
+DETACH TABLE|VIEW [IF EXISTS] [db.]name [ON CLUSTER cluster] [PERMANENTLY]
 ```
 
 Detaching does not delete the data or metadata for the table or materialized view.
If the table or view was not detached `PERMANENTLY`, on the next server launch the server will read the metadata and recall the table/view again. If the table or view was detached `PERMANENTLY`, there will be no automatic recall. diff --git a/docs/en/sql-reference/statements/explain.md b/docs/en/sql-reference/statements/explain.md index 3cca29801dd..1c19adcdcd8 100644 --- a/docs/en/sql-reference/statements/explain.md +++ b/docs/en/sql-reference/statements/explain.md @@ -5,7 +5,7 @@ toc_title: EXPLAIN # EXPLAIN Statement {#explain} -Show the execution plan of a statement. +Shows the execution plan of a statement. Syntax: @@ -47,9 +47,9 @@ Union ### EXPLAIN AST {#explain-ast} -Dump query AST. +Dump query AST. Supports all types of queries, not only `SELECT`. -Example: +Examples: ```sql EXPLAIN AST SELECT 1; @@ -63,9 +63,25 @@ SelectWithUnionQuery (children 1) Literal UInt64_1 ``` +```sql +EXPLAIN AST ALTER TABLE t1 DELETE WHERE date = today(); +``` + +```sql + explain + AlterQuery t1 (children 1) + ExpressionList (children 1) + AlterCommand 27 (children 1) + Function equals (children 1) + ExpressionList (children 2) + Identifier date + Function today (children 1) + ExpressionList +``` + ### EXPLAIN SYNTAX {#explain-syntax} -Return query after syntax optimizations. +Returns query after syntax optimizations. Example: @@ -88,15 +104,16 @@ FROM ) AS `--.s` CROSS JOIN system.numbers AS c ``` + ### EXPLAIN PLAN {#explain-plan} Dump query plan steps. Settings: -- `header` — Print output header for step. Default: 0. -- `description` — Print step description. Default: 1. -- `actions` — Print detailed information about step actions. Default: 0. +- `header` — Prints output header for step. Default: 0. +- `description` — Prints step description. Default: 1. +- `actions` — Prints detailed information about step actions. Default: 0. Example: @@ -115,15 +132,16 @@ Union ``` !!! note "Note" - Step and query cost estimation is not supported. + Step and query cost estimation is not supported. ### EXPLAIN PIPELINE {#explain-pipeline} Settings: -- `header` — Print header for each output port. Default: 0. -- `graph` — Use DOT graph description language. Default: 0. -- `compact` — Print graph in compact mode if graph is enabled. Default: 1. +- `header` — Prints header for each output port. Default: 0. +- `graph` — Prints a graph described in the [DOT](https://en.wikipedia.org/wiki/DOT_(graph_description_language)) graph description language. Default: 0. +- `compact` — Prints graph in compact mode if `graph` setting is enabled. Default: 1. +- `indexes` — Shows used indexes, the number of filtered parts, and granules for every index applied. Default: 0. Supported for [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) tables. 
Example: diff --git a/docs/en/sql-reference/statements/grant.md b/docs/en/sql-reference/statements/grant.md index 0afc9b5b95f..89f35b5f701 100644 --- a/docs/en/sql-reference/statements/grant.md +++ b/docs/en/sql-reference/statements/grant.md @@ -91,7 +91,7 @@ Hierarchy of privileges: - `ALTER ADD CONSTRAINT` - `ALTER DROP CONSTRAINT` - `ALTER TTL` - - `ALTER MATERIALIZE TTL` + - `ALTER MATERIALIZE TTL` - `ALTER SETTINGS` - `ALTER MOVE PARTITION` - `ALTER FETCH PARTITION` @@ -102,9 +102,9 @@ Hierarchy of privileges: - [CREATE](#grant-create) - `CREATE DATABASE` - `CREATE TABLE` + - `CREATE TEMPORARY TABLE` - `CREATE VIEW` - `CREATE DICTIONARY` - - `CREATE TEMPORARY TABLE` - [DROP](#grant-drop) - `DROP DATABASE` - `DROP TABLE` @@ -150,7 +150,7 @@ Hierarchy of privileges: - `SYSTEM RELOAD` - `SYSTEM RELOAD CONFIG` - `SYSTEM RELOAD DICTIONARY` - - `SYSTEM RELOAD EMBEDDED DICTIONARIES` + - `SYSTEM RELOAD EMBEDDED DICTIONARIES` - `SYSTEM MERGES` - `SYSTEM TTL MERGES` - `SYSTEM FETCHES` @@ -276,10 +276,10 @@ Allows executing [ALTER](../../sql-reference/statements/alter/index.md) queries - `ALTER ADD CONSTRAINT`. Level: `TABLE`. Aliases: `ADD CONSTRAINT` - `ALTER DROP CONSTRAINT`. Level: `TABLE`. Aliases: `DROP CONSTRAINT` - `ALTER TTL`. Level: `TABLE`. Aliases: `ALTER MODIFY TTL`, `MODIFY TTL` - - `ALTER MATERIALIZE TTL`. Level: `TABLE`. Aliases: `MATERIALIZE TTL` + - `ALTER MATERIALIZE TTL`. Level: `TABLE`. Aliases: `MATERIALIZE TTL` - `ALTER SETTINGS`. Level: `TABLE`. Aliases: `ALTER SETTING`, `ALTER MODIFY SETTING`, `MODIFY SETTING` - `ALTER MOVE PARTITION`. Level: `TABLE`. Aliases: `ALTER MOVE PART`, `MOVE PARTITION`, `MOVE PART` - - `ALTER FETCH PARTITION`. Level: `TABLE`. Aliases: `FETCH PARTITION` + - `ALTER FETCH PARTITION`. Level: `TABLE`. Aliases: `ALTER FETCH PART`, `FETCH PARTITION`, `FETCH PART` - `ALTER FREEZE PARTITION`. Level: `TABLE`. Aliases: `FREEZE PARTITION` - `ALTER VIEW` Level: `GROUP` - `ALTER VIEW REFRESH`. Level: `VIEW`. Aliases: `ALTER LIVE VIEW REFRESH`, `REFRESH VIEW` @@ -304,9 +304,9 @@ Allows executing [CREATE](../../sql-reference/statements/create/index.md) and [A - `CREATE`. Level: `GROUP` - `CREATE DATABASE`. Level: `DATABASE` - `CREATE TABLE`. Level: `TABLE` + - `CREATE TEMPORARY TABLE`. Level: `GLOBAL` - `CREATE VIEW`. Level: `VIEW` - `CREATE DICTIONARY`. Level: `DICTIONARY` - - `CREATE TEMPORARY TABLE`. Level: `GLOBAL` **Notes** @@ -401,7 +401,7 @@ Allows a user to execute [SYSTEM](../../sql-reference/statements/system.md) quer - `SYSTEM RELOAD`. Level: `GROUP` - `SYSTEM RELOAD CONFIG`. Level: `GLOBAL`. Aliases: `RELOAD CONFIG` - `SYSTEM RELOAD DICTIONARY`. Level: `GLOBAL`. Aliases: `SYSTEM RELOAD DICTIONARIES`, `RELOAD DICTIONARY`, `RELOAD DICTIONARIES` - - `SYSTEM RELOAD EMBEDDED DICTIONARIES`. Level: `GLOBAL`. Aliases: R`ELOAD EMBEDDED DICTIONARIES` + - `SYSTEM RELOAD EMBEDDED DICTIONARIES`. Level: `GLOBAL`. Aliases: `RELOAD EMBEDDED DICTIONARIES` - `SYSTEM MERGES`. Level: `TABLE`. Aliases: `SYSTEM STOP MERGES`, `SYSTEM START MERGES`, `STOP MERGES`, `START MERGES` - `SYSTEM TTL MERGES`. Level: `TABLE`. Aliases: `SYSTEM STOP TTL MERGES`, `SYSTEM START TTL MERGES`, `STOP TTL MERGES`, `START TTL MERGES` - `SYSTEM FETCHES`. Level: `TABLE`. 
Aliases: `SYSTEM STOP FETCHES`, `SYSTEM START FETCHES`, `STOP FETCHES`, `START FETCHES`
diff --git a/docs/en/sql-reference/statements/optimize.md b/docs/en/sql-reference/statements/optimize.md
index 49a7404d76e..247252d3f4e 100644
--- a/docs/en/sql-reference/statements/optimize.md
+++ b/docs/en/sql-reference/statements/optimize.md
@@ -5,13 +5,18 @@ toc_title: OPTIMIZE
 
 # OPTIMIZE Statement {#misc_operations-optimize}
 
+This query tries to initialize an unscheduled merge of data parts for tables.
+
+!!! warning "Warning"
+    `OPTIMIZE` can’t fix the `Too many parts` error.
+
+**Syntax**
+
 ``` sql
 OPTIMIZE TABLE [db.]name [ON CLUSTER cluster] [PARTITION partition | PARTITION ID 'partition_id'] [FINAL] [DEDUPLICATE [BY expression]]
 ```
 
-This query tries to initialize an unscheduled merge of data parts for tables with a table engine from the [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) family.
-
-The `OPTMIZE` query is also supported for the [MaterializedView](../../engines/table-engines/special/materializedview.md) and the [Buffer](../../engines/table-engines/special/buffer.md) engines. Other table engines aren’t supported.
+The `OPTIMIZE` query is supported for the [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) family, the [MaterializedView](../../engines/table-engines/special/materializedview.md) and the [Buffer](../../engines/table-engines/special/buffer.md) engines. Other table engines aren’t supported.
 
 When `OPTIMIZE` is used with the [ReplicatedMergeTree](../../engines/table-engines/mergetree-family/replication.md) family of table engines, ClickHouse creates a task for merging and waits for execution on all nodes (if the `replication_alter_partitions_sync` setting is enabled).
 
@@ -21,12 +26,13 @@ When `OPTIMIZE` is used with the [ReplicatedMergeTree](../../engines/table-engin
 
 - If you specify `DEDUPLICATE`, then completely identical rows (unless by-clause is specified) will be deduplicated (all columns are compared), it makes sense only for the MergeTree engine.
 
-### BY expression {#by-expression}
+## BY expression {#by-expression}
 
 If you want to perform deduplication on a custom set of columns rather than on all, you can specify the list of columns explicitly or use any combination of [`*`](../../sql-reference/statements/select/index.md#asterisk), [`COLUMNS`](../../sql-reference/statements/select/index.md#columns-expression) or [`EXCEPT`](../../sql-reference/statements/select/index.md#except-modifier) expressions. The explicitly written or implicitly expanded list of columns must include all columns specified in the row ordering expression (both primary and sorting keys) and the partitioning expression (partitioning key).
 
-Note that `*` behaves just like in `SELECT`: `MATERIALIZED`, and `ALIAS` columns are not used for expansion.
-Also, it is an error to specify empty list of columns, or write an expression that results in an empty list of columns, or deduplicate by an ALIAS column.
+!!! note "Note"
+    Notice that `*` behaves just like in `SELECT`: `MATERIALIZED` and `ALIAS` columns are not used for expansion.
+    Also, it is an error to specify an empty list of columns, or to write an expression that results in an empty list of columns, or to deduplicate by an `ALIAS` column.
``` sql
OPTIMIZE TABLE table DEDUPLICATE; -- the old one
OPTIMIZE TABLE table DEDUPLICATE BY *;
OPTIMIZE TABLE table DEDUPLICATE BY * EXCEPT colX;
OPTIMIZE TABLE table DEDUPLICATE BY * EXCEPT (colX, colY);
OPTIMIZE TABLE table DEDUPLICATE BY col1,col2,col3;
OPTIMIZE TABLE table DEDUPLICATE BY COLUMNS('column-matched-by-regex');
OPTIMIZE TABLE table DEDUPLICATE BY COLUMNS('column-matched-by-regex') EXCEPT colX;
OPTIMIZE TABLE table DEDUPLICATE BY COLUMNS('column-matched-by-regex') EXCEPT (colX, colY);
```
 
-**Example:**
+**Examples**
+
+Create a table:
 
-A silly synthetic table.
 ``` sql
 CREATE TABLE example (
     primary_key Int32,
     secondary_key Int32,
     value UInt32,
     partition_key UInt32,
     materialized_value UInt32 MATERIALIZED 12345,
     aliased_value UInt32 ALIAS 2,
     PRIMARY KEY primary_key
 ) ENGINE=MergeTree
 PARTITION BY partition_key
 ORDER BY (primary_key, secondary_key);
 ```
 
+The 'old' deduplicate: all columns are taken into account, i.e. a row is removed only if all values in all columns are equal to the corresponding values in the previous row.
+
 ``` sql
--- The 'old' deduplicate, all columns are taken into account, i.e. row is removed only if all values in all columns are equal to corresponding values in previous row.
 OPTIMIZE TABLE example FINAL DEDUPLICATE;
 ```
 
+Deduplicate by all columns that are not `ALIAS` or `MATERIALIZED`: `primary_key`, `secondary_key`, `value`, `partition_key`, and `materialized_value` columns.
+
 ``` sql
--- Deduplicate by all columns that are not `ALIAS` or `MATERIALIZED`: `primary_key`, `secondary_key`, `value`, `partition_key`, and `materialized_value` columns.
 OPTIMIZE TABLE example FINAL DEDUPLICATE BY *;
 ```
 
+Deduplicate by all columns that are not `ALIAS` or `MATERIALIZED` and explicitly not `materialized_value`: `primary_key`, `secondary_key`, `value`, and `partition_key` columns.
+
 ``` sql
--- Deduplicate by all columns that are not `ALIAS` or `MATERIALIZED` and explicitly not `materialized_value`: `primary_key`, `secondary_key`, `value`, and `partition_key` columns.
 OPTIMIZE TABLE example FINAL DEDUPLICATE BY * EXCEPT materialized_value;
 ```
 
+Deduplicate explicitly by `primary_key`, `secondary_key`, and `partition_key` columns.
+
 ``` sql
--- Deduplicate explicitly by `primary_key`, `secondary_key`, and `partition_key` columns.
 OPTIMIZE TABLE example FINAL DEDUPLICATE BY primary_key, secondary_key, partition_key;
 ```
 
+Deduplicate by any column matching a regex: `primary_key`, `secondary_key`, and `partition_key` columns.
+
 ``` sql
--- Deduplicate by any column matching a regex: `primary_key`, `secondary_key`, and `partition_key` columns.
 OPTIMIZE TABLE example FINAL DEDUPLICATE BY COLUMNS('.*_key');
 ```
-
-
-!!! warning "Warning"
-    `OPTIMIZE` can’t fix the “Too many parts” error.
diff --git a/docs/en/sql-reference/statements/rename.md b/docs/en/sql-reference/statements/rename.md
index 4f14ad016a3..a9dda6ed3b2 100644
--- a/docs/en/sql-reference/statements/rename.md
+++ b/docs/en/sql-reference/statements/rename.md
@@ -5,6 +5,14 @@ toc_title: RENAME
 
 # RENAME Statement {#misc_operations-rename}
 
+## RENAME DATABASE {#misc_operations-rename_database}
+
+Renames a database. Supported only for the Atomic database engine.
+
+```
+RENAME DATABASE atomic_database1 TO atomic_database2 [ON CLUSTER cluster]
+```
+
+## RENAME TABLE {#misc_operations-rename_table}
+
 Renames one or more tables.
``` sql diff --git a/docs/en/sql-reference/statements/select/index.md b/docs/en/sql-reference/statements/select/index.md index ada4699c224..0712ea8daa7 100644 --- a/docs/en/sql-reference/statements/select/index.md +++ b/docs/en/sql-reference/statements/select/index.md @@ -47,6 +47,7 @@ Specifics of each optional clause are covered in separate sections, which are li - [SELECT clause](#select-clause) - [DISTINCT clause](../../../sql-reference/statements/select/distinct.md) - [LIMIT clause](../../../sql-reference/statements/select/limit.md) +- [OFFSET clause](../../../sql-reference/statements/select/offset.md) - [UNION clause](../../../sql-reference/statements/select/union.md) - [INTO OUTFILE clause](../../../sql-reference/statements/select/into-outfile.md) - [FORMAT clause](../../../sql-reference/statements/select/format.md) diff --git a/docs/en/sql-reference/statements/select/limit.md b/docs/en/sql-reference/statements/select/limit.md index 4b25efbe95a..6ed38b2dd64 100644 --- a/docs/en/sql-reference/statements/select/limit.md +++ b/docs/en/sql-reference/statements/select/limit.md @@ -12,6 +12,9 @@ toc_title: LIMIT If there is no [ORDER BY](../../../sql-reference/statements/select/order-by.md) clause that explicitly sorts results, the choice of rows for the result may be arbitrary and non-deterministic. +!!! note "Note" + The number of rows in the result set can also depend on the [limit](../../../operations/settings/settings.md#limit) setting. + ## LIMIT … WITH TIES Modifier {#limit-with-ties} When you set the `WITH TIES` modifier for `LIMIT n[,m]` and specify `ORDER BY expr_list`, the result will contain the first `n` (or `n,m`) rows and all rows with the same `ORDER BY` field values as the row at position `n` (for `LIMIT n`) or `m` (for `LIMIT n,m`). diff --git a/docs/en/sql-reference/statements/select/offset.md b/docs/en/sql-reference/statements/select/offset.md new file mode 100644 index 00000000000..3efd916bcb8 --- /dev/null +++ b/docs/en/sql-reference/statements/select/offset.md @@ -0,0 +1,86 @@ +--- +toc_title: OFFSET +--- + +# OFFSET FETCH Clause {#offset-fetch} + +`OFFSET` and `FETCH` allow you to retrieve data by portions. They specify a row block which you want to get by a single query. + +``` sql +OFFSET offset_row_count {ROW | ROWS} [FETCH {FIRST | NEXT} fetch_row_count {ROW | ROWS} {ONLY | WITH TIES}] +``` + +The `offset_row_count` or `fetch_row_count` value can be a number or a literal constant. You can omit `fetch_row_count`; by default, it equals 1. + +`OFFSET` specifies the number of rows to skip before starting to return rows from the query result set. + +`FETCH` specifies the maximum number of rows that can be in the result of a query. + +The `ONLY` option is used to return rows that immediately follow the rows omitted by the `OFFSET`. In this case the `FETCH` is an alternative to the [LIMIT](../../../sql-reference/statements/select/limit.md) clause. For example, the following query + +``` sql +SELECT * FROM test_fetch ORDER BY a OFFSET 1 ROW FETCH FIRST 3 ROWS ONLY; +``` + +is identical to the query + +``` sql +SELECT * FROM test_fetch ORDER BY a LIMIT 3 OFFSET 1; +``` + +The `WITH TIES` option is used to return any additional rows that tie for the last place in the result set according to the `ORDER BY` clause. For example, if `fetch_row_count` is set to 5 but two additional rows match the values of the `ORDER BY` columns in the fifth row, the result set will contain seven rows. + +!!!
note "Note" + According to the standard, the `OFFSET` clause must come before the `FETCH` clause if both are present. + +!!! note "Note" + The real offset can also depend on the [offset](../../../operations/settings/settings.md#offset) setting. + +## Examples {#examples} + +Input table: + +``` text +┌─a─┬─b─┐ +│ 1 │ 1 │ +│ 2 │ 1 │ +│ 3 │ 4 │ +│ 1 │ 3 │ +│ 5 │ 4 │ +│ 0 │ 6 │ +│ 5 │ 7 │ +└───┴───┘ +``` + +Usage of the `ONLY` option: + +``` sql +SELECT * FROM test_fetch ORDER BY a OFFSET 3 ROW FETCH FIRST 3 ROWS ONLY; +``` + +Result: + +``` text +┌─a─┬─b─┐ +│ 2 │ 1 │ +│ 3 │ 4 │ +│ 5 │ 4 │ +└───┴───┘ +``` + +Usage of the `WITH TIES` option: + +``` sql +SELECT * FROM test_fetch ORDER BY a OFFSET 3 ROW FETCH FIRST 3 ROWS WITH TIES; +``` + +Result: + +``` text +┌─a─┬─b─┐ +│ 2 │ 1 │ +│ 3 │ 4 │ +│ 5 │ 4 │ +│ 5 │ 7 │ +└───┴───┘ +``` diff --git a/docs/en/sql-reference/statements/select/order-by.md b/docs/en/sql-reference/statements/select/order-by.md index fb1df445db1..f19a785c6b7 100644 --- a/docs/en/sql-reference/statements/select/order-by.md +++ b/docs/en/sql-reference/statements/select/order-by.md @@ -400,84 +400,4 @@ returns └────────────┴────────────┴──────────┘ ``` -## OFFSET FETCH Clause {#offset-fetch} - -`OFFSET` and `FETCH` allow you to retrieve data by portions. They specify a row block which you want to get by a single query. - -``` sql -OFFSET offset_row_count {ROW | ROWS}] [FETCH {FIRST | NEXT} fetch_row_count {ROW | ROWS} {ONLY | WITH TIES}] -``` - -The `offset_row_count` or `fetch_row_count` value can be a number or a literal constant. You can omit `fetch_row_count`; by default, it equals 1. - -`OFFSET` specifies the number of rows to skip before starting to return rows from the query. - -The `FETCH` specifies the maximum number of rows that can be in the result of a query. - -The `ONLY` option is used to return rows that immediately follow the rows omitted by the `OFFSET`. In this case the `FETCH` is an alternative to the [LIMIT](../../../sql-reference/statements/select/limit.md) clause. For example, the following query - -``` sql -SELECT * FROM test_fetch ORDER BY a OFFSET 1 ROW FETCH FIRST 3 ROWS ONLY; -``` - -is identical to the query - -``` sql -SELECT * FROM test_fetch ORDER BY a LIMIT 3 OFFSET 1; -``` - -The `WITH TIES` option is used to return any additional rows that tie for the last place in the result set according to the `ORDER BY` clause. For example, if `fetch_row_count` is set to 5 but two additional rows match the values of the `ORDER BY` columns in the fifth row, the result set will contain seven rows. - -!!! note "Note" - According to the standard, the `OFFSET` clause must come before the `FETCH` clause if both are present. 
- -### Examples {#examples} - -Input table: - -``` text -┌─a─┬─b─┐ -│ 1 │ 1 │ -│ 2 │ 1 │ -│ 3 │ 4 │ -│ 1 │ 3 │ -│ 5 │ 4 │ -│ 0 │ 6 │ -│ 5 │ 7 │ -└───┴───┘ -``` - -Usage of the `ONLY` option: - -``` sql -SELECT * FROM test_fetch ORDER BY a OFFSET 3 ROW FETCH FIRST 3 ROWS ONLY; -``` - -Result: - -``` text -┌─a─┬─b─┐ -│ 2 │ 1 │ -│ 3 │ 4 │ -│ 5 │ 4 │ -└───┴───┘ -``` - -Usage of the `WITH TIES` option: - -``` sql -SELECT * FROM test_fetch ORDER BY a OFFSET 3 ROW FETCH FIRST 3 ROWS WITH TIES; -``` - -Result: - -``` text -┌─a─┬─b─┐ -│ 2 │ 1 │ -│ 3 │ 4 │ -│ 5 │ 4 │ -│ 5 │ 7 │ -└───┴───┘ -``` - [Original article](https://clickhouse.tech/docs/en/sql-reference/statements/select/order-by/) diff --git a/docs/en/sql-reference/statements/system.md b/docs/en/sql-reference/statements/system.md index 2348a2a2668..7871894ccac 100644 --- a/docs/en/sql-reference/statements/system.md +++ b/docs/en/sql-reference/statements/system.md @@ -169,7 +169,7 @@ SYSTEM START MERGES [ON VOLUME | [db.]merge_tree_family_table_name ### STOP TTL MERGES {#query_language-stop-ttl-merges} Provides possibility to stop background delete old data according to [TTL expression](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-ttl) for tables in the MergeTree family: -Return `Ok.` even table doesn’t exists or table have not MergeTree engine. Return error when database doesn’t exists: +Returns `Ok.` even if table doesn’t exist or table has not MergeTree engine. Returns error when database doesn’t exist: ``` sql SYSTEM STOP TTL MERGES [[db.]merge_tree_family_table_name] @@ -178,7 +178,7 @@ SYSTEM STOP TTL MERGES [[db.]merge_tree_family_table_name] ### START TTL MERGES {#query_language-start-ttl-merges} Provides possibility to start background delete old data according to [TTL expression](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-ttl) for tables in the MergeTree family: -Return `Ok.` even table doesn’t exists. Return error when database doesn’t exists: +Returns `Ok.` even if table doesn’t exist. Returns error when database doesn’t exist: ``` sql SYSTEM START TTL MERGES [[db.]merge_tree_family_table_name] @@ -187,7 +187,7 @@ SYSTEM START TTL MERGES [[db.]merge_tree_family_table_name] ### STOP MOVES {#query_language-stop-moves} Provides possibility to stop background move data according to [TTL table expression with TO VOLUME or TO DISK clause](../../engines/table-engines/mergetree-family/mergetree.md#mergetree-table-ttl) for tables in the MergeTree family: -Return `Ok.` even table doesn’t exists. Return error when database doesn’t exists: +Returns `Ok.` even if table doesn’t exist. Returns error when database doesn’t exist: ``` sql SYSTEM STOP MOVES [[db.]merge_tree_family_table_name] @@ -196,7 +196,7 @@ SYSTEM STOP MOVES [[db.]merge_tree_family_table_name] ### START MOVES {#query_language-start-moves} Provides possibility to start background move data according to [TTL table expression with TO VOLUME and TO DISK clause](../../engines/table-engines/mergetree-family/mergetree.md#mergetree-table-ttl) for tables in the MergeTree family: -Return `Ok.` even table doesn’t exists. Return error when database doesn’t exists: +Returns `Ok.` even if table doesn’t exist. 
Returns error when database doesn’t exist: ``` sql SYSTEM STOP MOVES [[db.]merge_tree_family_table_name] @@ -209,7 +209,7 @@ ClickHouse can manage background replication related processes in [ReplicatedMer ### STOP FETCHES {#query_language-system-stop-fetches} Provides possibility to stop background fetches for inserted parts for tables in the `ReplicatedMergeTree` family: -Always returns `Ok.` regardless of the table engine and even table or database doesn’t exists. +Always returns `Ok.` regardless of the table engine and even if the table or database doesn’t exist. ``` sql SYSTEM STOP FETCHES [[db.]replicated_merge_tree_family_table_name] @@ -218,7 +218,7 @@ SYSTEM STOP FETCHES [[db.]replicated_merge_tree_family_table_name] ### START FETCHES {#query_language-system-start-fetches} Provides possibility to start background fetches for inserted parts for tables in the `ReplicatedMergeTree` family: -Always returns `Ok.` regardless of the table engine and even table or database doesn’t exists. +Always returns `Ok.` regardless of the table engine and even if the table or database doesn’t exist. ``` sql SYSTEM START FETCHES [[db.]replicated_merge_tree_family_table_name] @@ -264,9 +264,7 @@ Wait until a `ReplicatedMergeTree` table will be synced with other replicas in a SYSTEM SYNC REPLICA [db.]replicated_merge_tree_family_table_name ``` -After running this statement the `[db.]replicated_merge_tree_family_table_name` fetches commands from -the common replicated log into its own replication queue, and then the query waits till the replica processes all -of the fetched commands. +After running this statement the `[db.]replicated_merge_tree_family_table_name` fetches commands from the common replicated log into its own replication queue, and then the query waits till the replica processes all of the fetched commands. ### RESTART REPLICA {#query_language-system-restart-replica} @@ -280,4 +278,3 @@ SYSTEM RESTART REPLICA [db.]replicated_merge_tree_family_table_name ### RESTART REPLICAS {#query_language-system-restart-replicas} Provides possibility to reinitialize Zookeeper sessions state for all `ReplicatedMergeTree` tables, will compare current state with Zookeeper as the source of truth and add tasks to the Zookeeper queue if needed - diff --git a/docs/en/sql-reference/statements/truncate.md b/docs/en/sql-reference/statements/truncate.md index a13936c88ab..f302a8605e2 100644 --- a/docs/en/sql-reference/statements/truncate.md +++ b/docs/en/sql-reference/statements/truncate.md @@ -11,4 +11,4 @@ TRUNCATE TABLE [IF EXISTS] [db.]name [ON CLUSTER cluster] Removes all data from a table. When the clause `IF EXISTS` is omitted, the query returns an error if the table does not exist. -The `TRUNCATE` query is not supported for [View](../../engines/table-engines/special/view.md), [File](../../engines/table-engines/special/file.md), [URL](../../engines/table-engines/special/url.md) and [Null](../../engines/table-engines/special/null.md) table engines. +The `TRUNCATE` query is not supported for [View](../../engines/table-engines/special/view.md), [File](../../engines/table-engines/special/file.md), [URL](../../engines/table-engines/special/url.md), [Buffer](../../engines/table-engines/special/buffer.md) and [Null](../../engines/table-engines/special/null.md) table engines.
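As a quick hedged illustration of the syntax above (the table and cluster names are hypothetical, used only for this sketch):

``` sql
-- hypothetical names; removes all rows from db.hits on every host of my_cluster
TRUNCATE TABLE IF EXISTS db.hits ON CLUSTER my_cluster;
```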
diff --git a/docs/en/sql-reference/syntax.md b/docs/en/sql-reference/syntax.md index 5d0eee76393..573e35d2f71 100644 --- a/docs/en/sql-reference/syntax.md +++ b/docs/en/sql-reference/syntax.md @@ -171,7 +171,7 @@ Received exception from server (version 18.14.17): Code: 184. DB::Exception: Received from localhost:9000, 127.0.0.1. DB::Exception: Aggregate function sum(b) is found inside another aggregate function in query. ``` -In this example, we declared table `t` with column `b`. Then, when selecting data, we defined the `sum(b) AS b` alias. As aliases are global, ClickHouse substituted the literal `b` in the expression `argMax(a, b)` with the expression `sum(b)`. This substitution caused the exception. +In this example, we declared table `t` with column `b`. Then, when selecting data, we defined the `sum(b) AS b` alias. As aliases are global, ClickHouse substituted the literal `b` in the expression `argMax(a, b)` with the expression `sum(b)`. This substitution caused the exception. You can change this default behavior by setting [prefer_column_name_to_alias](../operations/settings/settings.md#prefer_column_name_to_alias) to `1`. ## Asterisk {#asterisk} diff --git a/docs/en/sql-reference/table-functions/postgresql.md b/docs/en/sql-reference/table-functions/postgresql.md index bfb5fdf9be6..3eab572ac12 100644 --- a/docs/en/sql-reference/table-functions/postgresql.md +++ b/docs/en/sql-reference/table-functions/postgresql.md @@ -65,9 +65,9 @@ postgres=# INSERT INTO test (int_id, str, "float") VALUES (1,'test',2); INSERT 0 1 postgresql> SELECT * FROM test; - int_id | int_nullable | float | str | float_nullable ---------+--------------+-------+------+---------------- - 1 | | 2 | test | + int_id | int_nullable | float | str | float_nullable + --------+--------------+-------+------+---------------- + 1 | | 2 | test | (1 row) ``` diff --git a/docs/en/sql-reference/table-functions/s3.md b/docs/en/sql-reference/table-functions/s3.md index 34f0607b94c..285ec862aab 100644 --- a/docs/en/sql-reference/table-functions/s3.md +++ b/docs/en/sql-reference/table-functions/s3.md @@ -18,7 +18,7 @@ s3(path, [aws_access_key_id, aws_secret_access_key,] format, structure, [compres - `path` — Bucket url with path to file. Supports following wildcards in readonly mode: `*`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` — numbers, `'abc'`, `'def'` — strings. For more information see [here](../../engines/table-engines/integrations/s3.md#wildcards-in-path). - `format` — The [format](../../interfaces/formats.md#formats) of the file. - `structure` — Structure of the table. Format `'column1_name column1_type, column2_name column2_type, ...'`. -- `compression` — Parameter is optional. Supported values: none, gzip/gz, brotli/br, xz/LZMA, zstd/zst. By default, it will autodetect compression by file extension. +- `compression` — Parameter is optional. Supported values: `none`, `gzip/gz`, `brotli/br`, `xz/LZMA`, `zstd/zst`. By default, it will autodetect compression by file extension. 
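A short hedged usage sketch of this signature (the bucket URL and column list are illustrative assumptions, not taken from this document):

``` sql
-- read a gzip-compressed CSV file from S3; URL and schema are hypothetical
SELECT name, value
FROM s3('https://storage.yandexcloud.net/my-bucket/data.csv.gz', 'CSV', 'name String, value UInt32', 'gzip')
LIMIT 5;
```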
**Returned value** diff --git a/docs/ja/commercial/cloud.md b/docs/ja/commercial/cloud.md index 84f58e46cdb..62fc75ecbda 100644 --- a/docs/ja/commercial/cloud.md +++ b/docs/ja/commercial/cloud.md @@ -22,7 +22,7 @@ toc_title: "\u30AF\u30E9\u30A6\u30C9" ## Alibaba Cloud {#alibaba-cloud} -ClickHouseのためのAlibaba Cloudの管理サービス [中国サイト](https://www.aliyun.com/product/clickhouse) (2021年5月に国際サイトで利用可能になります) 次の主な機能を提供します: +[ClickHouseのためのAlibaba Cloudの管理サービス](https://www.alibabacloud.com/product/clickhouse) 次の主な機能を提供します: - Alibaba Cloud Apsara分散システムをベースにした信頼性の高いクラウドディスクストレージエンジン - 手動でのデータ移行を必要とせずに、オン・デマンドで容量を拡張 diff --git a/docs/ja/development/build.md b/docs/ja/development/build.md index e44ba45485e..191fa665ccd 100644 --- a/docs/ja/development/build.md +++ b/docs/ja/development/build.md @@ -19,28 +19,17 @@ $ sudo apt-get install git cmake python ninja-build 古いシステムではcmakeの代わりにcmake3。 -## GCC9のインストール {#install-gcc-10} +## Clang 11 のインストール -これを行うにはいくつかの方法があります。 +On Ubuntu/Debian you can use the automatic installation script (check [official webpage](https://apt.llvm.org/)) -### PPAパッケージからインストール {#install-from-a-ppa-package} - -``` bash -$ sudo apt-get install software-properties-common -$ sudo apt-add-repository ppa:ubuntu-toolchain-r/test -$ sudo apt-get update -$ sudo apt-get install gcc-10 g++-10 +```bash +sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)" ``` -### ソースからインスト {#install-from-sources} - -見て [utils/ci/build-gcc-from-sources.sh](https://github.com/ClickHouse/ClickHouse/blob/master/utils/ci/build-gcc-from-sources.sh) - -## ビルドにGCC9を使用する {#use-gcc-10-for-builds} - ``` bash -$ export CC=gcc-10 -$ export CXX=g++-10 +$ export CC=clang +$ export CXX=clang++ ``` ## ClickHouseソースをチェックアウトする {#checkout-clickhouse-sources} @@ -76,7 +65,7 @@ $ cd .. - Git(ソースをチェックアウトするためにのみ使用され、ビルドには必要ありません) - CMake3.10以降 - 忍者(推奨)または作る -- C++コンパイラ:gcc9またはclang8以降 +- C++コンパイラ:clang11以降 - リンカ:lldまたはgold(古典的なGNU ldは動作しません) - Python(LLVMビルド内でのみ使用され、オプションです) diff --git a/docs/ja/development/developer-instruction.md b/docs/ja/development/developer-instruction.md index ccc3a177d1f..d7e5217b3b6 100644 --- a/docs/ja/development/developer-instruction.md +++ b/docs/ja/development/developer-instruction.md @@ -133,19 +133,19 @@ ArchまたはGentooを使用する場合は、おそらくCMakeのインスト ClickHouseはビルドに複数の外部ライブラリを使用します。 それらのすべては、サブモジュールにあるソースからClickHouseと一緒に構築されているので、別々にインストールする必要はありません。 リストは次の場所で確認できます `contrib`. -# C++コンパイラ {#c-compiler} +## C++ Compiler {#c-compiler} -ClickHouseのビルドには、バージョン9以降のGCCとClangバージョン8以降のコンパイラがサポートされます。 +Clang starting from version 11 is supported for building ClickHouse. -公式のYandexビルドは、わずかに優れたパフォーマンスのマシンコードを生成するため、GCCを使用しています(私たちのベンチマークに応じて最大数パーセントの そしてClangは開発のために通常より便利です。 が、当社の継続的インテグレーション(CI)プラットフォームを運チェックのための十数の組み合わせとなります。 +Clang should be used instead of gcc. Our continuous integration (CI) platform runs checks for about a dozen build combinations. -UBUNTUにGCCをインストールするには: `sudo apt install gcc g++` +On Ubuntu/Debian you can use the automatic installation script (check [official webpage](https://apt.llvm.org/)) -Gccのバージョンを確認する: `gcc --version`. の場合は下記9その指示に従う。https://clickhouse.tech/docs/ja/development/build/#install-gcc-10. +```bash +sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)" +``` -Mac OS XのビルドはClangでのみサポートされています。 ちょうど実行 `brew install llvm` - -Clangを使用する場合は、次のものもインストールできます `libc++` と `lld` あなたがそれが何であるか知っていれば。 を使用して `ccache` また、推奨されます。 +Mac OS X build is also supported.
Just run `brew install llvm` # 建築プロセス {#the-building-process} @@ -158,13 +158,6 @@ ClickHouseを構築する準備ができたので、別のディレクトリを 中の間 `build` cmakeを実行してビルドを構成します。 最初の実行の前に、コンパイラ(この例ではバージョン9gccコンパイラ)を指定する環境変数を定義する必要があります。 -Linux: - - export CC=gcc-10 CXX=g++-10 - cmake .. - -Mac OS X: - export CC=clang CXX=clang++ cmake .. diff --git a/docs/ja/getting-started/example-datasets/ontime.md b/docs/ja/getting-started/example-datasets/ontime.md index bd049e8caad..d12d8a36069 100644 --- a/docs/ja/getting-started/example-datasets/ontime.md +++ b/docs/ja/getting-started/example-datasets/ontime.md @@ -29,126 +29,127 @@ done テーブルの作成: ``` sql -CREATE TABLE `ontime` ( - `Year` UInt16, - `Quarter` UInt8, - `Month` UInt8, - `DayofMonth` UInt8, - `DayOfWeek` UInt8, - `FlightDate` Date, - `UniqueCarrier` FixedString(7), - `AirlineID` Int32, - `Carrier` FixedString(2), - `TailNum` String, - `FlightNum` String, - `OriginAirportID` Int32, - `OriginAirportSeqID` Int32, - `OriginCityMarketID` Int32, - `Origin` FixedString(5), - `OriginCityName` String, - `OriginState` FixedString(2), - `OriginStateFips` String, - `OriginStateName` String, - `OriginWac` Int32, - `DestAirportID` Int32, - `DestAirportSeqID` Int32, - `DestCityMarketID` Int32, - `Dest` FixedString(5), - `DestCityName` String, - `DestState` FixedString(2), - `DestStateFips` String, - `DestStateName` String, - `DestWac` Int32, - `CRSDepTime` Int32, - `DepTime` Int32, - `DepDelay` Int32, - `DepDelayMinutes` Int32, - `DepDel15` Int32, - `DepartureDelayGroups` String, - `DepTimeBlk` String, - `TaxiOut` Int32, - `WheelsOff` Int32, - `WheelsOn` Int32, - `TaxiIn` Int32, - `CRSArrTime` Int32, - `ArrTime` Int32, - `ArrDelay` Int32, - `ArrDelayMinutes` Int32, - `ArrDel15` Int32, - `ArrivalDelayGroups` Int32, - `ArrTimeBlk` String, - `Cancelled` UInt8, - `CancellationCode` FixedString(1), - `Diverted` UInt8, - `CRSElapsedTime` Int32, - `ActualElapsedTime` Int32, - `AirTime` Int32, - `Flights` Int32, - `Distance` Int32, - `DistanceGroup` UInt8, - `CarrierDelay` Int32, - `WeatherDelay` Int32, - `NASDelay` Int32, - `SecurityDelay` Int32, - `LateAircraftDelay` Int32, - `FirstDepTime` String, - `TotalAddGTime` String, - `LongestAddGTime` String, - `DivAirportLandings` String, - `DivReachedDest` String, - `DivActualElapsedTime` String, - `DivArrDelay` String, - `DivDistance` String, - `Div1Airport` String, - `Div1AirportID` Int32, - `Div1AirportSeqID` Int32, - `Div1WheelsOn` String, - `Div1TotalGTime` String, - `Div1LongestGTime` String, - `Div1WheelsOff` String, - `Div1TailNum` String, - `Div2Airport` String, - `Div2AirportID` Int32, - `Div2AirportSeqID` Int32, - `Div2WheelsOn` String, - `Div2TotalGTime` String, - `Div2LongestGTime` String, - `Div2WheelsOff` String, - `Div2TailNum` String, - `Div3Airport` String, - `Div3AirportID` Int32, - `Div3AirportSeqID` Int32, - `Div3WheelsOn` String, - `Div3TotalGTime` String, - `Div3LongestGTime` String, - `Div3WheelsOff` String, - `Div3TailNum` String, - `Div4Airport` String, - `Div4AirportID` Int32, - `Div4AirportSeqID` Int32, - `Div4WheelsOn` String, - `Div4TotalGTime` String, - `Div4LongestGTime` String, - `Div4WheelsOff` String, - `Div4TailNum` String, - `Div5Airport` String, - `Div5AirportID` Int32, - `Div5AirportSeqID` Int32, - `Div5WheelsOn` String, - `Div5TotalGTime` String, - `Div5LongestGTime` String, - `Div5WheelsOff` String, - `Div5TailNum` String +CREATE TABLE `ontime` +( + `Year` UInt16, + `Quarter` UInt8, + `Month` UInt8, + `DayofMonth` UInt8, + `DayOfWeek` UInt8, + `FlightDate` Date, + `Reporting_Airline` String, + 
`DOT_ID_Reporting_Airline` Int32, + `IATA_CODE_Reporting_Airline` String, + `Tail_Number` Int32, + `Flight_Number_Reporting_Airline` String, + `OriginAirportID` Int32, + `OriginAirportSeqID` Int32, + `OriginCityMarketID` Int32, + `Origin` FixedString(5), + `OriginCityName` String, + `OriginState` FixedString(2), + `OriginStateFips` String, + `OriginStateName` String, + `OriginWac` Int32, + `DestAirportID` Int32, + `DestAirportSeqID` Int32, + `DestCityMarketID` Int32, + `Dest` FixedString(5), + `DestCityName` String, + `DestState` FixedString(2), + `DestStateFips` String, + `DestStateName` String, + `DestWac` Int32, + `CRSDepTime` Int32, + `DepTime` Int32, + `DepDelay` Int32, + `DepDelayMinutes` Int32, + `DepDel15` Int32, + `DepartureDelayGroups` String, + `DepTimeBlk` String, + `TaxiOut` Int32, + `WheelsOff` Int32, + `WheelsOn` Int32, + `TaxiIn` Int32, + `CRSArrTime` Int32, + `ArrTime` Int32, + `ArrDelay` Int32, + `ArrDelayMinutes` Int32, + `ArrDel15` Int32, + `ArrivalDelayGroups` Int32, + `ArrTimeBlk` String, + `Cancelled` UInt8, + `CancellationCode` FixedString(1), + `Diverted` UInt8, + `CRSElapsedTime` Int32, + `ActualElapsedTime` Int32, + `AirTime` Nullable(Int32), + `Flights` Int32, + `Distance` Int32, + `DistanceGroup` UInt8, + `CarrierDelay` Int32, + `WeatherDelay` Int32, + `NASDelay` Int32, + `SecurityDelay` Int32, + `LateAircraftDelay` Int32, + `FirstDepTime` String, + `TotalAddGTime` String, + `LongestAddGTime` String, + `DivAirportLandings` String, + `DivReachedDest` String, + `DivActualElapsedTime` String, + `DivArrDelay` String, + `DivDistance` String, + `Div1Airport` String, + `Div1AirportID` Int32, + `Div1AirportSeqID` Int32, + `Div1WheelsOn` String, + `Div1TotalGTime` String, + `Div1LongestGTime` String, + `Div1WheelsOff` String, + `Div1TailNum` String, + `Div2Airport` String, + `Div2AirportID` Int32, + `Div2AirportSeqID` Int32, + `Div2WheelsOn` String, + `Div2TotalGTime` String, + `Div2LongestGTime` String, + `Div2WheelsOff` String, + `Div2TailNum` String, + `Div3Airport` String, + `Div3AirportID` Int32, + `Div3AirportSeqID` Int32, + `Div3WheelsOn` String, + `Div3TotalGTime` String, + `Div3LongestGTime` String, + `Div3WheelsOff` String, + `Div3TailNum` String, + `Div4Airport` String, + `Div4AirportID` Int32, + `Div4AirportSeqID` Int32, + `Div4WheelsOn` String, + `Div4TotalGTime` String, + `Div4LongestGTime` String, + `Div4WheelsOff` String, + `Div4TailNum` String, + `Div5Airport` String, + `Div5AirportID` Int32, + `Div5AirportSeqID` Int32, + `Div5WheelsOn` String, + `Div5TotalGTime` String, + `Div5LongestGTime` String, + `Div5WheelsOff` String, + `Div5TailNum` String ) ENGINE = MergeTree -PARTITION BY Year -ORDER BY (Carrier, FlightDate) -SETTINGS index_granularity = 8192; + PARTITION BY Year + ORDER BY (IATA_CODE_Reporting_Airline, FlightDate) + SETTINGS index_granularity = 8192; ``` データのロード: ``` bash -$ for i in *.zip; do echo $i; unzip -cq $i '*.csv' | sed 's/\.00//g' | clickhouse-client --host=example-perftest01j --query="INSERT INTO ontime FORMAT CSVWithNames"; done +ls -1 *.zip | xargs -I{} -P $(nproc) bash -c "echo {}; unzip -cq {} '*.csv' | sed 's/\.00//g' | clickhouse-client --input_format_with_names_use_header=0 --query='INSERT INTO ontime FORMAT CSVWithNames'" ``` ## パーティション済みデータのダウンロード {#download-of-prepared-partitions} @@ -212,10 +213,10 @@ LIMIT 10; Q4. 
2007年のキャリア別の遅延の数 ``` sql -SELECT Carrier, count(*) +SELECT IATA_CODE_Reporting_Airline AS Carrier, count(*) FROM ontime WHERE DepDelay>10 AND Year=2007 -GROUP BY Carrier +GROUP BY IATA_CODE_Reporting_Airline ORDER BY count(*) DESC; ``` @@ -226,32 +227,32 @@ SELECT Carrier, c, c2, c*100/c2 as c3 FROM ( SELECT - Carrier, + IATA_CODE_Reporting_Airline AS Carrier, count(*) AS c FROM ontime WHERE DepDelay>10 AND Year=2007 GROUP BY Carrier -) +) q JOIN ( SELECT - Carrier, + IATA_CODE_Reporting_Airline AS Carrier, count(*) AS c2 FROM ontime WHERE Year=2007 GROUP BY Carrier -) USING Carrier +) qq USING Carrier ORDER BY c3 DESC; ``` 同じクエリのより良いバージョン: ``` sql -SELECT Carrier, avg(DepDelay>10)*100 AS c3 +SELECT IATA_CODE_Reporting_Airline AS Carrier, avg(DepDelay>10)*100 AS c3 FROM ontime WHERE Year=2007 -GROUP BY Carrier +GROUP BY IATA_CODE_Reporting_Airline ORDER BY c3 DESC ``` @@ -262,29 +263,29 @@ SELECT Carrier, c, c2, c*100/c2 as c3 FROM ( SELECT - Carrier, + IATA_CODE_Reporting_Airline AS Carrier, count(*) AS c FROM ontime WHERE DepDelay>10 AND Year>=2000 AND Year<=2008 GROUP BY Carrier -) +) q JOIN ( SELECT - Carrier, + IATA_CODE_Reporting_Airline AS Carrier, count(*) AS c2 FROM ontime WHERE Year>=2000 AND Year<=2008 GROUP BY Carrier -) USING Carrier +) qq USING Carrier ORDER BY c3 DESC; ``` 同じクエリのより良いバージョン: ``` sql -SELECT Carrier, avg(DepDelay>10)*100 AS c3 +SELECT IATA_CODE_Reporting_Airline AS Carrier, avg(DepDelay>10)*100 AS c3 FROM ontime WHERE Year>=2000 AND Year<=2008 GROUP BY Carrier @@ -303,7 +304,7 @@ FROM from ontime WHERE DepDelay>10 GROUP BY Year -) +) q JOIN ( select @@ -311,7 +312,7 @@ JOIN count(*) as c2 from ontime GROUP BY Year -) USING (Year) +) qq USING (Year) ORDER BY Year; ``` @@ -346,7 +347,7 @@ Q10. ``` sql SELECT - min(Year), max(Year), Carrier, count(*) AS cnt, + min(Year), max(Year), IATA_CODE_Reporting_Airline AS Carrier, count(*) AS cnt, sum(ArrDelayMinutes>30) AS flights_delayed, round(sum(ArrDelayMinutes>30)/count(*),2) AS rate FROM ontime diff --git a/docs/ja/sql-reference/functions/bitmap-functions.md b/docs/ja/sql-reference/functions/bitmap-functions.md index cc57e762610..de3ce938444 100644 --- a/docs/ja/sql-reference/functions/bitmap-functions.md +++ b/docs/ja/sql-reference/functions/bitmap-functions.md @@ -35,7 +35,7 @@ SELECT bitmapBuild([1, 2, 3, 4, 5]) AS res, toTypeName(res) ``` text ┌─res─┬─toTypeName(bitmapBuild([1, 2, 3, 4, 5]))─────┐ -│  │ AggregateFunction(groupBitmap, UInt8) │ +│ │ AggregateFunction(groupBitmap, UInt8) │ └─────┴──────────────────────────────────────────────┘ ``` diff --git a/docs/ja/sql-reference/functions/hash-functions.md b/docs/ja/sql-reference/functions/hash-functions.md index d48e6846bb4..a98ae60690d 100644 --- a/docs/ja/sql-reference/functions/hash-functions.md +++ b/docs/ja/sql-reference/functions/hash-functions.md @@ -434,13 +434,13 @@ A [FixedString(16)](../../sql-reference/data-types/fixedstring.md) データ型 **例** ``` sql -SELECT murmurHash3_128('example_string') AS MurmurHash3, toTypeName(MurmurHash3) AS type +SELECT hex(murmurHash3_128('example_string')) AS MurmurHash3, toTypeName(MurmurHash3) AS type; ``` ``` text -┌─MurmurHash3──────┬─type────────────┐ -│ 6�1�4"S5KT�~~q │ FixedString(16) │ -└──────────────────┴─────────────────┘ +┌─MurmurHash3──────────────────────┬─type───┐ +│ 368A1A311CB7342253354B548E7E7E71 │ String │ +└──────────────────────────────────┴────────┘ ``` ## xxHash32,xxHash64 {#hash-functions-xxhash32} diff --git a/docs/ja/sql-reference/statements/select/index.md 
b/docs/ja/sql-reference/statements/select/index.md deleted file mode 120000 index 9c649322c82..00000000000 --- a/docs/ja/sql-reference/statements/select/index.md +++ /dev/null @@ -1 +0,0 @@ -../../../../en/sql-reference/statements/select/index.md \ No newline at end of file diff --git a/docs/ja/sql-reference/statements/select/index.md b/docs/ja/sql-reference/statements/select/index.md new file mode 100644 index 00000000000..b1a97ba1b28 --- /dev/null +++ b/docs/ja/sql-reference/statements/select/index.md @@ -0,0 +1,283 @@ +--- +title: SELECT Query +toc_folder_title: SELECT +toc_priority: 32 +toc_title: Overview +--- + +# SELECT Query {#select-queries-syntax} + +`SELECT` queries perform data retrieval. By default, the requested data is returned to the client, while in conjunction with [INSERT INTO](../../../sql-reference/statements/insert-into.md) it can be forwarded to a different table. + +## Syntax {#syntax} + +``` sql +[WITH expr_list|(subquery)] +SELECT [DISTINCT] expr_list +[FROM [db.]table | (subquery) | table_function] [FINAL] +[SAMPLE sample_coeff] +[ARRAY JOIN ...] +[GLOBAL] [ANY|ALL|ASOF] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER|SEMI|ANTI] JOIN (subquery)|table (ON <expr_list>)|(USING <column_list>) +[PREWHERE expr] +[WHERE expr] +[GROUP BY expr_list] [WITH ROLLUP|WITH CUBE] [WITH TOTALS] +[HAVING expr] +[ORDER BY expr_list] [WITH FILL] [FROM expr] [TO expr] [STEP expr] +[LIMIT [offset_value, ]n BY columns] +[LIMIT [n, ]m] [WITH TIES] +[SETTINGS ...] +[UNION ...] +[INTO OUTFILE filename] +[FORMAT format] +``` + +All clauses are optional, except for the required list of expressions immediately after `SELECT` which is covered in more detail [below](#select-clause). + +Specifics of each optional clause are covered in separate sections, which are listed in the same order as they are executed: + +- [WITH clause](../../../sql-reference/statements/select/with.md) +- [FROM clause](../../../sql-reference/statements/select/from.md) +- [SAMPLE clause](../../../sql-reference/statements/select/sample.md) +- [JOIN clause](../../../sql-reference/statements/select/join.md) +- [PREWHERE clause](../../../sql-reference/statements/select/prewhere.md) +- [WHERE clause](../../../sql-reference/statements/select/where.md) +- [GROUP BY clause](../../../sql-reference/statements/select/group-by.md) +- [LIMIT BY clause](../../../sql-reference/statements/select/limit-by.md) +- [HAVING clause](../../../sql-reference/statements/select/having.md) +- [SELECT clause](#select-clause) +- [DISTINCT clause](../../../sql-reference/statements/select/distinct.md) +- [LIMIT clause](../../../sql-reference/statements/select/limit.md) +- [OFFSET clause](../../../sql-reference/statements/select/offset.md) +- [UNION clause](../../../sql-reference/statements/select/union.md) +- [INTO OUTFILE clause](../../../sql-reference/statements/select/into-outfile.md) +- [FORMAT clause](../../../sql-reference/statements/select/format.md) + +## SELECT Clause {#select-clause} + +[Expressions](../../../sql-reference/syntax.md#syntax-expressions) specified in the `SELECT` clause are calculated after all the operations in the clauses described above are finished. These expressions work as if they apply to separate rows in the result. If expressions in the `SELECT` clause contain aggregate functions, then ClickHouse processes aggregate functions and expressions used as their arguments during the [GROUP BY](../../../sql-reference/statements/select/group-by.md) aggregation. + +If you want to include all columns in the result, use the asterisk (`*`) symbol.
For example, `SELECT * FROM ...`. + + +### COLUMNS expression {#columns-expression} + +To match some columns in the result with a [re2](https://en.wikipedia.org/wiki/RE2_(software)) regular expression, you can use the `COLUMNS` expression. + +``` sql +COLUMNS('regexp') +``` + +For example, consider the table: + +``` sql +CREATE TABLE default.col_names (aa Int8, ab Int8, bc Int8) ENGINE = TinyLog +``` + +The following query selects data from all the columns containing the `a` symbol in their name. + +``` sql +SELECT COLUMNS('a') FROM col_names +``` + +``` text +┌─aa─┬─ab─┐ +│ 1 │ 1 │ +└────┴────┘ +``` + +Note that the selected columns are not returned in alphabetical order. + +You can use multiple `COLUMNS` expressions in a query and apply functions to them. + +For example: + +``` sql +SELECT COLUMNS('a'), COLUMNS('c'), toTypeName(COLUMNS('c')) FROM col_names +``` + +``` text +┌─aa─┬─ab─┬─bc─┬─toTypeName(bc)─┐ +│ 1 │ 1 │ 1 │ Int8 │ +└────┴────┴────┴────────────────┘ +``` + +Each column returned by the `COLUMNS` expression is passed to the function as a separate argument. You can also pass other arguments to the function if it supports them. Be careful when using functions. If a function doesn’t support the number of arguments you have passed to it, ClickHouse throws an exception. + +For example: + +``` sql +SELECT COLUMNS('a') + COLUMNS('c') FROM col_names +``` + +``` text +Received exception from server (version 19.14.1): +Code: 42. DB::Exception: Received from localhost:9000. DB::Exception: Number of arguments for function plus doesn't match: passed 3, should be 2. +``` + +In this example, `COLUMNS('a')` returns two columns: `aa` and `ab`. `COLUMNS('c')` returns the `bc` column. The `+` operator can’t be applied to 3 arguments, so ClickHouse throws an exception with the relevant message. + +Columns that matched the `COLUMNS` expression can have different data types. If `COLUMNS` doesn’t match any columns and is the only expression in `SELECT`, ClickHouse throws an exception. + +### Asterisk {#asterisk} + +You can put an asterisk in any part of a query instead of an expression. When the query is analyzed, the asterisk is expanded to a list of all table columns (excluding the `MATERIALIZED` and `ALIAS` columns). There are only a few cases when using an asterisk is justified: + +- When creating a table dump. +- For tables containing just a few columns, such as system tables. +- For getting information about what columns are in a table. In this case, set `LIMIT 1`. But it is better to use the `DESC TABLE` query. +- When there is strong filtration on a small number of columns using `PREWHERE`. +- In subqueries (since columns that aren’t needed for the external query are excluded from subqueries). + +In all other cases, we don’t recommend using the asterisk, since it only gives you the drawbacks of a columnar DBMS instead of the advantages. + +### Extreme Values {#extreme-values} + +In addition to results, you can also get minimum and maximum values for the result columns. To do this, set the **extremes** setting to 1. Minimums and maximums are calculated for numeric types, dates, and dates with times. For other columns, the default values are output. + +An extra two rows are calculated – the minimums and maximums, respectively. These extra two rows are output in `JSON*`, `TabSeparated*`, and `Pretty*` [formats](../../../interfaces/formats.md), separate from the other rows. They are not output for other formats.
+ +In `JSON*` formats, the extreme values are output in a separate ‘extremes’ field. In `TabSeparated*` formats, the row comes after the main result, and after ‘totals’ if present. It is preceded by an empty row (after the other data). In `Pretty*` formats, the row is output as a separate table after the main result, and after `totals` if present. + +Extreme values are calculated for rows before `LIMIT`, but after `LIMIT BY`. However, when using `LIMIT offset, size`, the rows before `offset` are included in `extremes`. In stream requests, the result may also include a small number of rows that passed through `LIMIT`. + +### Notes {#notes} + +You can use synonyms (`AS` aliases) in any part of a query. + +The `GROUP BY` and `ORDER BY` clauses do not support positional arguments. This contradicts MySQL, but conforms to standard SQL. For example, `GROUP BY 1, 2` will be interpreted as grouping by constants (i.e. aggregation of all rows into one). + +## Implementation Details {#implementation-details} + +If the query omits the `DISTINCT`, `GROUP BY` and `ORDER BY` clauses and the `IN` and `JOIN` subqueries, the query will be completely stream processed, using O(1) amount of RAM. Otherwise, the query might consume a lot of RAM if the appropriate restrictions are not specified: + +- `max_memory_usage` +- `max_rows_to_group_by` +- `max_rows_to_sort` +- `max_rows_in_distinct` +- `max_bytes_in_distinct` +- `max_rows_in_set` +- `max_bytes_in_set` +- `max_rows_in_join` +- `max_bytes_in_join` +- `max_bytes_before_external_sort` +- `max_bytes_before_external_group_by` + +For more information, see the section “Settings”. It is possible to use external sorting (saving temporary tables to a disk) and external aggregation. + +## SELECT modifiers {#select-modifiers} + +You can use the following modifiers in `SELECT` queries. + +### APPLY {#apply-modifier} + +Allows you to invoke some function for each row returned by an outer table expression of a query. + +**Syntax:** + +``` sql +SELECT <expr> APPLY( <func> ) FROM [db.]table_name +``` + +**Example:** + +``` sql +CREATE TABLE columns_transformers (i Int64, j Int16, k Int64) ENGINE = MergeTree ORDER by (i); +INSERT INTO columns_transformers VALUES (100, 10, 324), (120, 8, 23); +SELECT * APPLY(sum) FROM columns_transformers; +``` + +``` +┌─sum(i)─┬─sum(j)─┬─sum(k)─┐ +│ 220 │ 18 │ 347 │ +└────────┴────────┴────────┘ +``` + +### EXCEPT {#except-modifier} + +Specifies the names of one or more columns to exclude from the result. All matching column names are omitted from the output. + +**Syntax:** + +``` sql +SELECT <expr> EXCEPT ( col_name1 [, col_name2, col_name3, ...] ) FROM [db.]table_name +``` + +**Example:** + +``` sql +SELECT * EXCEPT (i) from columns_transformers; +``` + +``` +┌──j─┬───k─┐ +│ 10 │ 324 │ +│ 8 │ 23 │ +└────┴─────┘ +``` + +### REPLACE {#replace-modifier} + +Specifies one or more [expression aliases](../../../sql-reference/syntax.md#syntax-expression_aliases). Each alias must match a column name from the `SELECT *` statement. In the output column list, the column that matches the alias is replaced by the expression in that `REPLACE`. + +This modifier does not change the names or order of columns. However, it can change the value and the value type.
+ +**Syntax:** + +``` sql +SELECT <expr> REPLACE( <expr> AS col_name) from [db.]table_name +``` + +**Example:** + +``` sql +SELECT * REPLACE(i + 1 AS i) from columns_transformers; +``` + +``` +┌───i─┬──j─┬───k─┐ +│ 101 │ 10 │ 324 │ +│ 121 │ 8 │ 23 │ +└─────┴────┴─────┘ +``` + +### Modifier Combinations {#modifier-combinations} + +You can use each modifier separately or combine them. + +**Examples:** + +Using the same modifier multiple times. + +``` sql +SELECT COLUMNS('[jk]') APPLY(toString) APPLY(length) APPLY(max) from columns_transformers; +``` + +``` +┌─max(length(toString(j)))─┬─max(length(toString(k)))─┐ +│ 2 │ 3 │ +└──────────────────────────┴──────────────────────────┘ +``` + +Using multiple modifiers in a single query. + +``` sql +SELECT * REPLACE(i + 1 AS i) EXCEPT (j) APPLY(sum) from columns_transformers; +``` + +``` +┌─sum(plus(i, 1))─┬─sum(k)─┐ +│ 222 │ 347 │ +└─────────────────┴────────┘ +``` + +## SETTINGS in SELECT Query {#settings-in-select} + +You can specify the necessary settings right in the `SELECT` query. The setting value is applied only to this query and is reset to the default or previous value after the query is executed. + +For other ways to apply settings, see [here](../../../operations/settings/index.md). + +**Example** + +``` sql +SELECT * FROM some_table SETTINGS optimize_read_in_order=1, cast_keep_nullable=1; +``` diff --git a/docs/ja/sql-reference/statements/select/offset.md b/docs/ja/sql-reference/statements/select/offset.md new file mode 100644 index 00000000000..3efd916bcb8 --- /dev/null +++ b/docs/ja/sql-reference/statements/select/offset.md @@ -0,0 +1,86 @@ +--- +toc_title: OFFSET +--- + +# OFFSET FETCH Clause {#offset-fetch} + +`OFFSET` and `FETCH` allow you to retrieve data by portions. They specify a row block which you want to get by a single query. + +``` sql +OFFSET offset_row_count {ROW | ROWS} [FETCH {FIRST | NEXT} fetch_row_count {ROW | ROWS} {ONLY | WITH TIES}] +``` + +The `offset_row_count` or `fetch_row_count` value can be a number or a literal constant. You can omit `fetch_row_count`; by default, it equals 1. + +`OFFSET` specifies the number of rows to skip before starting to return rows from the query result set. + +`FETCH` specifies the maximum number of rows that can be in the result of a query. + +The `ONLY` option is used to return rows that immediately follow the rows omitted by the `OFFSET`. In this case the `FETCH` is an alternative to the [LIMIT](../../../sql-reference/statements/select/limit.md) clause. For example, the following query + +``` sql +SELECT * FROM test_fetch ORDER BY a OFFSET 1 ROW FETCH FIRST 3 ROWS ONLY; +``` + +is identical to the query + +``` sql +SELECT * FROM test_fetch ORDER BY a LIMIT 3 OFFSET 1; +``` + +The `WITH TIES` option is used to return any additional rows that tie for the last place in the result set according to the `ORDER BY` clause. For example, if `fetch_row_count` is set to 5 but two additional rows match the values of the `ORDER BY` columns in the fifth row, the result set will contain seven rows. + +!!! note "Note" + According to the standard, the `OFFSET` clause must come before the `FETCH` clause if both are present. + +!!! note "Note" + The real offset can also depend on the [offset](../../../operations/settings/settings.md#offset) setting.
+ +## Examples {#examples} + +Input table: + +``` text +┌─a─┬─b─┐ +│ 1 │ 1 │ +│ 2 │ 1 │ +│ 3 │ 4 │ +│ 1 │ 3 │ +│ 5 │ 4 │ +│ 0 │ 6 │ +│ 5 │ 7 │ +└───┴───┘ +``` + +Usage of the `ONLY` option: + +``` sql +SELECT * FROM test_fetch ORDER BY a OFFSET 3 ROW FETCH FIRST 3 ROWS ONLY; +``` + +Result: + +``` text +┌─a─┬─b─┐ +│ 2 │ 1 │ +│ 3 │ 4 │ +│ 5 │ 4 │ +└───┴───┘ +``` + +Usage of the `WITH TIES` option: + +``` sql +SELECT * FROM test_fetch ORDER BY a OFFSET 3 ROW FETCH FIRST 3 ROWS WITH TIES; +``` + +Result: + +``` text +┌─a─┬─b─┐ +│ 2 │ 1 │ +│ 3 │ 4 │ +│ 5 │ 4 │ +│ 5 │ 7 │ +└───┴───┘ +``` diff --git a/docs/ru/commercial/cloud.md b/docs/ru/commercial/cloud.md index 6c192a34f9f..e00fc3be673 100644 --- a/docs/ru/commercial/cloud.md +++ b/docs/ru/commercial/cloud.md @@ -39,4 +39,20 @@ toc_title: "Поставщики облачных услуг ClickHouse" - поддержка прав доступа, one-key восстановления, многоуровневая защита сети, шифрование облачного диска; - полная интеграция с облачными системами логирования, базами данных и инструментами обработки данных; - встроенная платформа для мониторинга и управления базами данных; -- техническая поддержка от экспертов по работе с базами данных. \ No newline at end of file +- техническая поддержка от экспертов по работе с базами данных. + +## SberCloud {#sbercloud} + +[Облачная платформа SberCloud.Advanced](https://sbercloud.ru/ru/advanced): + +- предоставляет более 50 высокотехнологичных сервисов; +- позволяет быстро создавать и эффективно управлять ИТ-инфраструктурой, приложениями и интернет-сервисами; +- радикально минимизирует ресурсы, требуемые для работы корпоративных ИТ-систем; +- в разы сокращает время вывода новых продуктов на рынок. + +SberCloud.Advanced предоставляет [MapReduce Service (MRS)](https://docs.sbercloud.ru/mrs/ug/topics/ug__clickhouse.html) — надежную, безопасную и простую в использовании платформу корпоративного уровня для хранения, обработки и анализа больших данных. MRS позволяет быстро создавать и управлять кластерами ClickHouse. + +- Инстанс ClickHouse состоит из трех узлов ZooKeeper и нескольких узлов ClickHouse. Выделенный режим реплики используется для обеспечения высокой надежности двойных копий данных. +- MRS предлагает возможности гибкого масштабирования при быстром росте сервисов в сценариях, когда емкости кластерного хранилища или вычислительных ресурсов процессора недостаточно. MRS в один клик предоставляет инструмент для балансировки данных при расширении узлов ClickHouse в кластере. Вы можете определить режим и время балансировки данных на основе характеристик сервиса, чтобы обеспечить доступность сервиса. +- MRS использует архитектуру развертывания высокой доступности на основе Elastic Load Balance (ELB) — сервиса для автоматического распределения трафика на несколько внутренних узлов. Благодаря ELB, данные записываются в локальные таблицы и считываются из распределенных таблиц на разных узлах. Такая архитектура повышает отказоустойчивость кластера и гарантирует высокую доступность приложений. + diff --git a/docs/ru/development/developer-instruction.md b/docs/ru/development/developer-instruction.md index 66bbb9030cf..463d38a44fb 100644 --- a/docs/ru/development/developer-instruction.md +++ b/docs/ru/development/developer-instruction.md @@ -136,18 +136,18 @@ ClickHouse использует для сборки некоторое коли ## Компилятор C++ {#kompiliator-c} -В качестве компилятора C++ поддерживается GCC начиная с версии 9 или Clang начиная с версии 8. +В качестве компилятора C++ поддерживается Clang начиная с версии 11. 
-Официальные сборки от Яндекса, на данный момент, используют GCC, так как он генерирует слегка более производительный машинный код (разница в среднем до нескольких процентов по нашим бенчмаркам). Clang обычно более удобен для разработки. Впрочем, наша среда continuous integration проверяет около десятка вариантов сборки. +Впрочем, наша среда continuous integration проверяет около десятка вариантов сборки, включая gcc, но сборка с помощью gcc непригодна для использования в продакшене. -Для установки GCC под Ubuntu, выполните: `sudo apt install gcc g++`. +On Ubuntu/Debian you can use the automatic installation script (check [official webpage](https://apt.llvm.org/)) -Проверьте версию gcc: `gcc --version`. Если версия меньше 10, то следуйте инструкции: https://clickhouse.tech/docs/ru/development/build/#install-gcc-10. +```bash +sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)" +``` Сборка под Mac OS X поддерживается только для компилятора Clang. Чтобы установить его выполните `brew install llvm` -Если вы решили использовать Clang, вы также можете установить `libc++` и `lld`, если вы знаете, что это такое. При желании, установите `ccache`. - ## Процесс сборки {#protsess-sborki} Теперь вы готовы к сборке ClickHouse. Для размещения собранных файлов, рекомендуется создать отдельную директорию build внутри директории ClickHouse: @@ -158,14 +158,7 @@ ClickHouse использует для сборки некоторое коли Вы можете иметь несколько разных директорий (build_release, build_debug) для разных вариантов сборки. Находясь в директории build, выполните конфигурацию сборки с помощью CMake. -Перед первым запуском необходимо выставить переменные окружения, отвечающие за выбор компилятора (в данном примере это - gcc версии 9). - -Linux: - - export CC=gcc-10 CXX=g++-10 - cmake .. - -Mac OS X: +Перед первым запуском необходимо выставить переменные окружения, отвечающие за выбор компилятора. export CC=clang CXX=clang++ cmake .. diff --git a/docs/ru/development/style.md b/docs/ru/development/style.md index f08ecc3c4c7..de29e629ceb 100644 --- a/docs/ru/development/style.md +++ b/docs/ru/development/style.md @@ -747,7 +747,7 @@ The dictionary is configured incorrectly. Есть два основных варианта проверки на такие ошибки: * Исключение с кодом `LOGICAL_ERROR`. Его можно использовать для важных проверок, которые делаются в том числе в релизной сборке. -* `assert`. Такие условия не проверяются в релизной сборке, можно использовать для тяжёлых и опциональных проверок. +* `assert`. Такие условия не проверяются в релизной сборке, можно использовать для тяжёлых и опциональных проверок. Пример сообщения, у которого должен быть код `LOGICAL_ERROR`: `Block header is inconsistent with Chunk in ICompicatedProcessor::munge(). It is a bug!` @@ -780,7 +780,7 @@ The dictionary is configured incorrectly. **2.** Язык - C++20 (см. список доступных [C++20 фич](https://en.cppreference.com/w/cpp/compiler_support#C.2B.2B20_features)). -**3.** Компилятор - `gcc`. На данный момент (август 2020), код собирается версией 9.3. (Также код может быть собран `clang` версий 10 и 9) +**3.** Компилятор - `clang`. На данный момент (апрель 2021), код собирается версией 11. (Также код может быть собран `gcc` версии 10, но такая сборка не тестируется и непригодна для продакшена). Используется стандартная библиотека (реализация `libc++`). 
diff --git a/docs/ru/engines/table-engines/index.md b/docs/ru/engines/table-engines/index.md index a364a3cb972..b17b2124250 100644 --- a/docs/ru/engines/table-engines/index.md +++ b/docs/ru/engines/table-engines/index.md @@ -48,6 +48,14 @@ toc_title: "Введение" Движки семейства: +- [Kafka](integrations/kafka.md#kafka) +- [MySQL](integrations/mysql.md#mysql) +- [ODBC](integrations/odbc.md#table-engine-odbc) +- [JDBC](integrations/jdbc.md#table-engine-jdbc) +- [S3](integrations/s3.md#table-engine-s3) + +### Специальные движки {#spetsialnye-dvizhki} + - [ODBC](../../engines/table-engines/integrations/odbc.md) - [JDBC](../../engines/table-engines/integrations/jdbc.md) - [MySQL](../../engines/table-engines/integrations/mysql.md) @@ -84,4 +92,3 @@ toc_title: "Введение" Чтобы получить данные из виртуального столбца, необходимо указать его название в запросе `SELECT`. `SELECT *` не отображает данные из виртуальных столбцов. При создании таблицы со столбцом, имя которого совпадает с именем одного из виртуальных столбцов таблицы, виртуальный столбец становится недоступным. Не делайте так. Чтобы помочь избежать конфликтов, имена виртуальных столбцов обычно предваряются подчеркиванием. - diff --git a/docs/ru/engines/table-engines/integrations/postgresql.md b/docs/ru/engines/table-engines/integrations/postgresql.md index 8964b1dbf02..cb8e38ae5c9 100644 --- a/docs/ru/engines/table-engines/integrations/postgresql.md +++ b/docs/ru/engines/table-engines/integrations/postgresql.md @@ -22,7 +22,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] Структура таблицы может отличаться от исходной структуры таблицы PostgreSQL: -- Имена столбцов должны быть такими же, как в исходной таблице MySQL, но вы можете использовать только некоторые из этих столбцов и в любом порядке. +- Имена столбцов должны быть такими же, как в исходной таблице PostgreSQL, но вы можете использовать только некоторые из этих столбцов и в любом порядке. - Типы столбцов могут отличаться от типов в исходной таблице PostgreSQL. ClickHouse пытается [приводить](../../../sql-reference/functions/type-conversion-functions.md#type_conversion_function-cast) values to the ClickHouse data types. - Настройка `external_table_functions_use_nulls` определяет как обрабатывать Nullable столбцы. По умолчанию 1, если 0 - табличная функция не будет делать nullable столбцы и будет вместо null выставлять значения по умолчанию для скалярного типа. Это также применимо для null значений внутри массивов. 
@@ -94,10 +94,10 @@ postgres=# INSERT INTO test (int_id, str, "float") VALUES (1,'test',2); INSERT 0 1 postgresql> SELECT * FROM test; - int_id | int_nullable | float | str | float_nullable ---------+--------------+-------+------+---------------- - 1 | | 2 | test | -(1 row) + int_id | int_nullable | float | str | float_nullable + --------+--------------+-------+------+---------------- + 1 | | 2 | test | + (1 row) ``` Таблица в ClickHouse, получение данных из PostgreSQL таблицы, созданной выше: diff --git a/docs/ru/engines/table-engines/integrations/s3.md b/docs/ru/engines/table-engines/integrations/s3.md index fa10e8ebc34..177d69dc3e0 100644 --- a/docs/ru/engines/table-engines/integrations/s3.md +++ b/docs/ru/engines/table-engines/integrations/s3.md @@ -11,21 +11,21 @@ toc_title: S3 ``` sql CREATE TABLE s3_engine_table (name String, value UInt32) -ENGINE = S3(path, [aws_access_key_id, aws_secret_access_key,] format, structure, [compression]) +ENGINE = S3(path, [aws_access_key_id, aws_secret_access_key,] format, [compression]) ``` **Параметры движка** - `path` — URL-адрес бакета с указанием пути к файлу. Поддерживает следующие подстановочные знаки в режиме "только чтение": `*`, `?`, `{abc,def}` и `{N..M}` где `N`, `M` — числа, `'abc'`, `'def'` — строки. Подробнее смотри [ниже](#wildcards-in-path). - `format` — [формат](../../../interfaces/formats.md#formats) файла. -- `structure` — структура таблицы в формате `'column1_name column1_type, column2_name column2_type, ...'`. -- `compression` — тип сжатия. Возможные значения: none, gzip/gz, brotli/br, xz/LZMA, zstd/zst. Необязательный параметр. Если не указано, то тип сжатия определяется автоматически по расширению файла. +- `aws_access_key_id`, `aws_secret_access_key` - данные пользователя учетной записи [AWS](https://aws.amazon.com/ru/). Вы можете использовать их для аутентификации ваших запросов. Необязательный параметр. Если параметры учетной записи не указаны, то используются данные из конфигурационного файла. Смотрите подробнее [Использование сервиса S3 для хранения данных](../mergetree-family/mergetree.md#table_engine-mergetree-s3). +- `compression` — тип сжатия. Возможные значения: `none`, `gzip/gz`, `brotli/br`, `xz/LZMA`, `zstd/zst`. Необязательный параметр. Если не указано, то тип сжатия определяется автоматически по расширению файла. **Пример** ``` sql CREATE TABLE s3_engine_table (name String, value UInt32) -ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/test-data.csv.gz', 'CSV', 'name String, value UInt32', 'gzip'); +ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/test-data.csv.gz', 'CSV', 'gzip'); INSERT INTO s3_engine_table VALUES ('one', 1), ('two', 2), ('three', 3); SELECT * FROM s3_engine_table LIMIT 2; ``` @@ -73,17 +73,19 @@ SELECT * FROM s3_engine_table LIMIT 2; Соображение безопасности: если злонамеренный пользователь попробует указать произвольные URL-адреса S3, параметр `s3_max_redirects` должен быть установлен в ноль, чтобы избежать атак [SSRF] (https://en.wikipedia.org/wiki/Server-side_request_forgery). Как альтернатива, в конфигурации сервера должен быть указан `remote_host_filter`. 
-## Настройки конечных точек {#endpoint-settings} +## Настройки точки приема запроса {#endpoint-settings} -Для конечной точки (которая соответствует точному префиксу URL-адреса) в конфигурационном файле могут быть заданы следующие настройки: +Для точки приема запроса (которая соответствует точному префиксу URL-адреса) в конфигурационном файле могут быть заданы следующие настройки: Обязательная настройка: -- `endpoint` — указывает префикс конечной точки. +- `endpoint` — указывает префикс точки приема запроса. Необязательные настройки: -- `access_key_id` и `secret_access_key` — указывают учетные данные для использования с данной конечной точкой. -- `use_environment_credentials` — если `true`, S3-клиент будет пытаться получить учетные данные из переменных среды и метаданных Amazon EC2 для данной конечной точки. Значение по умолчанию - `false`. -- `header` — добавляет указанный HTTP-заголовок к запросу на заданную конечную точку. Может быть определен несколько раз. +- `access_key_id` и `secret_access_key` — указывают учетные данные для использования с данной точкой приема запроса. +- `use_environment_credentials` — если `true`, S3-клиент будет пытаться получить учетные данные из переменных среды и метаданных [Amazon EC2](https://ru.wikipedia.org/wiki/Amazon_EC2) для данной точки приема запроса. Значение по умолчанию — `false`. +- `use_insecure_imds_request` — признак использования менее безопасного соединения при выполнении запроса к IMDS при получении учётных данных из метаданных Amazon EC2. Значение по умолчанию — `false`. +- `region` — название региона S3. +- `header` — добавляет указанный HTTP-заголовок к запросу на заданную точку приема запроса. Может быть определен несколько раз. - `server_side_encryption_customer_key_base64` — устанавливает необходимые заголовки для доступа к объектам S3 с шифрованием SSE-C. **Пример** @@ -94,7 +96,9 @@ SELECT * FROM s3_engine_table LIMIT 2; https://storage.yandexcloud.net/my-test-bucket-768/ + + @@ -103,7 +107,7 @@ SELECT * FROM s3_engine_table LIMIT 2; ## Примеры использования {#usage-examples} -Предположим, у нас есть несколько файлов в формате TSV со следующими URL-адресами в HDFS: +Предположим, у нас есть несколько файлов в формате TSV со следующими URL-адресами в S3: - 'https://storage.yandexcloud.net/my-test-bucket-768/some_prefix/some_file_1.csv' - 'https://storage.yandexcloud.net/my-test-bucket-768/some_prefix/some_file_2.csv' @@ -133,8 +137,7 @@ CREATE TABLE table_with_asterisk (name String, value UInt32) ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/{some,another}_prefix/*', 'CSV'); ``` -!!! warning "Warning" - Если список файлов содержит диапазоны чисел с ведущими нулями, используйте конструкцию с фигурными скобками для каждой цифры отдельно или используйте `?`. +Если список файлов содержит диапазоны чисел с ведущими нулями, используйте конструкцию с фигурными скобками для каждой цифры отдельно или используйте `?`. 4. 
Создание таблицы из файлов с именами `file-000.csv`, `file-001.csv`, … , `file-999.csv`:

@@ -142,9 +145,7 @@ ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/{some,another}_p
 CREATE TABLE big_table (name String, value UInt32)
 ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/big_prefix/file-{000..999}.csv', 'CSV');
 ```
+
**Смотрите также**

- [Табличная функция S3](../../../sql-reference/table-functions/s3.md)
-
-[Оригинальная статья](https://clickhouse.tech/docs/ru/engines/table-engines/integrations/s3/)
-
diff --git a/docs/ru/engines/table-engines/mergetree-family/mergetree.md b/docs/ru/engines/table-engines/mergetree-family/mergetree.md
index 7d7641a417d..4cff6fcfb80 100644
--- a/docs/ru/engines/table-engines/mergetree-family/mergetree.md
+++ b/docs/ru/engines/table-engines/mergetree-family/mergetree.md
@@ -727,6 +727,7 @@ SETTINGS storage_policy = 'moving_from_ssd_to_hdd'
    <endpoint>https://storage.yandexcloud.net/my-bucket/root-path/</endpoint>
    <access_key_id>your_access_key_id</access_key_id>
    <secret_access_key>your_secret_access_key</secret_access_key>
+   <region></region>
    <proxy>
        <uri>http://proxy1</uri>
        <uri>http://proxy2</uri>
    </proxy>

@@ -753,7 +754,9 @@ SETTINGS storage_policy = 'moving_from_ssd_to_hdd'

Необязательные параметры:

-- `use_environment_credentials` — признак, нужно ли считывать учетные данные AWS из переменных окружения `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY` и `AWS_SESSION_TOKEN`, если они есть. Значение по умолчанию: `false`.
+- `region` — название региона S3.
+- `use_environment_credentials` — признак, нужно ли считывать учетные данные AWS из сетевого окружения, а также из переменных окружения `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY` и `AWS_SESSION_TOKEN`, если они есть. Значение по умолчанию: `false`.
+- `use_insecure_imds_request` — признак, нужно ли использовать менее безопасное соединение при выполнении запроса к IMDS при получении учётных данных из метаданных Amazon EC2. Значение по умолчанию: `false`.
- `proxy` — конфигурация прокси-сервера для конечной точки S3. Каждый элемент `uri` внутри блока `proxy` должен содержать URL прокси-сервера.
- `connect_timeout_ms` — таймаут подключения к сокету в миллисекундах. Значение по умолчанию: 10 секунд.
- `request_timeout_ms` — таймаут выполнения запроса в миллисекундах. Значение по умолчанию: 5 секунд.
diff --git a/docs/ru/getting-started/example-datasets/cell-towers.md b/docs/ru/getting-started/example-datasets/cell-towers.md
new file mode 100644
index 00000000000..a5524248019
--- /dev/null
+++ b/docs/ru/getting-started/example-datasets/cell-towers.md
@@ -0,0 +1,128 @@
+---
+toc_priority: 21
+toc_title: Вышки сотовой связи
+---
+
+# Вышки сотовой связи {#cell-towers}
+
+Источник этого набора данных (dataset) — самая большая в мире открытая база данных о сотовых вышках — [OpenCellid](https://www.opencellid.org/). К 2021 году здесь накопилось более чем 40 миллионов записей о сотовых вышках (GSM, LTE, UMTS и т.д.) по всему миру с их географическими координатами и метаданными (код страны, сети и т.д.).
+
+OpenCelliD Project имеет лицензию Creative Commons Attribution-ShareAlike 4.0 International License, и мы распространяем снэпшот набора данных по условиям этой же лицензии. После авторизации можно загрузить последнюю версию набора данных.
+
+## Как получить набор данных {#get-the-dataset}
+
+1. Загрузите снэпшот набора данных за февраль 2021 [отсюда](https://datasets.clickhouse.tech/cell_towers.csv.xz) (729 MB).
+
+2. Если нужно, проверьте полноту и целостность при помощи команды:
+
+```
+md5sum cell_towers.csv.xz
+8cf986f4a0d9f12c6f384a0e9192c908 cell_towers.csv.xz
+```
+
+3.
Распакуйте набор данных при помощи команды: + +``` +xz -d cell_towers.csv.xz +``` + +4. Создайте таблицу: + +``` +CREATE TABLE cell_towers +( + radio Enum8('' = 0, 'CDMA' = 1, 'GSM' = 2, 'LTE' = 3, 'NR' = 4, 'UMTS' = 5), + mcc UInt16, + net UInt16, + area UInt16, + cell UInt64, + unit Int16, + lon Float64, + lat Float64, + range UInt32, + samples UInt32, + changeable UInt8, + created DateTime, + updated DateTime, + averageSignal UInt8 +) +ENGINE = MergeTree ORDER BY (radio, mcc, net, created); +``` + +5. Вставьте данные: +``` +clickhouse-client --query "INSERT INTO cell_towers FORMAT CSVWithNames" < cell_towers.csv +``` + +## Примеры {#examples} + +1. Количество вышек по типам: + +``` +SELECT radio, count() AS c FROM cell_towers GROUP BY radio ORDER BY c DESC + +┌─radio─┬────────c─┐ +│ UMTS │ 20686487 │ +│ LTE │ 12101148 │ +│ GSM │ 9931312 │ +│ CDMA │ 556344 │ +│ NR │ 867 │ +└───────┴──────────┘ + +5 rows in set. Elapsed: 0.011 sec. Processed 43.28 million rows, 43.28 MB (3.83 billion rows/s., 3.83 GB/s.) +``` + +2. Количество вышек по [мобильному коду страны (MCC)](https://ru.wikipedia.org/wiki/Mobile_Country_Code): + +``` +SELECT mcc, count() FROM cell_towers GROUP BY mcc ORDER BY count() DESC LIMIT 10 + +┌─mcc─┬─count()─┐ +│ 310 │ 5024650 │ +│ 262 │ 2622423 │ +│ 250 │ 1953176 │ +│ 208 │ 1891187 │ +│ 724 │ 1836150 │ +│ 404 │ 1729151 │ +│ 234 │ 1618924 │ +│ 510 │ 1353998 │ +│ 440 │ 1343355 │ +│ 311 │ 1332798 │ +└─────┴─────────┘ + +10 rows in set. Elapsed: 0.019 sec. Processed 43.28 million rows, 86.55 MB (2.33 billion rows/s., 4.65 GB/s.) +``` + +Можно увидеть, что по количеству вышек лидируют следующие страны: США, Германия, Россия. + +Вы также можете создать [внешний словарь](../../sql-reference/dictionaries/external-dictionaries/external-dicts.md) в ClickHouse для того, чтобы расшифровать эти значения. + +## Пример использования {#use-case} + +Рассмотрим применение функции `pointInPolygon`. + +1. Создаем таблицу, в которой будем хранить многоугольники: + +``` +CREATE TEMPORARY TABLE moscow (polygon Array(Tuple(Float64, Float64))); +``` + +2. 
Очертания Москвы выглядят приблизительно так ("Новая Москва" в них не включена): + +``` +INSERT INTO moscow VALUES ([(37.84172564285271, 55.78000432402266), (37.8381207618713, 55.775874525970494), (37.83979446823122, 55.775626746008065), (37.84243326983639, 55.77446586811748), (37.84262672750849, 55.771974101091104), (37.84153238623039, 55.77114545193181), (37.841124690460184, 55.76722010265554), (37.84239076983644, 55.76654891107098), (37.842283558197025, 55.76258709833121), (37.8421759312134, 55.758073999993734), (37.84198330422974, 55.75381499999371), (37.8416827275085, 55.749277102484484), (37.84157576190186, 55.74794544108413), (37.83897929098507, 55.74525257875241), (37.83739676451868, 55.74404373042019), (37.838732481460525, 55.74298009816793), (37.841183997352545, 55.743060321833575), (37.84097476190185, 55.73938799999373), (37.84048155819702, 55.73570799999372), (37.840095812164286, 55.73228210777237), (37.83983814285274, 55.73080491981639), (37.83846476321406, 55.729799917464675), (37.83835745269769, 55.72919751082619), (37.838636380279524, 55.72859509486539), (37.8395161005249, 55.727705075632784), (37.83897964285276, 55.722727886185154), (37.83862557539366, 55.72034817326636), (37.83559735744853, 55.71944437307499), (37.835370708803126, 55.71831419154461), (37.83738169402022, 55.71765218986692), (37.83823396494291, 55.71691750159089), (37.838056931213345, 55.71547311301385), (37.836812846557606, 55.71221445615604), (37.83522525396725, 55.709331054395555), (37.83269301586908, 55.70953687463627), (37.829667367706236, 55.70903403789297), (37.83311126588435, 55.70552351822608), (37.83058993121339, 55.70041317726053), (37.82983872750851, 55.69883771404813), (37.82934501586913, 55.69718947487017), (37.828926414016685, 55.69504441658371), (37.82876530422971, 55.69287499999378), (37.82894754100031, 55.690759754047335), (37.827697554878185, 55.68951421135665), (37.82447346292115, 55.68965045405069), (37.83136543914793, 55.68322046195302), (37.833554015869154, 55.67814012759211), (37.83544184655761, 55.67295011628339), (37.837480388885474, 55.6672498719639), (37.838960677246064, 55.66316274139358), (37.83926093121332, 55.66046999999383), (37.839025050262435, 55.65869897264431), (37.83670784390257, 55.65794084879904), (37.835656529083245, 55.65694309303843), (37.83704060449217, 55.65689306460552), (37.83696819873806, 55.65550363526252), (37.83760389616388, 55.65487847246661), (37.83687972750851, 55.65356745541324), (37.83515216004943, 55.65155951234079), (37.83312418518067, 55.64979413590619), (37.82801726983639, 55.64640836412121), (37.820614174591, 55.64164525405531), (37.818908190475426, 55.6421883258084), (37.81717543386075, 55.64112490388471), (37.81690987037274, 55.63916106913107), (37.815099354492155, 55.637925371757085), (37.808769150787356, 55.633798276884455), (37.80100123544311, 55.62873670012244), (37.79598013491824, 55.62554336109055), (37.78634567724606, 55.62033499605651), (37.78334147619623, 55.618768681480326), (37.77746201055901, 55.619855533402706), (37.77527329626457, 55.61909966711279), (37.77801986242668, 55.618770300976294), (37.778212973541216, 55.617257701952106), (37.77784818518065, 55.61574504433011), (37.77016867724609, 55.61148576294007), (37.760191219573976, 55.60599579539028), (37.75338926983641, 55.60227892751446), (37.746329965606634, 55.59920577639331), (37.73939925396728, 55.59631430313617), (37.73273665739439, 55.5935318803559), (37.7299954450912, 55.59350760316188), (37.7268679946899, 55.59469840523759), (37.72626726983634, 55.59229549697373), 
(37.7262673598022, 55.59081598950582), (37.71897193121335, 55.5877595845419), (37.70871550793456, 55.58393177431724), (37.700497489410374, 55.580917323756644), (37.69204305026244, 55.57778089778455), (37.68544477378839, 55.57815154690915), (37.68391050793454, 55.57472945079756), (37.678803592590306, 55.57328235936491), (37.6743402539673, 55.57255251445782), (37.66813862698363, 55.57216388774464), (37.617927457672096, 55.57505691895805), (37.60443099999999, 55.5757737568051), (37.599683515869145, 55.57749105910326), (37.59754177842709, 55.57796291823627), (37.59625834786988, 55.57906686095235), (37.59501783265684, 55.57746616444403), (37.593090671936025, 55.57671634534502), (37.587018007904, 55.577944600233785), (37.578692203704804, 55.57982895000019), (37.57327546607398, 55.58116294118248), (37.57385012109279, 55.581550362779), (37.57399562266922, 55.5820107079112), (37.5735356072979, 55.58226289171689), (37.57290393054962, 55.582393529795155), (37.57037722355653, 55.581919415056234), (37.5592298306885, 55.584471614867844), (37.54189249206543, 55.58867650795186), (37.5297256269836, 55.59158133551745), (37.517837865081766, 55.59443656218868), (37.51200186508174, 55.59635625174229), (37.506808949737554, 55.59907823904434), (37.49820432275389, 55.6062944994944), (37.494406071441674, 55.60967103463367), (37.494760001358024, 55.61066689753365), (37.49397137107085, 55.61220931698269), (37.49016528606031, 55.613417718449064), (37.48773249206542, 55.61530616333343), (37.47921386508177, 55.622640129112334), (37.470652153442394, 55.62993723476164), (37.46273446298218, 55.6368075123157), (37.46350692265317, 55.64068225239439), (37.46050283203121, 55.640794546982576), (37.457627470916734, 55.64118904154646), (37.450718034393326, 55.64690488145138), (37.44239252645875, 55.65397824729769), (37.434587576721185, 55.66053543155961), (37.43582144975277, 55.661693766520735), (37.43576786245721, 55.662755031737014), (37.430982915344174, 55.664610641628116), (37.428547447097685, 55.66778515273695), (37.42945134592044, 55.668633314343566), (37.42859571562949, 55.66948145750025), (37.4262836402282, 55.670813882451405), (37.418709037048295, 55.6811141674414), (37.41922139651101, 55.68235377885389), (37.419218771842885, 55.68359335082235), (37.417196501327446, 55.684375235224735), (37.41607020370478, 55.68540557585352), (37.415640857147146, 55.68686637150793), (37.414632153442334, 55.68903015131686), (37.413344899475064, 55.690896881757396), (37.41171432275391, 55.69264232162232), (37.40948282275393, 55.69455101638112), (37.40703674603271, 55.69638690385348), (37.39607169577025, 55.70451821283731), (37.38952706878662, 55.70942491932811), (37.387778313491815, 55.71149057784176), (37.39049275399779, 55.71419814298992), (37.385557272491454, 55.7155489617061), (37.38388335714726, 55.71849856042102), (37.378368238098155, 55.7292763261685), (37.37763597123337, 55.730845879211614), (37.37890062088197, 55.73167906388319), (37.37750451918789, 55.734703664681774), (37.375610832015965, 55.734851959522246), (37.3723813571472, 55.74105626086403), (37.37014935714723, 55.746115620904355), (37.36944173016362, 55.750883999993725), (37.36975304365541, 55.76335905525834), (37.37244070571134, 55.76432079697595), (37.3724259757175, 55.76636979670426), (37.369922155757884, 55.76735417953104), (37.369892695770275, 55.76823419316575), (37.370214730163575, 55.782312184391266), (37.370493611114505, 55.78436801120489), (37.37120164550783, 55.78596427165359), (37.37284851456452, 55.7874378183096), (37.37608325135799, 55.7886695054807), 
(37.3764587460632, 55.78947647305964), (37.37530000265506, 55.79146512926804), (37.38235915344241, 55.79899647809345), (37.384344043655396, 55.80113596939471), (37.38594269577028, 55.80322699999366), (37.38711208598329, 55.804919036911976), (37.3880239841309, 55.806610999993666), (37.38928977249147, 55.81001864976979), (37.39038389947512, 55.81348641242801), (37.39235781481933, 55.81983538336746), (37.393709457672124, 55.82417822811877), (37.394685720901464, 55.82792275755836), (37.39557615344238, 55.830447148154136), (37.39844478226658, 55.83167107969975), (37.40019761214057, 55.83151823557964), (37.400398790382326, 55.83264967594742), (37.39659544313046, 55.83322180909622), (37.39667059524539, 55.83402792148566), (37.39682089947515, 55.83638877400216), (37.39643489154053, 55.83861656112751), (37.3955338994751, 55.84072348043264), (37.392680272491454, 55.84502158126453), (37.39241188227847, 55.84659117913199), (37.392529730163616, 55.84816071336481), (37.39486835714723, 55.85288092980303), (37.39873052645878, 55.859893456073635), (37.40272161111449, 55.86441833633205), (37.40697072750854, 55.867579567544375), (37.410007082016016, 55.868369880337), (37.4120992989502, 55.86920843741314), (37.412668021163924, 55.87055369615854), (37.41482461111453, 55.87170587948249), (37.41862266137694, 55.873183961039565), (37.42413732540892, 55.874879126654704), (37.4312182698669, 55.875614937236705), (37.43111093783558, 55.8762723478417), (37.43332105622856, 55.87706546369396), (37.43385747619623, 55.87790681284802), (37.441303050262405, 55.88027084462084), (37.44747234260555, 55.87942070143253), (37.44716141796871, 55.88072960917233), (37.44769797085568, 55.88121221323979), (37.45204320500181, 55.882080694420715), (37.45673176190186, 55.882346110794586), (37.463383999999984, 55.88252729504517), (37.46682797486874, 55.88294937719063), (37.470014457672086, 55.88361266759345), (37.47751410450743, 55.88546991372396), (37.47860317658232, 55.88534929207307), (37.48165826025772, 55.882563306475106), (37.48316434442331, 55.8815803226785), (37.483831555817645, 55.882427612793315), (37.483182967125686, 55.88372791409729), (37.483092277908824, 55.88495581062434), (37.4855716508179, 55.8875561994203), (37.486440636245746, 55.887827444039566), (37.49014203439328, 55.88897899871799), (37.493210285705544, 55.890208937135604), (37.497512451065035, 55.891342397444696), (37.49780744510645, 55.89174030252967), (37.49940333499519, 55.89239745507079), (37.50018383334346, 55.89339220941865), (37.52421672750851, 55.903869074155224), (37.52977457672118, 55.90564076517974), (37.53503220370484, 55.90661661218259), (37.54042858064267, 55.90714113744566), (37.54320461007303, 55.905645048442985), (37.545686966066306, 55.906608607018505), (37.54743976120755, 55.90788552162358), (37.55796999999999, 55.90901557907218), (37.572711542327866, 55.91059395704873), (37.57942799999998, 55.91073854155573), (37.58502865872187, 55.91009969268444), (37.58739968913264, 55.90794809960554), (37.59131567193598, 55.908713267595054), (37.612687423278814, 55.902866854295375), (37.62348079629517, 55.90041967242986), (37.635797880950896, 55.898141151686396), (37.649487626983664, 55.89639275532968), (37.65619302513125, 55.89572360207488), (37.66294133862307, 55.895295577183965), (37.66874564418033, 55.89505457604897), (37.67375601586915, 55.89254677027454), (37.67744661901856, 55.8947775867987), (37.688347, 55.89450045676125), (37.69480554232789, 55.89422926332761), (37.70107096560668, 55.89322256101114), (37.705962965606716, 55.891763491662616), 
(37.711885134918205, 55.889110234998974), (37.71682005026245, 55.886577568759876), (37.7199315476074, 55.88458159806678), (37.72234560316464, 55.882281005794134), (37.72364385977171, 55.8809452036196), (37.725371142837474, 55.8809722706006), (37.727870902099546, 55.88037213862385), (37.73394330422971, 55.877941504088696), (37.745339592590376, 55.87208120378722), (37.75525267724611, 55.86703807949492), (37.76919976190188, 55.859821640197474), (37.827835219574, 55.82962968399116), (37.83341438888553, 55.82575289922351), (37.83652584655761, 55.82188784027888), (37.83809213491821, 55.81612575504693), (37.83605359521481, 55.81460347077685), (37.83632178569025, 55.81276696067908), (37.838623105812026, 55.811486181656385), (37.83912198147584, 55.807329380532785), (37.839079078033414, 55.80510270463816), (37.83965844708251, 55.79940712529036), (37.840581150787344, 55.79131399999368), (37.84172564285271, 55.78000432402266)]); +``` + +3. Проверяем, сколько сотовых вышек находится в Москве: + +``` +SELECT count() FROM cell_towers WHERE pointInPolygon((lon, lat), (SELECT * FROM moscow)) + +┌─count()─┐ +│ 310463 │ +└─────────┘ + +1 rows in set. Elapsed: 0.067 sec. Processed 43.28 million rows, 692.42 MB (645.83 million rows/s., 10.33 GB/s.) +``` + +Вы можете протестировать другие запросы с помощью интерактивного ресурса [Playground](https://gh-api.clickhouse.tech/play?user=play). Например, [вот так](https://gh-api.clickhouse.tech/play?user=play#U0VMRUNUIG1jYywgY291bnQoKSBGUk9NIGNlbGxfdG93ZXJzIEdST1VQIEJZIG1jYyBPUkRFUiBCWSBjb3VudCgpIERFU0M=). Однако, обратите внимание, что здесь нельзя создавать временные таблицы. diff --git a/docs/ru/getting-started/example-datasets/index.md b/docs/ru/getting-started/example-datasets/index.md index f590300adda..756b3a75dee 100644 --- a/docs/ru/getting-started/example-datasets/index.md +++ b/docs/ru/getting-started/example-datasets/index.md @@ -16,4 +16,5 @@ toc_title: "Введение" - [AMPLab Big Data Benchmark](amplab-benchmark.md) - [Данные о такси в Нью-Йорке](nyc-taxi.md) - [OnTime](ontime.md) +- [Вышки сотовой связи](../../getting-started/example-datasets/cell-towers.md) diff --git a/docs/ru/getting-started/example-datasets/ontime.md b/docs/ru/getting-started/example-datasets/ontime.md index be5b1cd1b70..d46b7e75e7f 100644 --- a/docs/ru/getting-started/example-datasets/ontime.md +++ b/docs/ru/getting-started/example-datasets/ontime.md @@ -27,126 +27,127 @@ done Создание таблицы: ``` sql -CREATE TABLE `ontime` ( - `Year` UInt16, - `Quarter` UInt8, - `Month` UInt8, - `DayofMonth` UInt8, - `DayOfWeek` UInt8, - `FlightDate` Date, - `UniqueCarrier` FixedString(7), - `AirlineID` Int32, - `Carrier` FixedString(2), - `TailNum` String, - `FlightNum` String, - `OriginAirportID` Int32, - `OriginAirportSeqID` Int32, - `OriginCityMarketID` Int32, - `Origin` FixedString(5), - `OriginCityName` String, - `OriginState` FixedString(2), - `OriginStateFips` String, - `OriginStateName` String, - `OriginWac` Int32, - `DestAirportID` Int32, - `DestAirportSeqID` Int32, - `DestCityMarketID` Int32, - `Dest` FixedString(5), - `DestCityName` String, - `DestState` FixedString(2), - `DestStateFips` String, - `DestStateName` String, - `DestWac` Int32, - `CRSDepTime` Int32, - `DepTime` Int32, - `DepDelay` Int32, - `DepDelayMinutes` Int32, - `DepDel15` Int32, - `DepartureDelayGroups` String, - `DepTimeBlk` String, - `TaxiOut` Int32, - `WheelsOff` Int32, - `WheelsOn` Int32, - `TaxiIn` Int32, - `CRSArrTime` Int32, - `ArrTime` Int32, - `ArrDelay` Int32, - `ArrDelayMinutes` Int32, - `ArrDel15` Int32, - 
`ArrivalDelayGroups` Int32, - `ArrTimeBlk` String, - `Cancelled` UInt8, - `CancellationCode` FixedString(1), - `Diverted` UInt8, - `CRSElapsedTime` Int32, - `ActualElapsedTime` Int32, - `AirTime` Int32, - `Flights` Int32, - `Distance` Int32, - `DistanceGroup` UInt8, - `CarrierDelay` Int32, - `WeatherDelay` Int32, - `NASDelay` Int32, - `SecurityDelay` Int32, - `LateAircraftDelay` Int32, - `FirstDepTime` String, - `TotalAddGTime` String, - `LongestAddGTime` String, - `DivAirportLandings` String, - `DivReachedDest` String, - `DivActualElapsedTime` String, - `DivArrDelay` String, - `DivDistance` String, - `Div1Airport` String, - `Div1AirportID` Int32, - `Div1AirportSeqID` Int32, - `Div1WheelsOn` String, - `Div1TotalGTime` String, - `Div1LongestGTime` String, - `Div1WheelsOff` String, - `Div1TailNum` String, - `Div2Airport` String, - `Div2AirportID` Int32, - `Div2AirportSeqID` Int32, - `Div2WheelsOn` String, - `Div2TotalGTime` String, - `Div2LongestGTime` String, - `Div2WheelsOff` String, - `Div2TailNum` String, - `Div3Airport` String, - `Div3AirportID` Int32, - `Div3AirportSeqID` Int32, - `Div3WheelsOn` String, - `Div3TotalGTime` String, - `Div3LongestGTime` String, - `Div3WheelsOff` String, - `Div3TailNum` String, - `Div4Airport` String, - `Div4AirportID` Int32, - `Div4AirportSeqID` Int32, - `Div4WheelsOn` String, - `Div4TotalGTime` String, - `Div4LongestGTime` String, - `Div4WheelsOff` String, - `Div4TailNum` String, - `Div5Airport` String, - `Div5AirportID` Int32, - `Div5AirportSeqID` Int32, - `Div5WheelsOn` String, - `Div5TotalGTime` String, - `Div5LongestGTime` String, - `Div5WheelsOff` String, - `Div5TailNum` String +CREATE TABLE `ontime` +( + `Year` UInt16, + `Quarter` UInt8, + `Month` UInt8, + `DayofMonth` UInt8, + `DayOfWeek` UInt8, + `FlightDate` Date, + `Reporting_Airline` String, + `DOT_ID_Reporting_Airline` Int32, + `IATA_CODE_Reporting_Airline` String, + `Tail_Number` Int32, + `Flight_Number_Reporting_Airline` String, + `OriginAirportID` Int32, + `OriginAirportSeqID` Int32, + `OriginCityMarketID` Int32, + `Origin` FixedString(5), + `OriginCityName` String, + `OriginState` FixedString(2), + `OriginStateFips` String, + `OriginStateName` String, + `OriginWac` Int32, + `DestAirportID` Int32, + `DestAirportSeqID` Int32, + `DestCityMarketID` Int32, + `Dest` FixedString(5), + `DestCityName` String, + `DestState` FixedString(2), + `DestStateFips` String, + `DestStateName` String, + `DestWac` Int32, + `CRSDepTime` Int32, + `DepTime` Int32, + `DepDelay` Int32, + `DepDelayMinutes` Int32, + `DepDel15` Int32, + `DepartureDelayGroups` String, + `DepTimeBlk` String, + `TaxiOut` Int32, + `WheelsOff` Int32, + `WheelsOn` Int32, + `TaxiIn` Int32, + `CRSArrTime` Int32, + `ArrTime` Int32, + `ArrDelay` Int32, + `ArrDelayMinutes` Int32, + `ArrDel15` Int32, + `ArrivalDelayGroups` Int32, + `ArrTimeBlk` String, + `Cancelled` UInt8, + `CancellationCode` FixedString(1), + `Diverted` UInt8, + `CRSElapsedTime` Int32, + `ActualElapsedTime` Int32, + `AirTime` Nullable(Int32), + `Flights` Int32, + `Distance` Int32, + `DistanceGroup` UInt8, + `CarrierDelay` Int32, + `WeatherDelay` Int32, + `NASDelay` Int32, + `SecurityDelay` Int32, + `LateAircraftDelay` Int32, + `FirstDepTime` String, + `TotalAddGTime` String, + `LongestAddGTime` String, + `DivAirportLandings` String, + `DivReachedDest` String, + `DivActualElapsedTime` String, + `DivArrDelay` String, + `DivDistance` String, + `Div1Airport` String, + `Div1AirportID` Int32, + `Div1AirportSeqID` Int32, + `Div1WheelsOn` String, + `Div1TotalGTime` String, + 
`Div1LongestGTime` String, + `Div1WheelsOff` String, + `Div1TailNum` String, + `Div2Airport` String, + `Div2AirportID` Int32, + `Div2AirportSeqID` Int32, + `Div2WheelsOn` String, + `Div2TotalGTime` String, + `Div2LongestGTime` String, + `Div2WheelsOff` String, + `Div2TailNum` String, + `Div3Airport` String, + `Div3AirportID` Int32, + `Div3AirportSeqID` Int32, + `Div3WheelsOn` String, + `Div3TotalGTime` String, + `Div3LongestGTime` String, + `Div3WheelsOff` String, + `Div3TailNum` String, + `Div4Airport` String, + `Div4AirportID` Int32, + `Div4AirportSeqID` Int32, + `Div4WheelsOn` String, + `Div4TotalGTime` String, + `Div4LongestGTime` String, + `Div4WheelsOff` String, + `Div4TailNum` String, + `Div5Airport` String, + `Div5AirportID` Int32, + `Div5AirportSeqID` Int32, + `Div5WheelsOn` String, + `Div5TotalGTime` String, + `Div5LongestGTime` String, + `Div5WheelsOff` String, + `Div5TailNum` String ) ENGINE = MergeTree -PARTITION BY Year -ORDER BY (Carrier, FlightDate) -SETTINGS index_granularity = 8192; + PARTITION BY Year + ORDER BY (IATA_CODE_Reporting_Airline, FlightDate) + SETTINGS index_granularity = 8192; ``` Загрузка данных: ``` bash -$ for i in *.zip; do echo $i; unzip -cq $i '*.csv' | sed 's/\.00//g' | clickhouse-client --host=example-perftest01j --query="INSERT INTO ontime FORMAT CSVWithNames"; done +ls -1 *.zip | xargs -I{} -P $(nproc) bash -c "echo {}; unzip -cq {} '*.csv' | sed 's/\.00//g' | clickhouse-client --input_format_with_names_use_header=0 --query='INSERT INTO ontime FORMAT CSVWithNames'" ``` ## Скачивание готовых партиций {#skachivanie-gotovykh-partitsii} @@ -211,7 +212,7 @@ LIMIT 10; Q4. Количество задержек по перевозчикам за 2007 год ``` sql -SELECT Carrier, count(*) +SELECT IATA_CODE_Reporting_Airline AS Carrier, count(*) FROM ontime WHERE DepDelay>10 AND Year=2007 GROUP BY Carrier @@ -225,29 +226,29 @@ SELECT Carrier, c, c2, c*100/c2 as c3 FROM ( SELECT - Carrier, + IATA_CODE_Reporting_Airline AS Carrier, count(*) AS c FROM ontime WHERE DepDelay>10 AND Year=2007 GROUP BY Carrier -) +) q JOIN ( SELECT - Carrier, + IATA_CODE_Reporting_Airline AS Carrier, count(*) AS c2 FROM ontime WHERE Year=2007 GROUP BY Carrier -) USING Carrier +) qq USING Carrier ORDER BY c3 DESC; ``` Более оптимальная версия того же запроса: ``` sql -SELECT Carrier, avg(DepDelay>10)*100 AS c3 +SELECT IATA_CODE_Reporting_Airline AS Carrier, avg(DepDelay>10)*100 AS c3 FROM ontime WHERE Year=2007 GROUP BY Carrier @@ -261,29 +262,29 @@ SELECT Carrier, c, c2, c*100/c2 as c3 FROM ( SELECT - Carrier, + IATA_CODE_Reporting_Airline AS Carrier, count(*) AS c FROM ontime WHERE DepDelay>10 AND Year>=2000 AND Year<=2008 GROUP BY Carrier -) +) q JOIN ( SELECT - Carrier, + IATA_CODE_Reporting_Airline AS Carrier, count(*) AS c2 FROM ontime WHERE Year>=2000 AND Year<=2008 GROUP BY Carrier -) USING Carrier +) qq USING Carrier ORDER BY c3 DESC; ``` Более оптимальная версия того же запроса: ``` sql -SELECT Carrier, avg(DepDelay>10)*100 AS c3 +SELECT IATA_CODE_Reporting_Airline AS Carrier, avg(DepDelay>10)*100 AS c3 FROM ontime WHERE Year>=2000 AND Year<=2008 GROUP BY Carrier @@ -302,7 +303,7 @@ FROM from ontime WHERE DepDelay>10 GROUP BY Year -) +) q JOIN ( select @@ -310,7 +311,7 @@ JOIN count(*) as c2 from ontime GROUP BY Year -) USING (Year) +) qq USING (Year) ORDER BY Year; ``` @@ -346,7 +347,7 @@ Q10. 
``` sql
SELECT
-    min(Year), max(Year), Carrier, count(*) AS cnt,
+    min(Year), max(Year), IATA_CODE_Reporting_Airline AS Carrier, count(*) AS cnt,
    sum(ArrDelayMinutes>30) AS flights_delayed,
    round(sum(ArrDelayMinutes>30)/count(*),2) AS rate
FROM ontime
diff --git a/docs/ru/getting-started/install.md b/docs/ru/getting-started/install.md
index 4ae27a910ea..d0a54d9043a 100644
--- a/docs/ru/getting-started/install.md
+++ b/docs/ru/getting-started/install.md
@@ -95,7 +95,9 @@ sudo clickhouse-client-$LATEST_VERSION/install/doinst.sh

- [AArch64](https://builds.clickhouse.tech/master/aarch64/clickhouse) — `curl -O 'https://builds.clickhouse.tech/master/aarch64/clickhouse' && chmod a+x ./clickhouse`
- [FreeBSD](https://builds.clickhouse.tech/master/freebsd/clickhouse) — `curl -O 'https://builds.clickhouse.tech/master/freebsd/clickhouse' && chmod a+x ./clickhouse`

-После скачивания, можно воспользоваться `clickhouse client` для подключения к серверу, или `clickhouse local` для обработки локальных данных. Для запуска `clickhouse server` необходимо скачать конфигурационные файлы [сервера](https://github.com/ClickHouse/ClickHouse/blob/master/programs/server/config.xml) и [пользователей](https://github.com/ClickHouse/ClickHouse/blob/master/programs/server/users.xml) с GitHub.
+После скачивания можно воспользоваться `clickhouse client` для подключения к серверу или `clickhouse local` для обработки локальных данных.
+
+Чтобы установить ClickHouse в рамках всей системы (с необходимыми конфигурационными файлами, настройками пользователей и т.д.), выполните `sudo ./clickhouse install`. Затем выполните команды `clickhouse start` (чтобы запустить сервер) и `clickhouse-client` (чтобы подключиться к нему).

Данные сборки не рекомендуются для использования в продакшене, так как они недостаточно тщательно протестированы. Также в них присутствуют не все возможности ClickHouse.

@@ -172,4 +174,3 @@ SELECT 1

**Поздравляем, система работает!**

Для дальнейших экспериментов можно попробовать загрузить один из тестовых наборов данных или пройти [пошаговое руководство для начинающих](https://clickhouse.tech/tutorial.html).
-
diff --git a/docs/ru/guides/apply-catboost-model.md b/docs/ru/guides/apply-catboost-model.md
index 11964c57fc7..db2be63692f 100644
--- a/docs/ru/guides/apply-catboost-model.md
+++ b/docs/ru/guides/apply-catboost-model.md
@@ -158,7 +158,9 @@ FROM amazon_train

<catboost_dynamic_library_path>/home/catboost/data/libcatboostmodel.so</catboost_dynamic_library_path>
<models_config>/home/catboost/models/*_model.xml</models_config>
```
-
+!!! note "Примечание"
+    Вы можете позднее изменить путь к конфигурации модели CatBoost без перезагрузки сервера.
+
## 4. Запустите вывод модели из SQL {#run-model-inference}

Для тестирования модели запустите клиент ClickHouse `$ clickhouse client`.
diff --git a/docs/ru/interfaces/cli.md b/docs/ru/interfaces/cli.md
index 96ec36be79f..277b73a6d36 100644
--- a/docs/ru/interfaces/cli.md
+++ b/docs/ru/interfaces/cli.md
@@ -121,6 +121,7 @@ $ clickhouse-client --param_tbl="numbers" --param_db="system" --param_col="numbe
- `--user, -u` — имя пользователя, по умолчанию — ‘default’.
- `--password` — пароль, по умолчанию — пустая строка.
- `--query, -q` — запрос для выполнения, при использовании в неинтерактивном режиме.
+- `--queries-file, -qf` — путь к файлу с запросами для выполнения. Необходимо указать только одну из опций: `query` или `queries-file`.
- `--database, -d` — выбрать текущую БД. Без указания значение берется из настроек сервера (по умолчанию — БД ‘default’).
- `--multiline, -m` — если указано — разрешить многострочные запросы, не отправлять запрос по нажатию Enter.
- `--multiquery, -n` — если указано — разрешить выполнять несколько запросов, разделённых точкой с запятой.
@@ -130,6 +131,7 @@ $ clickhouse-client --param_tbl="numbers" --param_db="system" --param_col="numbe
- `--stacktrace` — если указано, в случае исключения, выводить также его стек-трейс.
- `--config-file` — имя конфигурационного файла.
- `--secure` — если указано, будет использован безопасный канал.
+- `--history_file` — путь к файлу с историей команд.
- `--param_<name>` — значение параметра для [запроса с параметрами](#cli-queries-with-parameters).

Начиная с версии 20.5, в `clickhouse-client` есть автоматическая подсветка синтаксиса (включена всегда).
diff --git a/docs/ru/interfaces/third-party/gui.md b/docs/ru/interfaces/third-party/gui.md
index f913a0ff2cc..dc96c32e996 100644
--- a/docs/ru/interfaces/third-party/gui.md
+++ b/docs/ru/interfaces/third-party/gui.md
@@ -166,4 +166,25 @@ toc_title: "Визуальные интерфейсы от сторонних р

[Как сконфигурировать ClickHouse в Looker.](https://docs.looker.com/setup-and-management/database-config/clickhouse)

-[Original article](https://clickhouse.tech/docs/ru/interfaces/third-party/gui/)
+### SeekTable {#seektable}
+
+[SeekTable](https://www.seektable.com) — это аналитический инструмент для самостоятельного анализа и обработки данных бизнес-аналитики. Он доступен как в виде облачного сервиса, так и в виде локальной версии. Отчеты из SeekTable могут быть встроены в любое веб-приложение.
+
+Основные возможности:
+
+- Удобный конструктор отчетов.
+- Гибкая настройка отчетов SQL и создание запросов для специфичных отчетов.
+- Интегрируется с ClickHouse, используя собственную точку приема запроса TCP/IP или интерфейс HTTP(S) (два разных драйвера).
+- Поддерживает всю мощь диалекта ClickHouse SQL для построения запросов по различным измерениям и показателям.
+- [WEB-API](https://www.seektable.com/help/web-api-integration) для автоматизированной генерации отчетов.
+- Процесс разработки отчетов поддерживает [резервное копирование/восстановление данных](https://www.seektable.com/help/self-hosted-backup-restore); конфигурация моделей данных (кубов) / отчетов представляет собой удобочитаемый XML-файл, который может храниться в системе контроля версий.
+
+SeekTable [бесплатен](https://www.seektable.com/help/cloud-pricing) для личного/индивидуального использования.
+
+[Как сконфигурировать подключение ClickHouse в SeekTable.](https://www.seektable.com/help/clickhouse-pivot-table)
+
+### Chadmin {#chadmin}
+
+[Chadmin](https://github.com/bun4uk/chadmin) — простой графический интерфейс для визуализации запущенных запросов на вашем кластере ClickHouse. Он отображает информацию о запросах и дает возможность их завершать.
+
+[Original article](https://clickhouse.tech/docs/en/interfaces/third-party/gui/)
diff --git a/docs/ru/operations/server-configuration-parameters/settings.md b/docs/ru/operations/server-configuration-parameters/settings.md
index 84ef62650e1..abaf2a8f2da 100644
--- a/docs/ru/operations/server-configuration-parameters/settings.md
+++ b/docs/ru/operations/server-configuration-parameters/settings.md
@@ -291,7 +291,7 @@ ClickHouse проверяет условия для `min_part_size` и `min_part

## interserver_http_host {#interserver-http-host}

-Имя хоста, которое могут использовать другие серверы для обращения к этому.
+Имя хоста, которое могут использовать другие серверы для обращения к этому хосту.
Если не указано, то определяется аналогично команде `hostname -f`. @@ -303,11 +303,36 @@ ClickHouse проверяет условия для `min_part_size` и `min_part example.yandex.ru ``` +## interserver_https_port {#interserver-https-port} + +Порт для обмена данными между репликами ClickHouse по протоколу `HTTPS`. + +**Пример** + +``` xml +9010 +``` + +## interserver_https_host {#interserver-https-host} + +Имя хоста, которое могут использовать другие реплики для обращения к нему по протоколу `HTTPS`. + +**Пример** + +``` xml +example.yandex.ru +``` + + + ## interserver_http_credentials {#server-settings-interserver-http-credentials} Имя пользователя и пароль, использующиеся для аутентификации при [репликации](../../operations/server-configuration-parameters/settings.md) движками Replicated\*. Это имя пользователя и пароль используются только для взаимодействия между репликами кластера и никак не связаны с аутентификацией клиентов ClickHouse. Сервер проверяет совпадение имени и пароля для соединяющихся с ним реплик, а также использует это же имя и пароль для соединения с другими репликами. Соответственно, эти имя и пароль должны быть прописаны одинаковыми для всех реплик кластера. По умолчанию аутентификация не используется. +!!! note "Примечание" + Эти учетные данные являются общими для обмена данными по протоколам `HTTP` и `HTTPS`. + Раздел содержит следующие параметры: - `user` — имя пользователя. @@ -390,7 +415,7 @@ ClickHouse проверяет условия для `min_part_size` и `min_part Значения по умолчанию: при указанном `address` - `LOG_USER`, иначе - `LOG_DAEMON` - format - формат сообщений. Возможные значения - `bsd` и `syslog` -## send_crash_reports {#server_configuration_parameters-logger} +## send_crash_reports {#server_configuration_parameters-send_crash_reports} Настройки для отправки сообщений о сбоях в команду разработчиков ядра ClickHouse через [Sentry](https://sentry.io). Включение этих настроек, особенно в pre-production среде, может дать очень ценную информацию и поможет развитию ClickHouse. diff --git a/docs/ru/operations/settings/merge-tree-settings.md b/docs/ru/operations/settings/merge-tree-settings.md index bfc0b0a2644..e58948b0148 100644 --- a/docs/ru/operations/settings/merge-tree-settings.md +++ b/docs/ru/operations/settings/merge-tree-settings.md @@ -55,6 +55,26 @@ Eсли число кусков в партиции превышает знач ClickHouse искусственно выполняет `INSERT` дольше (добавляет ‘sleep’), чтобы фоновый механизм слияния успевал слиять куски быстрее, чем они добавляются. +## inactive_parts_to_throw_insert {#inactive-parts-to-throw-insert} + +Если число неактивных кусков в партиции превышает значение `inactive_parts_to_throw_insert`, `INSERT` прерывается с исключением «Too many inactive parts (N). Parts cleaning are processing significantly slower than inserts». + +Возможные значения: + +- Положительное целое число. + +Значение по умолчанию: 0 (не ограничено). + +## inactive_parts_to_delay_insert {#inactive-parts-to-delay-insert} + +Если число неактивных кусков в партиции больше или равно значению `inactive_parts_to_delay_insert`, `INSERT` искусственно замедляется. Это полезно, когда сервер не может быстро очистить неактивные куски. + +Возможные значения: + +- Положительное целое число. + +Значение по умолчанию: 0 (не ограничено). + ## max_delay_to_insert {#max-delay-to-insert} Величина в секундах, которая используется для расчета задержки `INSERT`, если число кусков в партиции превышает значение [parts_to_delay_insert](#parts-to-delay-insert). 
@@ -129,6 +149,39 @@ Если суммарное число активных кусков во все

Стандартное значение Linux dirty_expire_centisecs — 30 секунд (максимальное время, которое записанные данные хранятся только в оперативной памяти), но при больших нагрузках на дисковую систему данные могут быть записаны намного позже. Экспериментально было найдено время — 480 секунд, за которое гарантированно новый кусок будет записан на диск.

+## replicated_fetches_http_connection_timeout {#replicated_fetches_http_connection_timeout}
+
+Тайм-аут HTTP-соединения (в секундах) для запросов на скачивание кусков. Наследуется из профиля по умолчанию [http_connection_timeout](./settings.md#http_connection_timeout), если не задан явно.
+
+Возможные значения:
+
+- 0 — используется значение `http_connection_timeout`.
+- Любое положительное целое число.
+
+Значение по умолчанию: `0`.
+
+## replicated_fetches_http_send_timeout {#replicated_fetches_http_send_timeout}
+
+Тайм-аут (в секундах) для отправки HTTP-запросов на скачивание кусков. Наследуется из профиля по умолчанию [http_send_timeout](./settings.md#http_send_timeout), если не задан явно.
+
+Возможные значения:
+
+- 0 — используется значение `http_send_timeout`.
+- Любое положительное целое число.
+
+Значение по умолчанию: `0`.
+
+## replicated_fetches_http_receive_timeout {#replicated_fetches_http_receive_timeout}
+
+Тайм-аут (в секундах) для получения HTTP-запросов на скачивание кусков. Наследуется из профиля по умолчанию [http_receive_timeout](./settings.md#http_receive_timeout), если не задан явно.
+
+Возможные значения:
+
+- 0 — используется значение `http_receive_timeout`.
+- Любое положительное целое число.
+
+Значение по умолчанию: `0`.
+
## max_bytes_to_merge_at_max_space_in_pool {#max-bytes-to-merge-at-max-space-in-pool}

Максимальный суммарный размер кусков (в байтах) в одном слиянии, при наличии свободных ресурсов в фоновом пуле.
diff --git a/docs/ru/operations/settings/settings.md b/docs/ru/operations/settings/settings.md
index 7acdd65051b..45def8c4b50 100644
--- a/docs/ru/operations/settings/settings.md
+++ b/docs/ru/operations/settings/settings.md
@@ -119,6 +119,16 @@ ClickHouse применяет настройку в тех случаях, ко

Значение по умолчанию: 0.

+## http_max_uri_size {#http-max-uri-size}
+
+Устанавливает максимальную длину URI в HTTP-запросе.
+
+Возможные значения:
+
+- Положительное целое число.
+
+Значение по умолчанию: 1048576.
+
## send_progress_in_http_headers {#settings-send_progress_in_http_headers}

Включает или отключает HTTP-заголовки `X-ClickHouse-Progress` в ответах `clickhouse-server`.
@@ -844,8 +854,6 @@ SELECT type, query FROM system.query_log WHERE log_comment = 'log_comment test'

Значение по умолчанию: количество процессорных ядер без учёта Hyper-Threading.

-Если на сервере обычно исполняется менее одного запроса SELECT одновременно, то выставите этот параметр в значение чуть меньше количества реальных процессорных ядер.
-
Для запросов, которые быстро завершаются из-за LIMIT-а, имеет смысл выставить max_threads поменьше. Например, если нужное количество записей находится в каждом блоке, то при max_threads = 8 будет считано 8 блоков, хотя достаточно было прочитать один.

Чем меньше `max_threads`, тем меньше будет использоваться оперативки.
@@ -1753,7 +1761,7 @@ ClickHouse генерирует исключение

## background_pool_size {#background_pool_size}

-Задает количество потоков для выполнения фоновых операций в движках таблиц (например, слияния в таблицах c движком [MergeTree](../../engines/table-engines/mergetree-family/index.md)).
Настройка применяется при запуске сервера ClickHouse и не может быть изменена во пользовательском сеансе. Настройка позволяет управлять загрузкой процессора и диска. Чем меньше пулл, тем ниже нагрузка на CPU и диск, при этом фоновые процессы замедляются, что может повлиять на скорость выполнения запроса.
+Задает количество потоков для выполнения фоновых операций в движках таблиц (например, слияния в таблицах c движком [MergeTree](../../engines/table-engines/mergetree-family/index.md)). Настройка применяется при запуске сервера ClickHouse и не может быть изменена в пользовательском сеансе. Настройка позволяет управлять загрузкой процессора и диска. Чем меньше пул, тем ниже нагрузка на CPU и диск, при этом фоновые процессы работают с меньшей интенсивностью, что в конечном итоге может повлиять на производительность запросов, потому что сервер будет обрабатывать больше кусков.

Допустимые значения:

@@ -2757,4 +2765,129 @@ SELECT * FROM test2;

Значение по умолчанию: `0`.

+## prefer_column_name_to_alias {#prefer-column-name-to-alias}
+
+Включает или отключает замену названий столбцов на псевдонимы (alias) в выражениях и секциях запросов, см. [Примечания по использованию синонимов](../../sql-reference/syntax.md#syntax-expression_aliases). Включите эту настройку, чтобы синтаксис псевдонимов в ClickHouse был более совместим с большинством других СУБД.
+
+Возможные значения:
+
+- 0 — псевдоним подставляется вместо имени столбца.
+- 1 — псевдоним не подставляется вместо имени столбца.
+
+Значение по умолчанию: `0`.
+
+**Пример**
+
+Какие изменения привносит включение и выключение настройки:
+
+Запрос:
+
+```sql
+SET prefer_column_name_to_alias = 0;
+SELECT avg(number) AS number, max(number) FROM numbers(10);
+```
+
+Результат:
+
+```text
+Received exception from server (version 21.5.1):
+Code: 184. DB::Exception: Received from localhost:9000. DB::Exception: Aggregate function avg(number) is found inside another aggregate function in query: While processing avg(number) AS number.
+```
+
+Запрос:
+
+```sql
+SET prefer_column_name_to_alias = 1;
+SELECT avg(number) AS number, max(number) FROM numbers(10);
+```
+
+Результат:
+
+```text
+┌─number─┬─max(number)─┐
+│ 4.5 │ 9 │
+└────────┴─────────────┘
+```
+
+## limit {#limit}
+
+Устанавливает максимальное количество строк, возвращаемых запросом. Ограничивает сверху значение, установленное в запросе в секции [LIMIT](../../sql-reference/statements/select/limit.md#limit-clause).
+
+Возможные значения:
+
+- 0 — число строк не ограничено.
+- Положительное целое число.
+
+Значение по умолчанию: `0`.
+
+## offset {#offset}
+
+Устанавливает количество строк, которые необходимо пропустить перед началом возврата строк из запроса. Суммируется со значением, установленным в запросе в секции [OFFSET](../../sql-reference/statements/select/offset.md#offset-fetch).
+
+Возможные значения:
+
+- 0 — строки не пропускаются.
+- Положительное целое число.
+
+Значение по умолчанию: `0`.
+
+**Пример**
+
+Исходная таблица:
+
+``` sql
+CREATE TABLE test (i UInt64) ENGINE = MergeTree() ORDER BY i;
+INSERT INTO test SELECT number FROM numbers(500);
+```
+
+Запрос:
+
+``` sql
+SET limit = 5;
+SET offset = 7;
+SELECT * FROM test LIMIT 10 OFFSET 100;
+```
+
+Результат:
+
+``` text
+┌───i─┐
+│ 107 │
+│ 108 │
+│ 109 │
+└─────┘
+```
+
+## http_connection_timeout {#http_connection_timeout}
+
+Тайм-аут для HTTP-соединения (в секундах).
+
+Возможные значения:
+
+- 0 — бесконечный тайм-аут.
+- Любое положительное целое число.
+
+Значение по умолчанию: `1`.
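Действующие значения HTTP-тайм-аутов (включая `http_send_timeout` и `http_receive_timeout`, описанные в следующих разделах) можно посмотреть в таблице `system.settings`. Примерный набросок:

``` sql
-- Текущие значения HTTP-тайм-аутов в действующем профиле настроек:
SELECT name, value
FROM system.settings
WHERE name IN ('http_connection_timeout', 'http_send_timeout', 'http_receive_timeout');

-- Переопределение тайм-аута на уровне сессии:
SET http_connection_timeout = 10;
```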
+
+## http_send_timeout {#http_send_timeout}
+
+Тайм-аут для отправки данных через HTTP-интерфейс (в секундах).
+
+Возможные значения:
+
+- 0 — бесконечный тайм-аут.
+- Любое положительное целое число.
+
+Значение по умолчанию: `1800`.
+
+## http_receive_timeout {#http_receive_timeout}
+
+Тайм-аут для получения данных через HTTP-интерфейс (в секундах).
+
+Возможные значения:
+
+- 0 — бесконечный тайм-аут.
+- Любое положительное целое число.
+
+Значение по умолчанию: `1800`.
+
[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/settings/settings/)
diff --git a/docs/ru/operations/system-tables/clusters.md b/docs/ru/operations/system-tables/clusters.md
index ddc6849b44d..6bfeb8aa818 100644
--- a/docs/ru/operations/system-tables/clusters.md
+++ b/docs/ru/operations/system-tables/clusters.md
@@ -4,12 +4,68 @@

Столбцы:

-- `cluster` (String) — имя кластера.
-- `shard_num` (UInt32) — номер шарда в кластере, начиная с 1.
-- `shard_weight` (UInt32) — относительный вес шарда при записи данных.
-- `replica_num` (UInt32) — номер реплики в шарде, начиная с 1.
-- `host_name` (String) — хост, указанный в конфигурации.
-- `host_address` (String) — TIP-адрес хоста, полученный из DNS.
-- `port` (UInt16) — порт, на который обращаться для соединения с сервером.
-- `user` (String) — имя пользователя, которого использовать для соединения с сервером.
+- `cluster` ([String](../../sql-reference/data-types/string.md)) — имя кластера.
+- `shard_num` ([UInt32](../../sql-reference/data-types/int-uint.md)) — номер шарда в кластере, начиная с 1.
+- `shard_weight` ([UInt32](../../sql-reference/data-types/int-uint.md)) — относительный вес шарда при записи данных.
+- `replica_num` ([UInt32](../../sql-reference/data-types/int-uint.md)) — номер реплики в шарде, начиная с 1.
+- `host_name` ([String](../../sql-reference/data-types/string.md)) — хост, указанный в конфигурации.
+- `host_address` ([String](../../sql-reference/data-types/string.md)) — IP-адрес хоста, полученный из DNS.
+- `port` ([UInt16](../../sql-reference/data-types/int-uint.md)) — порт для соединения с сервером.
+- `is_local` ([UInt8](../../sql-reference/data-types/int-uint.md)) — флаг, показывающий, является ли хост локальным.
+- `user` ([String](../../sql-reference/data-types/string.md)) — имя пользователя для соединения с сервером.
+- `default_database` ([String](../../sql-reference/data-types/string.md)) — имя базы данных по умолчанию.
+- `errors_count` ([UInt32](../../sql-reference/data-types/int-uint.md)) — количество неудачных попыток хоста получить доступ к реплике.
+- `slowdowns_count` ([UInt32](../../sql-reference/data-types/int-uint.md)) — количество замен реплики при хеджированных запросах из-за долгого отсутствия ответа от нее при установке соединения.
+- `estimated_recovery_time` ([UInt32](../../sql-reference/data-types/int-uint.md)) — количество секунд до момента, когда количество ошибок будет обнулено и реплика станет доступной.
+
+**Пример**
+
+Запрос:
+
+```sql
+SELECT * FROM system.clusters LIMIT 2 FORMAT Vertical;
+```
+
+Результат:
+
+```text
+Row 1:
+──────
+cluster: test_cluster_two_shards
+shard_num: 1
+shard_weight: 1
+replica_num: 1
+host_name: 127.0.0.1
+host_address: 127.0.0.1
+port: 9000
+is_local: 1
+user: default
+default_database:
+errors_count: 0
+slowdowns_count: 0
+estimated_recovery_time: 0
+
+Row 2:
+──────
+cluster: test_cluster_two_shards
+shard_num: 2
+shard_weight: 1
+replica_num: 1
+host_name: 127.0.0.2
+host_address: 127.0.0.2
+port: 9000
+is_local: 0
+user: default
+default_database:
+errors_count: 0
+slowdowns_count: 0
+estimated_recovery_time: 0
+```
+
+**Смотрите также**
+
+- [Table engine Distributed](../../engines/table-engines/special/distributed.md)
+- [Настройка distributed_replica_error_cap](../../operations/settings/settings.md#settings-distributed_replica_error_cap)
+- [Настройка distributed_replica_error_half_life](../../operations/settings/settings.md#settings-distributed_replica_error_half_life)
+
+[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/system_tables/clusters)
diff --git a/docs/ru/operations/system-tables/columns.md b/docs/ru/operations/system-tables/columns.md
index af4cff85439..b8a0aef2299 100644
--- a/docs/ru/operations/system-tables/columns.md
+++ b/docs/ru/operations/system-tables/columns.md
@@ -4,7 +4,9 @@

С помощью этой таблицы можно получить информацию аналогично запросу [DESCRIBE TABLE](../../sql-reference/statements/misc.md#misc-describe-table), но для многих таблиц сразу.

-Таблица `system.columns` содержит столбцы (тип столбца указан в скобках):
+Столбцы [временных таблиц](../../sql-reference/statements/create/table.md#temporary-tables) содержатся в `system.columns` только в тех сессиях, в которых эти таблицы были созданы. Поле `database` у таких столбцов пустое.
+
+Столбцы:

- `database` ([String](../../sql-reference/data-types/string.md)) — имя базы данных.
- `table` ([String](../../sql-reference/data-types/string.md)) — имя таблицы.
@@ -23,3 +25,46 @@

- `is_in_sampling_key` ([UInt8](../../sql-reference/data-types/int-uint.md)) — флаг, показывающий включение столбца в ключ выборки.
- `compression_codec` ([String](../../sql-reference/data-types/string.md)) — имя кодека сжатия.

+**Пример**
+
+```sql
+SELECT * FROM system.columns LIMIT 2 FORMAT Vertical;
+```
+
+```text
+Row 1:
+──────
+database: system
+table: aggregate_function_combinators
+name: name
+type: String
+default_kind:
+default_expression:
+data_compressed_bytes: 0
+data_uncompressed_bytes: 0
+marks_bytes: 0
+comment:
+is_in_partition_key: 0
+is_in_sorting_key: 0
+is_in_primary_key: 0
+is_in_sampling_key: 0
+compression_codec:
+
+Row 2:
+──────
+database: system
+table: aggregate_function_combinators
+name: is_internal
+type: UInt8
+default_kind:
+default_expression:
+data_compressed_bytes: 0
+data_uncompressed_bytes: 0
+marks_bytes: 0
+comment:
+is_in_partition_key: 0
+is_in_sorting_key: 0
+is_in_primary_key: 0
+is_in_sampling_key: 0
+compression_codec:
+```
diff --git a/docs/ru/operations/system-tables/dictionaries.md b/docs/ru/operations/system-tables/dictionaries.md
index 6a49904aae9..b865fea736f 100644
--- a/docs/ru/operations/system-tables/dictionaries.md
+++ b/docs/ru/operations/system-tables/dictionaries.md
@@ -21,6 +21,7 @@

- `bytes_allocated` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Объем оперативной памяти, используемый словарем.
- `query_count` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Количество запросов с момента загрузки словаря или с момента последней успешной перезагрузки.
- `hit_rate` ([Float64](../../sql-reference/data-types/float.md)) — Для cache-словарей — процент закэшированных значений.
+- `found_rate` ([Float64](../../sql-reference/data-types/float.md)) — Процент обращений к словарю, при которых значение было найдено.
- `element_count` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Количество элементов, хранящихся в словаре.
- `load_factor` ([Float64](../../sql-reference/data-types/float.md)) — Процент заполнения словаря (для хэшированного словаря — процент заполнения хэш-таблицы).
- `source` ([String](../../sql-reference/data-types/string.md)) — Текст, описывающий [источник данных](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md) для словаря.
@@ -58,4 +59,3 @@ SELECT * FROM system.dictionaries
│ dictdb │ dict │ LOADED │ dictdb.dict │ Flat │ UInt64 │ ['value_default','value_expression'] │ ['String','String'] │ 74032 │ 0 │ 1 │ 1 │ 0.0004887585532746823 │ ClickHouse: dictdb.dicttbl │ 0 │ 1 │ 2020-03-04 04:17:34 │ 2020-03-04 04:30:34 │ 0.002 │ │
└──────────┴──────┴────────┴─────────────┴──────┴────────┴──────────────────────────────────────┴─────────────────────┴─────────────────┴─────────────┴──────────┴───────────────┴───────────────────────┴────────────────────────────┴──────────────┴──────────────┴─────────────────────┴──────────────────────────────┘
-
diff --git a/docs/ru/operations/system-tables/replication_queue.md b/docs/ru/operations/system-tables/replication_queue.md
index 56e8c695a21..2f9d80be16f 100644
--- a/docs/ru/operations/system-tables/replication_queue.md
+++ b/docs/ru/operations/system-tables/replication_queue.md
@@ -14,7 +14,17 @@

- `node_name` ([String](../../sql-reference/data-types/string.md)) — имя узла в ZooKeeper.

-- `type` ([String](../../sql-reference/data-types/string.md)) — тип задачи в очереди: `GET_PARTS`, `MERGE_PARTS`, `DETACH_PARTS`, `DROP_PARTS` или `MUTATE_PARTS`.
+- `type` ([String](../../sql-reference/data-types/string.md)) — тип задачи в очереди:
+
+    - `GET_PART` — скачать кусок с другой реплики.
+    - `ATTACH_PART` — присоединить кусок. Задача может быть выполнена и с куском из нашей собственной реплики (если он находится в папке `detached`). Эта задача практически идентична задаче `GET_PART`, лишь немного оптимизирована.
+    - `MERGE_PARTS` — выполнить слияние кусков.
+    - `DROP_RANGE` — удалить куски в партициях из указанного диапазона.
+    - `CLEAR_COLUMN` — удалить указанный столбец из указанной партиции. Примечание: не используется с 20.4.
+    - `CLEAR_INDEX` — удалить указанный индекс из указанной партиции. Примечание: не используется с 20.4.
+    - `REPLACE_RANGE` — удалить указанный диапазон кусков и заменить их на новые.
+    - `MUTATE_PART` — применить одну или несколько мутаций к куску.
+    - `ALTER_METADATA` — применить изменения структуры таблицы в результате запросов с выражением `ALTER`.

- `create_time` ([Datetime](../../sql-reference/data-types/datetime.md)) — дата и время отправки задачи на выполнение.
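Например, текущее распределение задач в очереди по типам можно получить таким запросом (примерный набросок):

``` sql
-- Сколько задач каждого типа сейчас находится в очереди репликации:
SELECT type, count() AS tasks
FROM system.replication_queue
GROUP BY type
ORDER BY tasks DESC;
```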
@@ -77,4 +87,3 @@ last_postpone_time: 1970-01-01 03:00:00 **Смотрите также** - [Управление таблицами ReplicatedMergeTree](../../sql-reference/statements/system.md#query-language-system-replicated) - diff --git a/docs/ru/operations/system-tables/tables.md b/docs/ru/operations/system-tables/tables.md index 42e55b1f6b7..11bb6a9eda2 100644 --- a/docs/ru/operations/system-tables/tables.md +++ b/docs/ru/operations/system-tables/tables.md @@ -1,39 +1,94 @@ # system.tables {#system-tables} -Содержит метаданные каждой таблицы, о которой знает сервер. Отсоединённые таблицы не отображаются в `system.tables`. +Содержит метаданные каждой таблицы, о которой знает сервер. -Эта таблица содержит следующие столбцы (тип столбца показан в скобках): +Отсоединённые таблицы ([DETACH](../../sql-reference/statements/detach.md)) не отображаются в `system.tables`. -- `database String` — имя базы данных, в которой находится таблица. -- `name` (String) — имя таблицы. -- `engine` (String) — движок таблицы (без параметров). -- `is_temporary` (UInt8) — флаг, указывающий на то, временная это таблица или нет. -- `data_path` (String) — путь к данным таблицы в файловой системе. -- `metadata_path` (String) — путь к табличным метаданным в файловой системе. -- `metadata_modification_time` (DateTime) — время последней модификации табличных метаданных. -- `dependencies_database` (Array(String)) — зависимости базы данных. -- `dependencies_table` (Array(String)) — табличные зависимости (таблицы [MaterializedView](../../engines/table-engines/special/materializedview.md), созданные на базе текущей таблицы). -- `create_table_query` (String) — запрос, которым создавалась таблица. -- `engine_full` (String) — параметры табличного движка. -- `partition_key` (String) — ключ партиционирования таблицы. -- `sorting_key` (String) — ключ сортировки таблицы. -- `primary_key` (String) - первичный ключ таблицы. -- `sampling_key` (String) — ключ сэмплирования таблицы. -- `storage_policy` (String) - политика хранения данных: +Информация о [временных таблицах](../../sql-reference/statements/create/table.md#temporary-tables) содержится в `system.tables` только в тех сессиях, в которых эти таблицы были созданы. Поле `database` у таких таблиц пустое, а флаг `is_temporary` включен. + +Столбцы: + +- `database` ([String](../../sql-reference/data-types/string.md)) — имя базы данных, в которой находится таблица. +- `name` ([String](../../sql-reference/data-types/string.md)) — имя таблицы. +- `engine` ([String](../../sql-reference/data-types/string.md)) — движок таблицы (без параметров). +- `is_temporary` ([UInt8](../../sql-reference/data-types/int-uint.md)) — флаг, указывающий на то, временная это таблица или нет. +- `data_path` ([String](../../sql-reference/data-types/string.md)) — путь к данным таблицы в файловой системе. +- `metadata_path` ([String](../../sql-reference/data-types/string.md)) — путь к табличным метаданным в файловой системе. +- `metadata_modification_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — время последней модификации табличных метаданных. +- `dependencies_database` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — зависимости базы данных. +- `dependencies_table` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — табличные зависимости (таблицы [MaterializedView](../../engines/table-engines/special/materializedview.md), созданные на базе текущей таблицы). 
+- `create_table_query` ([String](../../sql-reference/data-types/string.md)) — запрос, при помощи которого создавалась таблица. +- `engine_full` ([String](../../sql-reference/data-types/string.md)) — параметры табличного движка. +- `partition_key` ([String](../../sql-reference/data-types/string.md)) — ключ партиционирования таблицы. +- `sorting_key` ([String](../../sql-reference/data-types/string.md)) — ключ сортировки таблицы. +- `primary_key` ([String](../../sql-reference/data-types/string.md)) - первичный ключ таблицы. +- `sampling_key` ([String](../../sql-reference/data-types/string.md)) — ключ сэмплирования таблицы. +- `storage_policy` ([String](../../sql-reference/data-types/string.md)) - политика хранения данных: - [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-multiple-volumes) - [Distributed](../../engines/table-engines/special/distributed.md#distributed) -- `total_rows` (Nullable(UInt64)) - общее количество строк, если есть возможность быстро определить точное количество строк в таблице, в противном случае `Null` (включая базовую таблицу `Buffer`). +- `total_rows` ([Nullable](../../sql-reference/data-types/nullable.md)([UInt64](../../sql-reference/data-types/int-uint.md))) - общее количество строк, если есть возможность быстро определить точное количество строк в таблице, в противном случае `NULL` (включая базовую таблицу `Buffer`). -- `total_bytes` (Nullable(UInt64)) - общее количество байт, если можно быстро определить точное количество байт для таблицы на накопителе, в противном случае `Null` (**не включает** в себя никакого базового хранилища). +- `total_bytes` ([Nullable](../../sql-reference/data-types/nullable.md)([UInt64](../../sql-reference/data-types/int-uint.md))) - общее количество байт, если можно быстро определить точное количество байт для таблицы на накопителе, в противном случае `NULL` (не включает в себя никакого базового хранилища). - Если таблица хранит данные на диске, возвращает используемое пространство на диске (т. е. сжатое). - Если таблица хранит данные в памяти, возвращает приблизительное количество используемых байт в памяти. -- `lifetime_rows` (Nullable(UInt64)) - общее количество строк, добавленных оператором `INSERT` с момента запуска сервера (только для таблиц `Buffer`). +- `lifetime_rows` ([Nullable](../../sql-reference/data-types/nullable.md)([UInt64](../../sql-reference/data-types/int-uint.md))) - общее количество строк, добавленных оператором `INSERT` с момента запуска сервера (только для таблиц `Buffer`). -- `lifetime_bytes` (Nullable(UInt64)) - общее количество байт, добавленных оператором `INSERT` с момента запуска сервера (только для таблиц `Buffer`). +- `lifetime_bytes` ([Nullable](../../sql-reference/data-types/nullable.md)([UInt64](../../sql-reference/data-types/int-uint.md))) - общее количество байт, добавленных оператором `INSERT` с момента запуска сервера (только для таблиц `Buffer`). Таблица `system.tables` используется при выполнении запроса `SHOW TABLES`. 
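For example (an illustrative query, not part of the patch): since temporary tables have an empty `database` and the `is_temporary` flag set, they can be listed from the session that created them like this:

``` sql
SELECT name, engine, is_temporary
FROM system.tables
WHERE is_temporary;
```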
+**Пример**
+
+```sql
+SELECT * FROM system.tables LIMIT 2 FORMAT Vertical;
+```
+
+```text
+Row 1:
+──────
+database: system
+name: aggregate_function_combinators
+uuid: 00000000-0000-0000-0000-000000000000
+engine: SystemAggregateFunctionCombinators
+is_temporary: 0
+data_paths: []
+metadata_path: /var/lib/clickhouse/metadata/system/aggregate_function_combinators.sql
+metadata_modification_time: 1970-01-01 03:00:00
+dependencies_database: []
+dependencies_table: []
+create_table_query:
+engine_full:
+partition_key:
+sorting_key:
+primary_key:
+sampling_key:
+storage_policy:
+total_rows: ᴺᵁᴸᴸ
+total_bytes: ᴺᵁᴸᴸ
+
+Row 2:
+──────
+database: system
+name: asynchronous_metrics
+uuid: 00000000-0000-0000-0000-000000000000
+engine: SystemAsynchronousMetrics
+is_temporary: 0
+data_paths: []
+metadata_path: /var/lib/clickhouse/metadata/system/asynchronous_metrics.sql
+metadata_modification_time: 1970-01-01 03:00:00
+dependencies_database: []
+dependencies_table: []
+create_table_query:
+engine_full:
+partition_key:
+sorting_key:
+primary_key:
+sampling_key:
+storage_policy:
+total_rows: ᴺᵁᴸᴸ
+total_bytes: ᴺᵁᴸᴸ
+```
diff --git a/docs/ru/operations/system-tables/trace_log.md b/docs/ru/operations/system-tables/trace_log.md
index 3d22e4eabfd..6d8130c1d00 100644
--- a/docs/ru/operations/system-tables/trace_log.md
+++ b/docs/ru/operations/system-tables/trace_log.md
@@ -18,10 +18,12 @@ ClickHouse создает эту таблицу когда установлен
Во время соединения с сервером через `clickhouse-client`, вы видите строку похожую на `Connected to ClickHouse server version 19.18.1 revision 54429.`. Это поле содержит номер после `revision`, но не содержит строку после `version`.
-- `timer_type`([Enum8](../../sql-reference/data-types/enum.md)) — тип таймера:
+- `trace_type`([Enum8](../../sql-reference/data-types/enum.md)) — тип трассировки:
-    - `Real` означает wall-clock время.
-    - `CPU` означает относительное CPU время.
+    - `Real` — сбор трассировок стека адресов вызова по времени wall-clock.
+    - `CPU` — сбор трассировок стека адресов вызова по времени CPU.
+    - `Memory` — сбор выделенной памяти, когда ее размер превышает относительный инкремент.
+    - `MemorySample` — сбор случайно выделенной памяти.
- `thread_number`([UInt32](../../sql-reference/data-types/int-uint.md)) — идентификатор треда.
diff --git a/docs/ru/operations/tips.md b/docs/ru/operations/tips.md
deleted file mode 100644
index 4535767e8e0..00000000000
--- a/docs/ru/operations/tips.md
+++ /dev/null
@@ -1,248 +0,0 @@
----
-toc_priority: 58
-toc_title: "Советы по эксплуатации"
----
-
-# Советы по эксплуатации {#sovety-po-ekspluatatsii}
-
-## CPU Scaling Governor {#cpu-scaling-governor}
-
-Всегда используйте `performance` scaling governor. `ondemand` scaling governor работает намного хуже при постоянно высоком спросе.
-
-``` bash
-$ echo 'performance' | sudo tee /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor
-```
-
-## Ограничение CPU {#ogranichenie-cpu}
-
-Процессоры могут перегреваться. С помощью `dmesg` можно увидеть, если тактовая частота процессора была ограничена из-за перегрева.
-Также ограничение может устанавливаться снаружи на уровне дата-центра. С помощью `turbostat` можно за этим наблюдать под нагрузкой.
-
-## Оперативная память {#operativnaia-pamiat}
-
-Для небольших объёмов данных (до ~200 Гб в сжатом виде) лучше всего использовать столько памяти не меньше, чем объём данных.
-Для больших объёмов данных, при выполнении интерактивных (онлайн) запросов, стоит использовать разумный объём оперативной памяти (128 Гб или более) для того, чтобы горячее подмножество данных поместилось в кеше страниц. -Даже для объёмов данных в ~50 Тб на сервер, использование 128 Гб оперативной памяти намного лучше для производительности выполнения запросов, чем 64 Гб. - -Не выключайте overcommit. Значение `cat /proc/sys/vm/overcommit_memory` должно быть 0 or 1. Выполните: - -``` bash -$ echo 0 | sudo tee /proc/sys/vm/overcommit_memory -``` - -## Huge Pages {#huge-pages} - -Механизм прозрачных huge pages нужно отключить. Он мешает работе аллокаторов памяти, что приводит к значительной деградации производительности. - -``` bash -$ echo 'madvise' | sudo tee /sys/kernel/mm/transparent_hugepage/enabled -``` - -С помощью `perf top` можно наблюдать за временем, проведенном в ядре операционной системы для управления памятью. -Постоянные huge pages так же не нужно аллоцировать. - -## Подсистема хранения {#podsistema-khraneniia} - -Если ваш бюджет позволяет использовать SSD, используйте SSD. -В противном случае используйте HDD. SATA HDDs 7200 RPM подойдут. - -Предпочитайте много серверов с локальными жесткими дисками вместо меньшего числа серверов с подключенными дисковыми полками. -Но для хранения архивов с редкими запросами полки всё же подходят. - -## RAID {#raid} - -При использовании HDD можно объединить их RAID-10, RAID-5, RAID-6 или RAID-50. -Лучше использовать программный RAID в Linux (`mdadm`). Лучше не использовать LVM. -При создании RAID-10, нужно выбрать `far` расположение. -Если бюджет позволяет, лучше выбрать RAID-10. - -На более чем 4 дисках вместо RAID-5 нужно использовать RAID-6 (предпочтительнее) или RAID-50. -При использовании RAID-5, RAID-6 или RAID-50, нужно всегда увеличивать stripe_cache_size, так как значение по умолчанию выбрано не самым удачным образом. - -``` bash -$ echo 4096 | sudo tee /sys/block/md2/md/stripe_cache_size -``` - -Точное число стоит вычислять из числа устройств и размер блока по формуле: `2 * num_devices * chunk_size_in_bytes / 4096`. - -Размер блока в 1024 Кб подходит для всех конфигураций RAID. -Никогда не указывайте слишком маленький или слишком большой размер блока. - -На SSD можно использовать RAID-0. -Вне зависимости от использования RAID, всегда используйте репликацию для безопасности данных. - -Включите NCQ с длинной очередью. Для HDD стоит выбрать планировщик CFQ, а для SSD — noop. Не стоит уменьшать настройку readahead. -На HDD стоит включать кеш записи. - -## Файловая система {#failovaia-sistema} - -Ext4 самый проверенный вариант. Укажите опции монтирования `noatime,nobarrier`. -XFS также подходит, но не так тщательно протестирована в сочетании с ClickHouse. -Большинство других файловых систем также должны нормально работать. Файловые системы с отложенной аллокацией работают лучше. - -## Ядро Linux {#iadro-linux} - -Не используйте слишком старое ядро Linux. - -## Сеть {#set} - -При использовании IPv6, стоит увеличить размер кеша маршрутов. -Ядра Linux до 3.2 имели массу проблем в реализации IPv6. - -Предпочитайте как минимум 10 Гбит сеть. 1 Гбит также будет работать, но намного хуже для починки реплик с десятками терабайт данных или для обработки распределенных запросов с большим объёмом промежуточных данных. - -## ZooKeeper {#zookeeper} - -Вероятно вы уже используете ZooKeeper для других целей. Можно использовать ту же инсталляцию ZooKeeper, если она не сильно перегружена. - -Лучше использовать свежую версию ZooKeeper, как минимум 3.4.9. 
Версия в стабильных дистрибутивах Linux может быть устаревшей. - -Никогда не используете написанные вручную скрипты для переноса данных между разными ZooKeeper кластерами, потому что результат будет некорректный для sequential нод. Никогда не используйте утилиту «zkcopy», по той же причине: https://github.com/ksprojects/zkcopy/issues/15 - -Если вы хотите разделить существующий ZooKeeper кластер на два, правильный способ - увеличить количество его реплик, а затем переконфигурировать его как два независимых кластера. - -Не запускайте ZooKeeper на тех же серверах, что и ClickHouse. Потому что ZooKeeper очень чувствителен к задержкам, а ClickHouse может использовать все доступные системные ресурсы. - -С настройками по умолчанию, ZooKeeper является бомбой замедленного действия: - -> Сервер ZooKeeper не будет удалять файлы со старыми снепшоты и логами при использовании конфигурации по умолчанию (см. autopurge), это является ответственностью оператора. - -Эту бомбу нужно обезвредить. - -Далее описана конфигурация ZooKeeper (3.5.1), используемая в боевом окружении Яндекс.Метрики на момент 20 мая 2017 года: - -zoo.cfg: - -``` bash -# http://hadoop.apache.org/zookeeper/docs/current/zookeeperAdmin.html - -# The number of milliseconds of each tick -tickTime=2000 -# The number of ticks that the initial -# synchronization phase can take -initLimit=30000 -# The number of ticks that can pass between -# sending a request and getting an acknowledgement -syncLimit=10 - -maxClientCnxns=2000 - -maxSessionTimeout=60000000 -# the directory where the snapshot is stored. -dataDir=/opt/zookeeper/{{ '{{' }} cluster['name'] {{ '{{' }} '}}' }}/data -# Place the dataLogDir to a separate physical disc for better performance -dataLogDir=/opt/zookeeper/{{ '{{' }} cluster['name'] {{ '{{' }} '}}' }}/logs - -autopurge.snapRetainCount=10 -autopurge.purgeInterval=1 - - -# To avoid seeks ZooKeeper allocates space in the transaction log file in -# blocks of preAllocSize kilobytes. The default block size is 64M. One reason -# for changing the size of the blocks is to reduce the block size if snapshots -# are taken more often. (Also, see snapCount). -preAllocSize=131072 - -# Clients can submit requests faster than ZooKeeper can process them, -# especially if there are a lot of clients. To prevent ZooKeeper from running -# out of memory due to queued requests, ZooKeeper will throttle clients so that -# there is no more than globalOutstandingLimit outstanding requests in the -# system. The default limit is 1,000.ZooKeeper logs transactions to a -# transaction log. After snapCount transactions are written to a log file a -# snapshot is started and a new transaction log file is started. The default -# snapCount is 10,000. -snapCount=3000000 - -# If this option is defined, requests will be will logged to a trace file named -# traceFile.year.month.day. -#traceFile= - -# Leader accepts client connections. Default value is "yes". The leader machine -# coordinates updates. For higher update throughput at thes slight expense of -# read throughput the leader can be configured to not accept clients and focus -# on coordination. 
-leaderServes=yes - -standaloneEnabled=false -dynamicConfigFile=/etc/zookeeper-{{ '{{' }} cluster['name'] {{ '{{' }} '}}' }}/conf/zoo.cfg.dynamic -``` - -Версия Java: - -``` text -Java(TM) SE Runtime Environment (build 1.8.0_25-b17) -Java HotSpot(TM) 64-Bit Server VM (build 25.25-b02, mixed mode) -``` - -Параметры JVM: - -``` bash -NAME=zookeeper-{{ '{{' }} cluster['name'] {{ '{{' }} '}}' }} -ZOOCFGDIR=/etc/$NAME/conf - -# TODO this is really ugly -# How to find out, which jars are needed? -# seems, that log4j requires the log4j.properties file to be in the classpath -CLASSPATH="$ZOOCFGDIR:/usr/build/classes:/usr/build/lib/*.jar:/usr/share/zookeeper/zookeeper-3.5.1-metrika.jar:/usr/share/zookeeper/slf4j-log4j12-1.7.5.jar:/usr/share/zookeeper/slf4j-api-1.7.5.jar:/usr/share/zookeeper/servlet-api-2.5-20081211.jar:/usr/share/zookeeper/netty-3.7.0.Final.jar:/usr/share/zookeeper/log4j-1.2.16.jar:/usr/share/zookeeper/jline-2.11.jar:/usr/share/zookeeper/jetty-util-6.1.26.jar:/usr/share/zookeeper/jetty-6.1.26.jar:/usr/share/zookeeper/javacc.jar:/usr/share/zookeeper/jackson-mapper-asl-1.9.11.jar:/usr/share/zookeeper/jackson-core-asl-1.9.11.jar:/usr/share/zookeeper/commons-cli-1.2.jar:/usr/src/java/lib/*.jar:/usr/etc/zookeeper" - -ZOOCFG="$ZOOCFGDIR/zoo.cfg" -ZOO_LOG_DIR=/var/log/$NAME -USER=zookeeper -GROUP=zookeeper -PIDDIR=/var/run/$NAME -PIDFILE=$PIDDIR/$NAME.pid -SCRIPTNAME=/etc/init.d/$NAME -JAVA=/usr/bin/java -ZOOMAIN="org.apache.zookeeper.server.quorum.QuorumPeerMain" -ZOO_LOG4J_PROP="INFO,ROLLINGFILE" -JMXLOCALONLY=false -JAVA_OPTS="-Xms{{ '{{' }} cluster.get('xms','128M') {{ '{{' }} '}}' }} \ - -Xmx{{ '{{' }} cluster.get('xmx','1G') {{ '{{' }} '}}' }} \ - -Xloggc:/var/log/$NAME/zookeeper-gc.log \ - -XX:+UseGCLogFileRotation \ - -XX:NumberOfGCLogFiles=16 \ - -XX:GCLogFileSize=16M \ - -verbose:gc \ - -XX:+PrintGCTimeStamps \ - -XX:+PrintGCDateStamps \ - -XX:+PrintGCDetails - -XX:+PrintTenuringDistribution \ - -XX:+PrintGCApplicationStoppedTime \ - -XX:+PrintGCApplicationConcurrentTime \ - -XX:+PrintSafepointStatistics \ - -XX:+UseParNewGC \ - -XX:+UseConcMarkSweepGC \ --XX:+CMSParallelRemarkEnabled" -``` - -Salt init: - -``` text -description "zookeeper-{{ '{{' }} cluster['name'] {{ '{{' }} '}}' }} centralized coordination service" - -start on runlevel [2345] -stop on runlevel [!2345] - -respawn - -limit nofile 8192 8192 - -pre-start script - [ -r "/etc/zookeeper-{{ '{{' }} cluster['name'] {{ '{{' }} '}}' }}/conf/environment" ] || exit 0 - . /etc/zookeeper-{{ '{{' }} cluster['name'] {{ '{{' }} '}}' }}/conf/environment - [ -d $ZOO_LOG_DIR ] || mkdir -p $ZOO_LOG_DIR - chown $USER:$GROUP $ZOO_LOG_DIR -end script - -script - . /etc/zookeeper-{{ '{{' }} cluster['name'] {{ '{{' }} '}}' }}/conf/environment - [ -r /etc/default/zookeeper ] && . 
/etc/default/zookeeper
-    if [ -z "$JMXDISABLE" ]; then
-        JAVA_OPTS="$JAVA_OPTS -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.local.only=$JMXLOCALONLY"
-    fi
-    exec start-stop-daemon --start -c $USER --exec $JAVA --name zookeeper-{{ '{{' }} cluster['name'] {{ '{{' }} '}}' }} \
-        -- -cp $CLASSPATH $JAVA_OPTS -Dzookeeper.log.dir=${ZOO_LOG_DIR} \
-        -Dzookeeper.root.logger=${ZOO_LOG4J_PROP} $ZOOMAIN $ZOOCFG
-end script
-```
-
diff --git a/docs/ru/operations/tips.md b/docs/ru/operations/tips.md
new file mode 120000
index 00000000000..9b3413bdbc3
--- /dev/null
+++ b/docs/ru/operations/tips.md
@@ -0,0 +1 @@
+../../en/operations/tips.md
\ No newline at end of file
diff --git a/docs/ru/operations/update.md b/docs/ru/operations/update.md
index 5c187ed1604..a3e87b52ede 100644
--- a/docs/ru/operations/update.md
+++ b/docs/ru/operations/update.md
@@ -3,7 +3,7 @@ toc_priority: 47
toc_title: "Обновление ClickHouse"
---
-# Обновление ClickHouse {#obnovlenie-clickhouse}
+# Обновление ClickHouse {#clickhouse-upgrade}
Если ClickHouse установлен с помощью deb-пакетов, выполните следующие команды на сервере:
@@ -15,4 +15,17 @@ $ sudo service clickhouse-server restart
Если ClickHouse установлен не из рекомендуемых deb-пакетов, используйте соответствующий метод обновления.
-ClickHouse не поддерживает распределенное обновление. Операция должна выполняться последовательно на каждом отдельном сервере. Не обновляйте все серверы в кластере одновременно, иначе кластер становится недоступным в течение некоторого времени.
+!!! note "Примечание"
+    Вы можете обновить сразу несколько серверов, кроме случая, когда все реплики одного шарда отключены.
+
+Обновление ClickHouse до определенной версии:
+
+**Пример**
+
+`xx.yy.a.b` — это номер текущей стабильной версии. Последнюю стабильную версию можно узнать [здесь](https://github.com/ClickHouse/ClickHouse/releases).
+
+```bash
+$ sudo apt-get update
+$ sudo apt-get install clickhouse-server=xx.yy.a.b clickhouse-client=xx.yy.a.b clickhouse-common-static=xx.yy.a.b
+$ sudo service clickhouse-server restart
+```
diff --git a/docs/ru/operations/utilities/clickhouse-compressor.md b/docs/ru/operations/utilities/clickhouse-compressor.md
new file mode 100644
index 00000000000..d7f6862a62c
--- /dev/null
+++ b/docs/ru/operations/utilities/clickhouse-compressor.md
@@ -0,0 +1,27 @@
+## ClickHouse compressor
+
+Simple program for data compression and decompression in the ClickHouse way.
+
+### Examples
+
+Compress data with LZ4:
+```
+$ ./clickhouse-compressor < input_file > output_file
+```
+
+Decompress data from LZ4 format:
+```
+$ ./clickhouse-compressor --decompress < input_file > output_file
+```
+
+Compress data with ZSTD at level 5:
+
+```
+$ ./clickhouse-compressor --codec 'ZSTD(5)' < input_file > output_file
+```
+
+Compress data with Delta of four bytes and ZSTD level 10:
+
+```
+$ ./clickhouse-compressor --codec 'Delta(4)' --codec 'ZSTD(10)' < input_file > output_file
+```
diff --git a/docs/ru/operations/utilities/clickhouse-format.md b/docs/ru/operations/utilities/clickhouse-format.md
new file mode 100644
index 00000000000..43043fcc1d5
--- /dev/null
+++ b/docs/ru/operations/utilities/clickhouse-format.md
@@ -0,0 +1,98 @@
+---
+toc_priority: 65
+toc_title: clickhouse-format
+---
+
+# clickhouse-format {#clickhouse-format}
+
+Позволяет форматировать входящие запросы.
+
+Ключи:
+
+- `--help` или `-h` — выводит описание ключей.
+- `--hilite` — добавляет подсветку синтаксиса с экранированием символов.
+- `--oneline` — форматирование в одну строку.
+- `--quiet` или `-q` — проверяет синтаксис без вывода результата.
+- `--multiquery` или `-n` — поддерживает несколько запросов в одной строке.
+- `--obfuscate` — обфусцирует вместо форматирования.
+- `--seed <строка>` — задает строку, которая определяет результат обфускации.
+- `--backslash` — добавляет обратный слеш в конце каждой строки отформатированного запроса. Удобно использовать, если многострочный запрос скопирован из интернета или другого источника и его нужно выполнить из командной строки.
+
+## Примеры {#examples}
+
+1. Подсветка синтаксиса и форматирование в одну строку:
+
+```bash
+$ clickhouse-format --oneline --hilite <<< "SELECT sum(number) FROM numbers(5);"
+```
+
+Результат:
+
+```sql
+SELECT sum(number) FROM numbers(5)
+```
+
+2. Несколько запросов в одной строке:
+
+```bash
+$ clickhouse-format -n <<< "SELECT * FROM (SELECT 1 AS x UNION ALL SELECT 1 UNION DISTINCT SELECT 3);"
+```
+
+Результат:
+
+```text
+SELECT *
+FROM
+(
+    SELECT 1 AS x
+    UNION ALL
+    SELECT 1
+    UNION DISTINCT
+    SELECT 3
+)
+;
+```
+
+3. Обфускация:
+
+```bash
+$ clickhouse-format --seed Hello --obfuscate <<< "SELECT cost_first_screen BETWEEN a AND b, CASE WHEN x >= 123 THEN y ELSE NULL END;"
+```
+
+Результат:
+
+```text
+SELECT treasury_mammoth_hazelnut BETWEEN nutmeg AND span, CASE WHEN chive >= 116 THEN switching ELSE ANYTHING END;
+```
+
+Тот же запрос с другой инициализацией обфускатора:
+
+```bash
+$ clickhouse-format --seed World --obfuscate <<< "SELECT cost_first_screen BETWEEN a AND b, CASE WHEN x >= 123 THEN y ELSE NULL END;"
+```
+
+Результат:
+
+```text
+SELECT horse_tape_summer BETWEEN folklore AND moccasins, CASE WHEN intestine >= 116 THEN nonconformist ELSE FORESTRY END;
+```
+
+4. Добавление обратного слеша:
+
+```bash
+$ clickhouse-format --backslash <<< "SELECT * FROM (SELECT 1 AS x UNION ALL SELECT 1 UNION DISTINCT SELECT 3);"
+```
+
+Результат:
+
+```text
+SELECT * \
+FROM \
+( \
+    SELECT 1 AS x \
+    UNION ALL \
+    SELECT 1 \
+    UNION DISTINCT \
+    SELECT 3 \
+)
+```
diff --git a/docs/ru/operations/utilities/index.md b/docs/ru/operations/utilities/index.md
index fa257fb4b1a..88bd0305386 100644
--- a/docs/ru/operations/utilities/index.md
+++ b/docs/ru/operations/utilities/index.md
@@ -6,6 +6,10 @@ toc_title: "Обзор"
# Утилиты ClickHouse {#utility-clickhouse}
-- [clickhouse-local](clickhouse-local.md)
+- [clickhouse-local](clickhouse-local.md) - позволяет выполнять SQL-запросы над данными без остановки сервера ClickHouse, подобно утилите `awk`.
- [clickhouse-copier](clickhouse-copier.md) - копирует (и перешардирует) данные с одного кластера на другой.
-
+- [clickhouse-benchmark](../../operations/utilities/clickhouse-benchmark.md) — устанавливает соединение с сервером ClickHouse и запускает циклическое выполнение указанных запросов.
+- [clickhouse-format](../../operations/utilities/clickhouse-format.md) — позволяет форматировать входящие запросы.
+- [ClickHouse obfuscator](../../operations/utilities/clickhouse-obfuscator.md) — обфусцирует данные.
+- [ClickHouse compressor](../../operations/utilities/clickhouse-compressor.md) — упаковывает и распаковывает данные.
+- [clickhouse-odbc-bridge](../../operations/utilities/odbc-bridge.md) — прокси-сервер для ODBC.
diff --git a/docs/ru/operations/utilities/odbc-bridge.md b/docs/ru/operations/utilities/odbc-bridge.md
new file mode 100644
index 00000000000..39c796c10c1
--- /dev/null
+++ b/docs/ru/operations/utilities/odbc-bridge.md
@@ -0,0 +1,38 @@
+# clickhouse-odbc-bridge
+
+Simple HTTP server that works like a proxy for the ODBC driver.
The main motivation
was possible segfaults or other faults in ODBC implementations, which can
crash the whole clickhouse-server process.
+
+This tool works via HTTP, not via pipes, shared memory, or TCP because:
+- It's simpler to implement
+- It's simpler to debug
+- jdbc-bridge can be implemented in the same way
+
+## Usage
+
+`clickhouse-server` uses this tool inside the odbc table function and StorageODBC.
+However, it can be used as a standalone tool from the command line with the following
+parameters in the POST-request URL:
+- `connection_string` -- ODBC connection string.
+- `columns` -- columns in ClickHouse NamesAndTypesList format, name in backticks,
+  type as string. Name and type are space separated, rows separated with
+  newline.
+- `max_block_size` -- optional parameter, sets the maximum size of a single block.
+The query is sent in the POST body. The response is returned in RowBinary format.
+
+## Example
+
+```bash
+$ clickhouse-odbc-bridge --http-port 9018 --daemon
+
+$ curl -d "query=SELECT PageID, ImpID, AdType FROM Keys ORDER BY PageID, ImpID" --data-urlencode "connection_string=DSN=ClickHouse;DATABASE=stat" --data-urlencode "columns=columns format version: 1
+3 columns:
+\`PageID\` String
+\`ImpID\` String
+\`AdType\` String
+" "http://localhost:9018/" > result.txt
+
+$ cat result.txt # Result in RowBinary format
+12246623837185725195925621517
+```
diff --git a/docs/ru/sql-reference/aggregate-functions/combinators.md b/docs/ru/sql-reference/aggregate-functions/combinators.md
index eb52fa9bc75..74f9d1c1c05 100644
--- a/docs/ru/sql-reference/aggregate-functions/combinators.md
+++ b/docs/ru/sql-reference/aggregate-functions/combinators.md
@@ -27,6 +27,40 @@ toc_title: "Комбинаторы агрегатных функций"
Комбинаторы -If и -Array можно сочетать. При этом, должен сначала идти Array, а потом If. Примеры: `uniqArrayIf(arr, cond)`, `quantilesTimingArrayIf(level1, level2)(arr, cond)`. Из-за такого порядка получается, что аргумент cond не должен быть массивом.
+## -SimpleState {#agg-functions-combinator-simplestate}
+
+При использовании этого комбинатора агрегатная функция возвращает то же значение, но типа [SimpleAggregateFunction(...)](../../sql-reference/data-types/simpleaggregatefunction.md). Текущее значение функции может храниться в таблице для последующей работы с таблицами семейства [AggregatingMergeTree](../../engines/table-engines/mergetree-family/aggregatingmergetree.md).
+
+**Синтаксис**
+
+``` sql
+SimpleState(x)
+```
+
+**Аргументы**
+
+- `x` — параметры агрегатной функции.
+
+**Возвращаемое значение**
+
+Значение агрегатной функции типа `SimpleAggregateFunction(...)`.
+
+**Пример**
+
+Запрос:
+
+``` sql
+WITH anySimpleState(number) AS c SELECT toTypeName(c), c FROM numbers(1);
+```
+
+Результат:
+
+``` text
+┌─toTypeName(c)────────────────────────┬─c─┐
+│ SimpleAggregateFunction(any, UInt64) │ 0 │
+└──────────────────────────────────────┴───┘
+```
+
## -State {#state}
В случае применения этого комбинатора, агрегатная функция возвращает не готовое значение (например, в случае функции [uniq](reference/uniq.md#agg_function-uniq) — количество уникальных значений), а промежуточное состояние агрегации (например, в случае функции `uniq` — хэш-таблицу для расчёта количества уникальных значений), которое имеет тип `AggregateFunction(...)` и может использоваться для дальнейшей обработки или может быть сохранено в таблицу для последующей доагрегации.
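An illustrative pairing of `-State` with `-Merge` (not part of the patch; the data here is arbitrary): partial states computed over two chunks are merged into the final value. `uniq` over 0..9 and 5..14 should yield 15:

``` sql
SELECT uniqMerge(state) AS total
FROM
(
    -- each subquery produces an intermediate AggregateFunction(uniq, UInt64) state
    SELECT uniqState(number) AS state FROM numbers(10)
    UNION ALL
    SELECT uniqState(number) AS state FROM numbers(5, 10)
);
```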
@@ -247,4 +281,3 @@ FROM people
│ [3,2] │ [11.5,12.949999809265137] │
└────────┴───────────────────────────┘
```
-
diff --git a/docs/ru/sql-reference/aggregate-functions/reference/deltasum.md b/docs/ru/sql-reference/aggregate-functions/reference/deltasum.md
index b025a248f3c..6825847f256 100644
--- a/docs/ru/sql-reference/aggregate-functions/reference/deltasum.md
+++ b/docs/ru/sql-reference/aggregate-functions/reference/deltasum.md
@@ -6,6 +6,9 @@ toc_priority: 141
Суммирует арифметическую разницу между последовательными строками. Если разница отрицательна — она будет проигнорирована.
+!!! info "Примечание"
+    Чтобы эта функция работала должным образом, исходные данные должны быть отсортированы. В [материализованном представлении](../../../sql-reference/statements/create/view.md#materialized) вместо нее рекомендуется использовать [deltaSumTimestamp](../../../sql-reference/aggregate-functions/reference/deltasumtimestamp.md#agg_functions-deltasumtimestamp).
+
**Синтаксис**
``` sql
@@ -18,7 +21,8 @@ deltaSum(value)
**Возвращаемое значение**
-- накопленная арифметическая разница, типа `Integer` или `Float`.
+- Накопленная арифметическая разница.
+Тип: `Integer` или `Float`.
**Примеры**
diff --git a/docs/ru/sql-reference/aggregate-functions/reference/deltasumtimestamp.md b/docs/ru/sql-reference/aggregate-functions/reference/deltasumtimestamp.md
new file mode 100644
index 00000000000..10294eb9e6d
--- /dev/null
+++ b/docs/ru/sql-reference/aggregate-functions/reference/deltasumtimestamp.md
@@ -0,0 +1,45 @@
+---
+toc_priority: 141
+---
+
+# deltaSumTimestamp {#agg_functions-deltasumtimestamp}
+
+Суммирует разницу между последовательными строками. Если разница отрицательна — она будет проигнорирована.
+
+Эта функция предназначена в первую очередь для [материализованных представлений](../../../sql-reference/statements/create/view.md#materialized), упорядоченных по некоторому временному бакету согласно timestamp, например, по бакету `toStartOfMinute`. Поскольку строки в таком материализованном представлении будут иметь одинаковый timestamp, невозможно объединить их в правильном порядке. Функция отслеживает `timestamp` наблюдаемых значений, поэтому возможно правильно упорядочить состояния во время слияния.
+
+Чтобы вычислить разницу между упорядоченными последовательными строками, вы можете использовать функцию [deltaSum](../../../sql-reference/aggregate-functions/reference/deltasum.md#agg_functions-deltasum) вместо функции `deltaSumTimestamp`.
+
+**Синтаксис**
+
+``` sql
+deltaSumTimestamp(value, timestamp)
+```
+
+**Аргументы**
+
+- `value` — входные значения, должны быть типа [Integer](../../data-types/int-uint.md), или [Float](../../data-types/float.md), или [Date](../../data-types/date.md), или [DateTime](../../data-types/datetime.md).
+- `timestamp` — параметр для упорядочивания значений, должен быть типа [Integer](../../data-types/int-uint.md), или [Float](../../data-types/float.md), или [Date](../../data-types/date.md), или [DateTime](../../data-types/datetime.md).
+
+**Возвращаемое значение**
+
+- Накопленная разница между последовательными значениями, упорядоченными по параметру `timestamp`.
+
+Тип: [Integer](../../data-types/int-uint.md), или [Float](../../data-types/float.md), или [Date](../../data-types/date.md), или [DateTime](../../data-types/datetime.md).
+ +**Пример** + +Запрос: + +```sql +SELECT deltaSumTimestamp(value, timestamp) +FROM (SELECT number AS timestamp, [0, 4, 8, 3, 0, 0, 0, 1, 3, 5][number] AS value FROM numbers(1, 10)); +``` + +Результат: + +``` text +┌─deltaSumTimestamp(value, timestamp)─┐ +│ 13 │ +└─────────────────────────────────────┘ +``` diff --git a/docs/ru/sql-reference/aggregate-functions/reference/max.md b/docs/ru/sql-reference/aggregate-functions/reference/max.md deleted file mode 100644 index 4f61ecd051d..00000000000 --- a/docs/ru/sql-reference/aggregate-functions/reference/max.md +++ /dev/null @@ -1,8 +0,0 @@ ---- -toc_priority: 3 ---- - -# max {#agg_function-max} - -Вычисляет максимум. - diff --git a/docs/ru/sql-reference/aggregate-functions/reference/max.md b/docs/ru/sql-reference/aggregate-functions/reference/max.md new file mode 120000 index 00000000000..ae47679c80e --- /dev/null +++ b/docs/ru/sql-reference/aggregate-functions/reference/max.md @@ -0,0 +1 @@ +../../../../en/sql-reference/aggregate-functions/reference/max.md \ No newline at end of file diff --git a/docs/ru/sql-reference/aggregate-functions/reference/min.md b/docs/ru/sql-reference/aggregate-functions/reference/min.md deleted file mode 100644 index 16dd577e790..00000000000 --- a/docs/ru/sql-reference/aggregate-functions/reference/min.md +++ /dev/null @@ -1,8 +0,0 @@ ---- -toc_priority: 2 ---- - -## min {#agg_function-min} - -Вычисляет минимум. - diff --git a/docs/ru/sql-reference/aggregate-functions/reference/min.md b/docs/ru/sql-reference/aggregate-functions/reference/min.md new file mode 120000 index 00000000000..61417b347a8 --- /dev/null +++ b/docs/ru/sql-reference/aggregate-functions/reference/min.md @@ -0,0 +1 @@ +../../../../en/sql-reference/aggregate-functions/reference/min.md \ No newline at end of file diff --git a/docs/ru/sql-reference/aggregate-functions/reference/quantiletdigestweighted.md b/docs/ru/sql-reference/aggregate-functions/reference/quantiletdigestweighted.md index f7239be0ba5..6dce79d8a89 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/quantiletdigestweighted.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/quantiletdigestweighted.md @@ -12,6 +12,9 @@ toc_priority: 208 Внутренние состояния функций `quantile*` не объединяются, если они используются в одном запросе. Если вам необходимо вычислить квантили нескольких уровней, используйте функцию [quantiles](#quantiles), это повысит эффективность запроса. +!!! note "Примечание" + Использование `quantileTDigestWeighted` [не рекомендуется для небольших наборов данных](https://github.com/tdunning/t-digest/issues/167#issuecomment-828650275) и может привести к значительной ошибке. Рассмотрите возможность использования [`quantileTDigest`](../../../sql-reference/aggregate-functions/reference/quantiletdigest.md) в таких случаях. + **Синтаксис** ``` sql diff --git a/docs/ru/sql-reference/data-types/datetime.md b/docs/ru/sql-reference/data-types/datetime.md index ebd780d0d7d..c9804f57c33 100644 --- a/docs/ru/sql-reference/data-types/datetime.md +++ b/docs/ru/sql-reference/data-types/datetime.md @@ -20,8 +20,7 @@ DateTime([timezone]) ## Использование {#ispolzovanie} Момент времени сохраняется как [Unix timestamp](https://ru.wikipedia.org/wiki/Unix-%D0%B2%D1%80%D0%B5%D0%BC%D1%8F), независимо от часового пояса и переходов на летнее/зимнее время. 
Дополнительно, тип `DateTime` позволяет хранить часовой пояс, единый для всей колонки, который влияет на то, как будут отображаться значения типа `DateTime` в текстовом виде и как будут парситься значения заданные в виде строк (‘2020-01-01 05:00:01’). Часовой пояс не хранится в строках таблицы (выборки), а хранится в метаданных колонки.
-Список поддерживаемых временных зон можно найти в [IANA Time Zone Database](https://www.iana.org/time-zones).
-Пакет `tzdata`, содержащий [базу данных часовых поясов IANA](https://www.iana.org/time-zones), должен быть установлен в системе. Используйте команду `timedatectl list-timezones` для получения списка часовых поясов, известных локальной системе.
+Список поддерживаемых часовых поясов можно найти в [IANA Time Zone Database](https://www.iana.org/time-zones) или получить из базы данных, выполнив запрос `SELECT * FROM system.time_zones`. Также [список](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones) есть в Википедии.
Часовой пояс для столбца типа `DateTime` можно в явном виде установить при создании таблицы. Если часовой пояс не установлен, то ClickHouse использует значение параметра [timezone](../../sql-reference/data-types/datetime.md#server_configuration_parameters-timezone), установленное в конфигурации сервера или в настройках операционной системы на момент запуска сервера.
diff --git a/docs/ru/sql-reference/data-types/datetime64.md b/docs/ru/sql-reference/data-types/datetime64.md
index 6576bf9dc0d..3a08da75bb7 100644
--- a/docs/ru/sql-reference/data-types/datetime64.md
+++ b/docs/ru/sql-reference/data-types/datetime64.md
@@ -7,9 +7,9 @@ toc_title: DateTime64
Позволяет хранить момент времени, который может быть представлен как календарная дата и время, с заданной суб-секундной точностью.
-Размер тика/точность: 10^(-precision) секунд, где precision - целочисленный параметр типа.
+Размер тика (точность, precision): 10^(-precision) секунд, где precision - целочисленный параметр.
-Синтаксис:
+**Синтаксис:**
``` sql
DateTime64(precision, [timezone])
@@ -17,9 +17,11 @@ DateTime64(precision, [timezone])
Данные хранятся в виде количества ‘тиков’, прошедших с момента начала эпохи (1970-01-01 00:00:00 UTC), в Int64. Размер тика определяется параметром precision. Дополнительно, тип `DateTime64` позволяет хранить часовой пояс, единый для всей колонки, который влияет на то, как будут отображаться значения типа `DateTime64` в текстовом виде и как будут парситься значения заданные в виде строк (‘2020-01-01 05:00:01.000’). Часовой пояс не хранится в строках таблицы (выборки), а хранится в метаданных колонки. Подробнее см. [DateTime](datetime.md).
-## Пример {#primer}
+Поддерживаются значения от 1 января 1925 г. и до 31 декабря 2283 г.
-**1.** Создание таблицы с столбцом типа `DateTime64` и вставка данных в неё:
+## Примеры {#examples}
+
+1. Создание таблицы со столбцом типа `DateTime64` и вставка данных в неё:
``` sql
CREATE TABLE dt
(
`timestamp` DateTime64(3, 'Europe/Moscow'),
`event_id` UInt8
)
-ENGINE = TinyLog
+ENGINE = TinyLog;
```
``` sql
-INSERT INTO dt Values (1546300800000, 1), ('2019-01-01 00:00:00', 2)
+INSERT INTO dt Values (1546300800000, 1), ('2019-01-01 00:00:00', 2);
```
``` sql
-SELECT * FROM dt
+SELECT * FROM dt;
```
``` text
@@ -46,12 +48,12 @@ SELECT * FROM dt
```
- При вставке даты-времени как числа (аналогично ‘Unix timestamp’), время трактуется как UTC. Unix timestamp `1546300800` в часовом поясе `Europe/London (UTC+0)` представляет время `'2019-01-01 00:00:00'`.
Однако, столбец `timestamp` имеет тип `DateTime('Europe/Moscow (UTC+3)')`, так что при выводе в виде строки время отобразится как `2019-01-01 03:00:00`. -- При вставке даты-времени в виде строки, время трактуется соответственно часовому поясу установленному для колонки. `'2019-01-01 00:00:00'` трактуется как время по Москве (и в базу сохраняется `'2018-12-31 21:00:00'` в виде Unix Timestamp) +- При вставке даты-времени в виде строки, время трактуется соответственно часовому поясу установленному для колонки. `'2019-01-01 00:00:00'` трактуется как время по Москве (и в базу сохраняется `'2018-12-31 21:00:00'` в виде Unix Timestamp). -**2.** Фильтрация по значениям даты-времени +2. Фильтрация по значениям даты и времени ``` sql -SELECT * FROM dt WHERE timestamp = toDateTime64('2019-01-01 00:00:00', 3, 'Europe/Moscow') +SELECT * FROM dt WHERE timestamp = toDateTime64('2019-01-01 00:00:00', 3, 'Europe/Moscow'); ``` ``` text @@ -60,12 +62,12 @@ SELECT * FROM dt WHERE timestamp = toDateTime64('2019-01-01 00:00:00', 3, 'Europ └─────────────────────────┴──────────┘ ``` -В отличие от типа `DateTime`, `DateTime64` не конвертируется из строк автоматически +В отличие от типа `DateTime`, `DateTime64` не конвертируется из строк автоматически. -**3.** Получение часового пояса для значения типа `DateTime64`: +3. Получение часового пояса для значения типа `DateTime64`: ``` sql -SELECT toDateTime64(now(), 3, 'Europe/Moscow') AS column, toTypeName(column) AS x +SELECT toDateTime64(now(), 3, 'Europe/Moscow') AS column, toTypeName(column) AS x; ``` ``` text @@ -74,13 +76,13 @@ SELECT toDateTime64(now(), 3, 'Europe/Moscow') AS column, toTypeName(column) AS └─────────────────────────┴────────────────────────────────┘ ``` -**4.** Конвертация часовых поясов +4. Конвертация часовых поясов ``` sql SELECT toDateTime64(timestamp, 3, 'Europe/London') as lon_time, toDateTime64(timestamp, 3, 'Europe/Moscow') as mos_time -FROM dt +FROM dt; ``` ``` text @@ -90,7 +92,7 @@ FROM dt └─────────────────────────┴─────────────────────────┘ ``` -## See Also {#see-also} +**See Also** - [Функции преобразования типов](../../sql-reference/functions/type-conversion-functions.md) - [Функции для работы с датой и временем](../../sql-reference/functions/date-time-functions.md) diff --git a/docs/ru/sql-reference/data-types/simpleaggregatefunction.md b/docs/ru/sql-reference/data-types/simpleaggregatefunction.md index 0948153362b..7b81c577762 100644 --- a/docs/ru/sql-reference/data-types/simpleaggregatefunction.md +++ b/docs/ru/sql-reference/data-types/simpleaggregatefunction.md @@ -3,6 +3,8 @@ Хранит только текущее значение агрегатной функции и не сохраняет ее полное состояние, как это делает [`AggregateFunction`](../../sql-reference/data-types/aggregatefunction.md). Такая оптимизация может быть применена к функциям, которые обладают следующим свойством: результат выполнения функции `f` к набору строк `S1 UNION ALL S2` может быть получен путем выполнения `f` к отдельным частям набора строк, а затем повторного выполнения `f` к результатам: `f(S1 UNION ALL S2) = f(f(S1) UNION ALL f(S2))`. Это свойство гарантирует, что результатов частичной агрегации достаточно для вычисления комбинированной, поэтому хранить и обрабатывать какие-либо дополнительные данные не требуется. +Чтобы получить промежуточное значение, обычно используются агрегатные функции с суффиксом [-SimpleState](../../sql-reference/aggregate-functions/combinators.md#agg-functions-combinator-simplestate). 
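To make the `f(S1 UNION ALL S2) = f(f(S1) UNION ALL f(S2))` property above concrete, here is an illustrative check (not part of the patch), using plain `max` as `f`:

``` sql
-- f(S1) = 4 over 0..4, f(S2) = 9 over 5..9; f over the partial results gives 9,
-- the same as SELECT max(number) FROM numbers(10)
SELECT max(x) AS combined
FROM
(
    SELECT max(number) AS x FROM numbers(5)
    UNION ALL
    SELECT max(number) AS x FROM numbers(5, 5)
);
```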
+ Поддерживаются следующие агрегатные функции: - [`any`](../../sql-reference/aggregate-functions/reference/any.md#agg_function-any) @@ -15,10 +17,12 @@ - [`groupBitOr`](../../sql-reference/aggregate-functions/reference/groupbitor.md#groupbitor) - [`groupBitXor`](../../sql-reference/aggregate-functions/reference/groupbitxor.md#groupbitxor) - [`groupArrayArray`](../../sql-reference/aggregate-functions/reference/grouparray.md#agg_function-grouparray) -- [`groupUniqArrayArray`](../../sql-reference/aggregate-functions/reference/groupuniqarray.md#groupuniqarray) +- [`groupUniqArrayArray`](../../sql-reference/aggregate-functions/reference/groupuniqarray.md) - [`sumMap`](../../sql-reference/aggregate-functions/reference/summap.md#agg_functions-summap) - [`minMap`](../../sql-reference/aggregate-functions/reference/minmap.md#agg_functions-minmap) - [`maxMap`](../../sql-reference/aggregate-functions/reference/maxmap.md#agg_functions-maxmap) +- [`argMin`](../../sql-reference/aggregate-functions/reference/argmin.md) +- [`argMax`](../../sql-reference/aggregate-functions/reference/argmax.md) !!! note "Примечание" Значения `SimpleAggregateFunction(func, Type)` отображаются и хранятся так же, как и `Type`, поэтому комбинаторы [-Merge](../../sql-reference/aggregate-functions/combinators.md#aggregate_functions_combinators-merge) и [-State](../../sql-reference/aggregate-functions/combinators.md#agg-functions-combinator-state) не требуются. diff --git a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md index 285982565c2..e63b5574e30 100644 --- a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md +++ b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md @@ -9,7 +9,7 @@ toc_title: "Хранение словарей в памяти" Рекомендуем [flat](#flat), [hashed](#dicts-external_dicts_dict_layout-hashed) и [complex_key_hashed](#complex-key-hashed). Скорость обработки словарей при этом максимальна. -Размещение с кэшированием не рекомендуется использовать из-за потенциально низкой производительности и сложностей в подборе оптимальных параметров. Читайте об этом подробнее в разделе «[cache](#cache)». +Размещение с кэшированием не рекомендуется использовать из-за потенциально низкой производительности и сложностей в подборе оптимальных параметров. Читайте об этом подробнее в разделе [cache](#cache). Повысить производительность словарей можно следующими способами: @@ -48,7 +48,7 @@ LAYOUT(LAYOUT_TYPE(param value)) -- layout settings ... ``` -## Способы размещения словарей в памяти {#sposoby-razmeshcheniia-slovarei-v-pamiati} +## Способы размещения словарей в памяти {#ways-to-store-dictionaries-in-memory} - [flat](#flat) - [hashed](#dicts-external_dicts_dict_layout-hashed) @@ -65,11 +65,11 @@ LAYOUT(LAYOUT_TYPE(param value)) -- layout settings ### flat {#flat} -Словарь полностью хранится в оперативной памяти в виде плоских массивов. Объём памяти, занимаемой словарём пропорционален размеру самого большого по размеру ключа. +Словарь полностью хранится в оперативной памяти в виде плоских массивов. Объём памяти, занимаемой словарём, пропорционален размеру самого большого ключа (по объему). -Ключ словаря имеет тип `UInt64` и его величина ограничена 500 000. Если при создании словаря обнаружен ключ больше, то ClickHouse бросает исключение и не создает словарь. 
+Ключ словаря имеет тип [UInt64](../../../sql-reference/data-types/int-uint.md) и его величина ограничена параметром `max_array_size` (значение по умолчанию — 500 000). Если при создании словаря обнаружен ключ больше, то ClickHouse бросает исключение и не создает словарь. Начальный размер плоских массивов словарей контролируется параметром initial_array_size (по умолчанию - 1024).
-Поддерживаются все виды источников. При обновлении, данные (из файла, из таблицы) читаются целиком.
+Поддерживаются все виды источников. При обновлении данные (из файла или из таблицы) считываются целиком.
Этот метод обеспечивает максимальную производительность среди всех доступных способов размещения словаря.
@@ -77,40 +77,52 @@
``` xml
<layout>
-  <flat />
+  <flat>
+    <initial_array_size>50000</initial_array_size>
+    <max_array_size>5000000</max_array_size>
+  </flat>
</layout>
```
или
``` sql
-LAYOUT(FLAT())
+LAYOUT(FLAT(INITIAL_ARRAY_SIZE 50000 MAX_ARRAY_SIZE 5000000))
```
### hashed {#dicts-external_dicts_dict_layout-hashed}
-Словарь полностью хранится в оперативной памяти в виде хэш-таблиц. Словарь может содержать произвольное количество элементов с произвольными идентификаторами. На практике, количество ключей может достигать десятков миллионов элементов.
+Словарь полностью хранится в оперативной памяти в виде хэш-таблиц. Словарь может содержать произвольное количество элементов с произвольными идентификаторами. На практике количество ключей может достигать десятков миллионов элементов.
-Поддерживаются все виды источников. При обновлении, данные (из файла, из таблицы) читаются целиком.
+Если `preallocate` имеет значение `true` (по умолчанию `false`), хеш-таблица будет предварительно определена (это ускорит загрузку словаря). Используйте этот метод только в случае, если:
+
+- Источник поддерживает произвольное количество элементов (пока поддерживается только источником `ClickHouse`).
+- В данных нет дубликатов (иначе это может увеличить объем используемой памяти хеш-таблицы).
+
+Поддерживаются все виды источников. При обновлении данные (из файла, из таблицы) читаются целиком.
Пример конфигурации:
``` xml
<layout>
-  <hashed />
+  <hashed>
+    <preallocate>0</preallocate>
+  </hashed>
</layout>
```
или
``` sql
-LAYOUT(HASHED())
+LAYOUT(HASHED(PREALLOCATE 0))
```
### sparse_hashed {#dicts-external_dicts_dict_layout-sparse_hashed}
Аналогичен `hashed`, но при этом занимает меньше места в памяти и генерирует более высокую загрузку CPU.
+Для этого типа размещения также можно задать `preallocate` в значении `true`. В данном случае это более важно, чем для типа `hashed`.
+
Пример конфигурации:
``` xml
@@ -122,7 +134,7 @@
<layout>
<sparse_hashed />
</layout>
```
или
``` sql
-LAYOUT(SPARSE_HASHED())
+LAYOUT(SPARSE_HASHED([PREALLOCATE 0]))
```
### complex_key_hashed {#complex-key-hashed}
@@ -440,4 +452,3 @@ dictGetString('prefix', 'asn', tuple(IPv6StringToNum('2001:db8::1')))
Никакие другие типы не поддерживаются. Функция возвращает атрибут для префикса, соответствующего данному IP-адресу. Если есть перекрывающиеся префиксы, возвращается наиболее специфический.
Данные должны полностью помещаться в оперативной памяти.
-
diff --git a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md
index 9589353649d..1298f05eca0 100644
--- a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md
+++ b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md
@@ -86,3 +86,4 @@ SOURCE(ODBC(... invalidate_query 'SELECT update_time FROM dictionary_source wher
...
```
+Для словарей `Cache`, `ComplexKeyCache`, `SSDCache` и `SSDComplexKeyCache` поддерживается как синхронное, так и асинхронное обновление.
diff --git a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md
index 57f53390d1c..df4742fca45 100644
--- a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md
+++ b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md
@@ -3,7 +3,7 @@ toc_priority: 44
toc_title: "Ключ и поля словаря"
---
-# Ключ и поля словаря {#kliuch-i-polia-slovaria}
+# Ключ и поля словаря {#dictionary-key-and-fields}
Секция `<structure>` описывает ключ словаря и поля, доступные для запросов.
@@ -88,7 +88,7 @@ PRIMARY KEY Id
- `PRIMARY KEY` – имя столбца с ключами.
-### Составной ключ {#sostavnoi-kliuch}
+### Составной ключ {#composite-key}
Ключом может быть кортеж (`tuple`) из полей произвольных типов. В этом случае [layout](external-dicts-dict-layout.md) должен быть `complex_key_hashed` или `complex_key_cache`.
@@ -159,13 +159,12 @@ CREATE DICTIONARY somename (
| Тег | Описание | Обязательный |
|------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------|
| `name` | Имя столбца. | Да |
-| `type` | Тип данных ClickHouse.<br/>ClickHouse пытается привести значение из словаря к заданному типу данных. Например, в случае MySQL, в таблице-источнике поле может быть `TEXT`, `VARCHAR`, `BLOB`, но загружено может быть как `String`. [Nullable](../../../sql-reference/data-types/nullable.md) не поддерживается. | Да |
-| `null_value` | Значение по умолчанию для несуществующего элемента.<br/>В примере это пустая строка. Нельзя указать значение `NULL`. | Да |
+| `type` | Тип данных ClickHouse.<br/>ClickHouse пытается привести значение из словаря к заданному типу данных. Например, в случае MySQL, в таблице-источнике поле может быть `TEXT`, `VARCHAR`, `BLOB`, но загружено может быть как `String`.<br/>[Nullable](../../../sql-reference/data-types/nullable.md) в настоящее время поддерживается для словарей [Flat](external-dicts-dict-layout.md#flat), [Hashed](external-dicts-dict-layout.md#dicts-external_dicts_dict_layout-hashed), [ComplexKeyHashed](external-dicts-dict-layout.md#complex-key-hashed), [Direct](external-dicts-dict-layout.md#direct), [ComplexKeyDirect](external-dicts-dict-layout.md#complex-key-direct), [RangeHashed](external-dicts-dict-layout.md#range-hashed), [Polygon](external-dicts-dict-polygon.md), [Cache](external-dicts-dict-layout.md#cache), [ComplexKeyCache](external-dicts-dict-layout.md#complex-key-cache), [SSDCache](external-dicts-dict-layout.md#ssd-cache), [SSDComplexKeyCache](external-dicts-dict-layout.md#complex-key-ssd-cache). Для словарей [IPTrie](external-dicts-dict-layout.md#ip-trie) `Nullable`-типы не поддерживаются. | Да |
+| `null_value` | Значение по умолчанию для несуществующего элемента.<br/>В примере это пустая строка. Значение [NULL](../../syntax.md#null-literal) можно указывать только для типов `Nullable` (см. предыдущую строку с описанием типов). | Да |
| `expression` | [Выражение](../../syntax.md#syntax-expressions), которое ClickHouse выполняет со значением.<br/>Выражением может быть имя столбца в удаленной SQL базе. Таким образом, вы можете использовать его для создания псевдонима удаленного столбца.<br/><br/>Значение по умолчанию: нет выражения. | Нет |
-| `hierarchical` | Если `true`, то атрибут содержит ключ предка для текущего элемента. Смотрите [Иерархические словари](external-dicts-dict-hierarchical.md).<br/><br/>Default value: `false`. | No |
+| `hierarchical` | Если `true`, то атрибут содержит ключ предка для текущего элемента. Смотрите [Иерархические словари](external-dicts-dict-hierarchical.md).<br/><br/>Значение по умолчанию: `false`. | Нет |
| `is_object_id` | Признак того, что запрос выполняется к документу MongoDB по `ObjectID`.<br/><br/>Значение по умолчанию: `false`. | Нет |
-## Смотрите также {#smotrite-takzhe}
+**Смотрите также**
- [Функции для работы с внешними словарями](../../../sql-reference/functions/ext-dict-functions.md).
-
diff --git a/docs/ru/sql-reference/dictionaries/index.md b/docs/ru/sql-reference/dictionaries/index.md
index bd432497be8..59c7518d0c5 100644
--- a/docs/ru/sql-reference/dictionaries/index.md
+++ b/docs/ru/sql-reference/dictionaries/index.md
@@ -10,8 +10,6 @@ toc_title: "Введение"
ClickHouse поддерживает специальные функции для работы со словарями, которые можно использовать в запросах. Проще и эффективнее использовать словари с помощью функций, чем `JOIN` с таблицами-справочниками.
-В словаре нельзя хранить значения [NULL](../../sql-reference/syntax.md#null-literal).
-
ClickHouse поддерживает:
- [Встроенные словари](internal-dicts.md#internal_dicts) со специфическим [набором функций](../../sql-reference/dictionaries/external-dictionaries/index.md).
diff --git a/docs/ru/sql-reference/functions/array-functions.md b/docs/ru/sql-reference/functions/array-functions.md
index 560795506a0..10fc91de205 100644
--- a/docs/ru/sql-reference/functions/array-functions.md
+++ b/docs/ru/sql-reference/functions/array-functions.md
@@ -1528,3 +1528,52 @@ SELECT arrayAUC([0.1, 0.4, 0.35, 0.8], [0, 0, 1, 1]);
└─────────────────────────────────────────────┘
```
+## arrayProduct {#arrayproduct}
+
+Возвращает произведение элементов [массива](../../sql-reference/data-types/array.md).
+
+**Синтаксис**
+
+``` sql
+arrayProduct(arr)
+```
+
+**Аргументы**
+
+- `arr` — [массив](../../sql-reference/data-types/array.md) числовых значений.
+
+**Возвращаемое значение**
+
+- Произведение элементов массива.
+
+Тип: [Float64](../../sql-reference/data-types/float.md).
+
+**Примеры**
+
+Запрос:
+
+``` sql
+SELECT arrayProduct([1,2,3,4,5,6]) as res;
+```
+
+Результат:
+
+``` text
+┌─res───┐
+│ 720   │
+└───────┘
+```
+
+Запрос:
+
+``` sql
+SELECT arrayProduct([toDecimal64(1,8), toDecimal64(2,8), toDecimal64(3,8)]) as res, toTypeName(res);
+```
+
+Возвращаемое значение всегда имеет тип [Float64](../../sql-reference/data-types/float.md). Результат:
+
+``` text
+┌─res─┬─toTypeName(arrayProduct(array(toDecimal64(1, 8), toDecimal64(2, 8), toDecimal64(3, 8))))─┐
+│ 6   │ Float64                                                                                  │
+└─────┴──────────────────────────────────────────────────────────────────────────────────────────┘
+```
\ No newline at end of file
diff --git a/docs/ru/sql-reference/functions/bitmap-functions.md b/docs/ru/sql-reference/functions/bitmap-functions.md
index ddae2f3eb40..3da729664d0 100644
--- a/docs/ru/sql-reference/functions/bitmap-functions.md
+++ b/docs/ru/sql-reference/functions/bitmap-functions.md
@@ -25,7 +25,7 @@ SELECT bitmapBuild([1, 2, 3, 4, 5]) AS res, toTypeName(res);
``` text
┌─res─┬─toTypeName(bitmapBuild([1, 2, 3, 4, 5]))─────┐
-│  │ AggregateFunction(groupBitmap, UInt8) │
+│ │ AggregateFunction(groupBitmap, UInt8) │
└─────┴──────────────────────────────────────────────┘
```
diff --git a/docs/ru/sql-reference/functions/date-time-functions.md b/docs/ru/sql-reference/functions/date-time-functions.md
index 0adccbe888b..b442a782100 100644
--- a/docs/ru/sql-reference/functions/date-time-functions.md
+++ b/docs/ru/sql-reference/functions/date-time-functions.md
@@ -23,13 +23,53 @@ SELECT
└─────────────────────┴────────────┴────────────┴─────────────────────┘
```
+## timeZone {#timezone}
+
+Возвращает часовой пояс сервера.
+
+**Синтаксис**
+
+``` sql
+timeZone()
+```
+
+Псевдоним: `timezone`.
+
+**Возвращаемое значение**
+
+- Часовой пояс.
+
+Тип: [String](../../sql-reference/data-types/string.md).
+
## toTimeZone {#totimezone}
-Переводит дату или дату-с-временем в указанный часовой пояс. Часовой пояс (таймзона) это атрибут типов Date/DateTime, внутреннее значение (количество секунд) поля таблицы или колонки результата не изменяется, изменяется тип поля и автоматически его текстовое отображение.
+Переводит дату или дату с временем в указанный часовой пояс. Часовой пояс — это атрибут типов `Date` и `DateTime`. Внутреннее значение (количество секунд) поля таблицы или результирующего столбца не изменяется, изменяется тип поля и, соответственно, его текстовое отображение.
+
+**Синтаксис**
+
+``` sql
+toTimezone(value, timezone)
+```
+
+Псевдоним: `toTimezone`.
+
+**Аргументы**
+
+- `value` — время или дата с временем. [DateTime64](../../sql-reference/data-types/datetime64.md).
+- `timezone` — часовой пояс для возвращаемого значения. [String](../../sql-reference/data-types/string.md).
+
+**Возвращаемое значение**
+
+- Дата с временем.
+
+Тип: [DateTime](../../sql-reference/data-types/datetime.md).
+
+**Пример**
+
+Запрос:
```sql
-SELECT
- toDateTime('2019-01-01 00:00:00', 'UTC') AS time_utc,
+SELECT toDateTime('2019-01-01 00:00:00', 'UTC') AS time_utc,
toTypeName(time_utc) AS type_utc,
toInt32(time_utc) AS int32utc,
toTimeZone(time_utc, 'Asia/Yekaterinburg') AS time_yekat,
@@ -40,6 +80,7 @@ SELECT
toInt32(time_samoa) AS int32samoa
FORMAT Vertical;
```
+Результат:
```text
Row 1:
@@ -57,6 +98,82 @@ int32samoa: 1546300800
`toTimeZone(time_utc, 'Asia/Yekaterinburg')` изменяет тип `DateTime('UTC')` на `DateTime('Asia/Yekaterinburg')`. Значение (unix-время) 1546300800 остается неизменным, но текстовое отображение (результат функции toString()) меняется с `time_utc: 2019-01-01 00:00:00` на `time_yekat: 2019-01-01 05:00:00`.
+## timeZoneOf {#timezoneof}
+
+Возвращает название часового пояса для значений типа [DateTime](../../sql-reference/data-types/datetime.md) и [DateTime64](../../sql-reference/data-types/datetime64.md).
+
+**Синтаксис**
+
+``` sql
+timeZoneOf(value)
+```
+
+Псевдоним: `timezoneOf`.
+
+**Аргументы**
+
+- `value` — Дата с временем. [DateTime](../../sql-reference/data-types/datetime.md) или [DateTime64](../../sql-reference/data-types/datetime64.md).
+
+**Возвращаемое значение**
+
+- Название часового пояса.
+
+Тип: [String](../../sql-reference/data-types/string.md).
+
+**Пример**
+
+Запрос:
+``` sql
+SELECT timezoneOf(now());
+```
+
+Результат:
+``` text
+┌─timezoneOf(now())─┐
+│ Etc/UTC           │
+└───────────────────┘
+```
+
+## timeZoneOffset {#timezoneoffset}
+
+Возвращает смещение часового пояса в секундах от [UTC](https://ru.wikipedia.org/wiki/Всемирное_координированное_время). Функция учитывает [летнее время](https://ru.wikipedia.org/wiki/Летнее_время) и исторические изменения часовых поясов, которые действовали на указанную дату.
+Для вычисления смещения используется информация из [базы данных IANA](https://www.iana.org/time-zones).
+
+**Синтаксис**
+
+``` sql
+timeZoneOffset(value)
+```
+
+Псевдоним: `timezoneOffset`.
+
+**Аргументы**
+
+- `value` — Дата с временем. [DateTime](../../sql-reference/data-types/datetime.md) или [DateTime64](../../sql-reference/data-types/datetime64.md).
+
+**Возвращаемое значение**
+
+- Смещение в секундах от UTC.
+
+Тип: [Int32](../../sql-reference/data-types/int-uint.md).
+ +**Пример** + +Запрос: + +``` sql +SELECT toDateTime('2021-04-21 10:20:30', 'Europe/Moscow') AS Time, toTypeName(Time) AS Type, + timeZoneOffset(Time) AS Offset_in_seconds, (Offset_in_seconds / 3600) AS Offset_in_hours; +``` + +Результат: + +``` text +┌────────────────Time─┬─Type──────────────────────┬─Offset_in_seconds─┬─Offset_in_hours─┐ +│ 2021-04-21 10:20:30 │ DateTime('Europe/Moscow') │ 10800 │ 3 │ +└─────────────────────┴───────────────────────────┴───────────────────┴─────────────────┘ +``` + ## toYear {#toyear} Переводит дату или дату-с-временем в число типа UInt16, содержащее номер года (AD). @@ -943,4 +1060,3 @@ SELECT FROM_UNIXTIME(1234334543, '%Y-%m-%d %R:%S') AS DateTime; │ 2009-02-11 14:42:23 │ └─────────────────────┘ ``` - diff --git a/docs/ru/sql-reference/functions/ext-dict-functions.md b/docs/ru/sql-reference/functions/ext-dict-functions.md index 919f8ebe276..be91145659e 100644 --- a/docs/ru/sql-reference/functions/ext-dict-functions.md +++ b/docs/ru/sql-reference/functions/ext-dict-functions.md @@ -7,21 +7,22 @@ toc_title: "Функции для работы с внешними словар Информацию о подключении и настройке внешних словарей смотрите в разделе [Внешние словари](../../sql-reference/dictionaries/external-dictionaries/external-dicts.md). -## dictGet {#dictget} +## dictGet, dictGetOrDefault, dictGetOrNull {#dictget} Извлекает значение из внешнего словаря. ``` sql -dictGet('dict_name', 'attr_name', id_expr) -dictGetOrDefault('dict_name', 'attr_name', id_expr, default_value_expr) +dictGet('dict_name', attr_names, id_expr) +dictGetOrDefault('dict_name', attr_names, id_expr, default_value_expr) +dictGetOrNull('dict_name', attr_name, id_expr) ``` **Аргументы** - `dict_name` — имя словаря. [Строковый литерал](../syntax.md#syntax-string-literal). -- `attr_name` — имя столбца словаря. [Строковый литерал](../syntax.md#syntax-string-literal). -- `id_expr` — значение ключа словаря. [Выражение](../syntax.md#syntax-expressions), возвращающее значение типа [UInt64](../../sql-reference/functions/ext-dict-functions.md) или [Tuple](../../sql-reference/functions/ext-dict-functions.md) в зависимости от конфигурации словаря. -- `default_value_expr` — значение, возвращаемое в том случае, когда словарь не содержит строки с заданным ключом `id_expr`. [Выражение](../syntax.md#syntax-expressions) возвращающее значение с типом данных, сконфигурированным для атрибута `attr_name`. +- `attr_names` — имя столбца словаря, [Строковый литерал](../syntax.md#syntax-string-literal), или кортеж [Tuple](../../sql-reference/data-types/tuple.md) таких имен. +- `id_expr` — значение ключа словаря. [Выражение](../syntax.md#syntax-expressions), возвращающее значение типа [UInt64](../../sql-reference/functions/ext-dict-functions.md) или [Tuple](../../sql-reference/functions/ext-dict-functions.md), в зависимости от конфигурации словаря. +- `default_value_expr` — значение, возвращаемое в том случае, когда словарь не содержит строки с заданным ключом `id_expr`. [Выражение](../syntax.md#syntax-expressions), возвращающее значение с типом данных, сконфигурированным для атрибута `attr_names`, или кортеж [Tuple](../../sql-reference/data-types/tuple.md) таких выражений. **Возвращаемое значение** @@ -31,10 +32,11 @@ dictGetOrDefault('dict_name', 'attr_name', id_expr, default_value_expr) - `dictGet` возвращает содержимое элемента ``, указанного для атрибута в конфигурации словаря. - `dictGetOrDefault` возвращает атрибут `default_value_expr`. + - `dictGetOrNull` возвращает `NULL` в случае, если ключ не найден в словаре. 
Если значение атрибута не удалось обработать или оно не соответствует типу данных атрибута, то ClickHouse генерирует исключение. -**Пример** +**Пример с единственным атрибутом** Создадим текстовый файл `ext-dict-text.csv` со следующим содержимым: @@ -93,6 +95,130 @@ LIMIT 3 └─────┴────────┘ ``` +**Пример с несколькими атрибутами** + +Создадим текстовый файл `ext-dict-mult.csv` со следующим содержимым: + +``` text +1,1,'1' +2,2,'2' +3,3,'3' +``` + +Первый столбец — `id`, второй столбец — `c1`, третий столбец — `c2`. + +Настройка внешнего словаря: + +``` xml + + + ext-dict-mult + + + /path-to/ext-dict-mult.csv + CSV + + + + + + + + id + + + c1 + UInt32 + + + + c2 + String + + + + 0 + + +``` + +Выполним запрос: + +``` sql +SELECT + dictGet('ext-dict-mult', ('c1','c2'), number) AS val, + toTypeName(val) AS type +FROM system.numbers +LIMIT 3; +``` + +``` text +┌─val─────┬─type──────────────────┐ +│ (1,'1') │ Tuple(UInt8, String) │ +│ (2,'2') │ Tuple(UInt8, String) │ +│ (3,'3') │ Tuple(UInt8, String) │ +└─────────┴───────────────────────┘ +``` + +**Пример для словаря с диапазоном ключей** + +Создадим таблицу: + +```sql +CREATE TABLE range_key_dictionary_source_table +( + key UInt64, + start_date Date, + end_date Date, + value String, + value_nullable Nullable(String) +) +ENGINE = TinyLog(); + +INSERT INTO range_key_dictionary_source_table VALUES(1, toDate('2019-05-20'), toDate('2019-05-20'), 'First', 'First'); +INSERT INTO range_key_dictionary_source_table VALUES(2, toDate('2019-05-20'), toDate('2019-05-20'), 'Second', NULL); +INSERT INTO range_key_dictionary_source_table VALUES(3, toDate('2019-05-20'), toDate('2019-05-20'), 'Third', 'Third'); +``` + +Создадим внешний словарь: + +```sql +CREATE DICTIONARY range_key_dictionary +( + key UInt64, + start_date Date, + end_date Date, + value String, + value_nullable Nullable(String) +) +PRIMARY KEY key +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() TABLE 'range_key_dictionary_source_table')) +LIFETIME(MIN 1 MAX 1000) +LAYOUT(RANGE_HASHED()) +RANGE(MIN start_date MAX end_date); +``` + +Выполним запрос: + +``` sql +SELECT + (number, toDate('2019-05-20')), + dictHas('range_key_dictionary', number, toDate('2019-05-20')), + dictGetOrNull('range_key_dictionary', 'value', number, toDate('2019-05-20')), + dictGetOrNull('range_key_dictionary', 'value_nullable', number, toDate('2019-05-20')), + dictGetOrNull('range_key_dictionary', ('value', 'value_nullable'), number, toDate('2019-05-20')) +FROM system.numbers LIMIT 5 FORMAT TabSeparated; +``` +Результат: + +``` text +(0,'2019-05-20') 0 \N \N (NULL,NULL) +(1,'2019-05-20') 1 First First ('First','First') +(2,'2019-05-20') 0 \N \N (NULL,NULL) +(3,'2019-05-20') 0 \N \N (NULL,NULL) +(4,'2019-05-20') 0 \N \N (NULL,NULL) +``` + **Смотрите также** - [Внешние словари](../../sql-reference/functions/ext-dict-functions.md) @@ -197,4 +323,3 @@ dictGet[Type]OrDefault('dict_name', 'attr_name', id_expr, default_value_expr) - `dictGet[Type]OrDefault` возвращает аргумент `default_value_expr`. Если значение атрибута не удалось обработать или оно не соответствует типу данных атрибута, то ClickHouse генерирует исключение. 
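Для наглядности приведем гипотетический запрос (имя словаря `ext-dict-test` и атрибут `c1` типа `UInt32` взяты по аналогии с примером выше и являются допущением):

``` sql
-- Для отсутствующего ключа dictGetUInt32OrDefault возвращает указанное значение по умолчанию,
-- а dictGetOrNull в этом случае возвращает NULL.
SELECT
    dictGetUInt32OrDefault('ext-dict-test', 'c1', toUInt64(100), toUInt32(0)) AS with_default,
    dictGetOrNull('ext-dict-test', 'c1', toUInt64(100)) AS with_null;
```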
- diff --git a/docs/ru/sql-reference/functions/functions-for-nulls.md b/docs/ru/sql-reference/functions/functions-for-nulls.md index 365dba75da7..7285f803264 100644 --- a/docs/ru/sql-reference/functions/functions-for-nulls.md +++ b/docs/ru/sql-reference/functions/functions-for-nulls.md @@ -224,7 +224,7 @@ assumeNotNull(x) **Возвращаемые значения** - Исходное значение с не `Nullable` типом, если оно — не `NULL`. -- Значение по умолчанию для не `Nullable` типа, если исходное значение — `NULL`. +- Неспецифицированный результат, зависящий от реализации, если исходное значение — `NULL`. **Пример** diff --git a/docs/ru/sql-reference/functions/hash-functions.md b/docs/ru/sql-reference/functions/hash-functions.md index 2efff9c3727..07c741e0588 100644 --- a/docs/ru/sql-reference/functions/hash-functions.md +++ b/docs/ru/sql-reference/functions/hash-functions.md @@ -430,7 +430,7 @@ murmurHash3_128( expr ) **Аргументы** -- `expr` — [выражение](../syntax.md#syntax-expressions), возвращающее значение типа[String](../../sql-reference/functions/hash-functions.md). +- `expr` — [выражение](../syntax.md#syntax-expressions), возвращающее значение типа [String](../../sql-reference/functions/hash-functions.md). **Возвращаемое значение** @@ -439,13 +439,13 @@ murmurHash3_128( expr ) **Пример** ``` sql -SELECT murmurHash3_128('example_string') AS MurmurHash3, toTypeName(MurmurHash3) AS type; +SELECT hex(murmurHash3_128('example_string')) AS MurmurHash3, toTypeName(MurmurHash3) AS type; ``` ``` text -┌─MurmurHash3──────┬─type────────────┐ -│ 6�1�4"S5KT�~~q │ FixedString(16) │ -└──────────────────┴─────────────────┘ +┌─MurmurHash3──────────────────────┬─type───┐ +│ 368A1A311CB7342253354B548E7E7E71 │ String │ +└──────────────────────────────────┴────────┘ ``` ## xxHash32, xxHash64 {#hash-functions-xxhash32-xxhash64} diff --git a/docs/ru/sql-reference/functions/json-functions.md b/docs/ru/sql-reference/functions/json-functions.md index 5d419d26981..4de487c03ad 100644 --- a/docs/ru/sql-reference/functions/json-functions.md +++ b/docs/ru/sql-reference/functions/json-functions.md @@ -16,51 +16,65 @@ toc_title: JSON ## visitParamHas(params, name) {#visitparamhasparams-name} -Проверить наличие поля с именем name. +Проверяет наличие поля с именем `name`. + +Алиас: `simpleJSONHas`. ## visitParamExtractUInt(params, name) {#visitparamextractuintparams-name} -Распарсить UInt64 из значения поля с именем name. Если поле строковое - попытаться распарсить число из начала строки. Если такого поля нет, или если оно есть, но содержит не число, то вернуть 0. +Пытается выделить число типа UInt64 из значения поля с именем `name`. Если поле строковое, пытается выделить число из начала строки. Если такого поля нет, или если оно есть, но содержит не число, то возвращает 0. + +Алиас: `simpleJSONExtractUInt`. ## visitParamExtractInt(params, name) {#visitparamextractintparams-name} Аналогично для Int64. +Алиас: `simpleJSONExtractInt`. + ## visitParamExtractFloat(params, name) {#visitparamextractfloatparams-name} Аналогично для Float64. +Алиас: `simpleJSONExtractFloat`. + ## visitParamExtractBool(params, name) {#visitparamextractboolparams-name} -Распарсить значение true/false. Результат - UInt8. +Пытается выделить значение true/false. Результат — UInt8. + +Алиас: `simpleJSONExtractBool`. ## visitParamExtractRaw(params, name) {#visitparamextractrawparams-name} -Вернуть значение поля, включая разделители. +Возвращает значение поля, включая разделители. + +Алиас: `simpleJSONExtractRaw`. 
Примеры: ``` sql -visitParamExtractRaw('{"abc":"\\n\\u0000"}', 'abc') = '"\\n\\u0000"' -visitParamExtractRaw('{"abc":{"def":[1,2,3]}}', 'abc') = '{"def":[1,2,3]}' +visitParamExtractRaw('{"abc":"\\n\\u0000"}', 'abc') = '"\\n\\u0000"'; +visitParamExtractRaw('{"abc":{"def":[1,2,3]}}', 'abc') = '{"def":[1,2,3]}'; ``` ## visitParamExtractString(params, name) {#visitparamextractstringparams-name} -Распарсить строку в двойных кавычках. У значения убирается экранирование. Если убрать экранированные символы не удалось, то возвращается пустая строка. +Разбирает строку в двойных кавычках. У значения убирается экранирование. Если убрать экранированные символы не удалось, то возвращается пустая строка. + +Алиас: `simpleJSONExtractString`. Примеры: ``` sql -visitParamExtractString('{"abc":"\\n\\u0000"}', 'abc') = '\n\0' -visitParamExtractString('{"abc":"\\u263a"}', 'abc') = '☺' -visitParamExtractString('{"abc":"\\u263"}', 'abc') = '' -visitParamExtractString('{"abc":"hello}', 'abc') = '' +visitParamExtractString('{"abc":"\\n\\u0000"}', 'abc') = '\n\0'; +visitParamExtractString('{"abc":"\\u263a"}', 'abc') = '☺'; +visitParamExtractString('{"abc":"\\u263"}', 'abc') = ''; +visitParamExtractString('{"abc":"hello}', 'abc') = ''; ``` -На данный момент, не поддерживаются записанные в формате `\uXXXX\uYYYY` кодовые точки не из basic multilingual plane (они переводятся не в UTF-8, а в CESU-8). +На данный момент не поддерживаются записанные в формате `\uXXXX\uYYYY` кодовые точки не из basic multilingual plane (они переводятся не в UTF-8, а в CESU-8). -Следующие функции используют [simdjson](https://github.com/lemire/simdjson) который разработан под более сложные требования для разбора JSON. Упомянутое выше предположение 2 по-прежнему применимо. +Следующие функции используют [simdjson](https://github.com/lemire/simdjson), который разработан под более сложные требования для разбора JSON. Упомянутое выше допущение 2 по-прежнему применимо. ## isValidJSON(json) {#isvalidjsonjson} @@ -292,4 +306,3 @@ SELECT JSONExtractKeysAndValuesRaw('{"a": [-100, 200.0], "b":{"c": {"d": "hello" │ [('d','"hello"'),('f','"world"')] │ └───────────────────────────────────────────────────────────────────────────────────────────────────────┘ ``` - diff --git a/docs/ru/sql-reference/functions/other-functions.md b/docs/ru/sql-reference/functions/other-functions.md index f9b3e5c3e68..84bbc6af968 100644 --- a/docs/ru/sql-reference/functions/other-functions.md +++ b/docs/ru/sql-reference/functions/other-functions.md @@ -1133,6 +1133,111 @@ SELECT defaultValueOfTypeName('Nullable(Int8)') └──────────────────────────────────────────┘ ``` +## indexHint {#indexhint} +Возвращает все данные из диапазона, в который попадают данные, соответствующие указанному выражению. +Переданное выражение не будет вычислено. Выбор диапазона производится по индексу. +Индекс в ClickHouse разреженный, при чтении диапазона в ответ попадают «лишние» соседние данные. + +**Синтаксис** + +```sql +SELECT * FROM table WHERE indexHint() +``` + +**Возвращаемое значение** + +Возвращает диапазон индекса, в котором выполняется заданное условие. + +Тип: [Uint8](https://clickhouse.yandex/docs/ru/data_types/int_uint/#diapazony-uint). + +**Пример** + +Рассмотрим пример с использованием тестовых данных таблицы [ontime](../../getting-started/example-datasets/ontime.md). + +Исходная таблица: + +```sql +SELECT count() FROM ontime +``` + +```text +┌─count()─┐ +│ 4276457 │ +└─────────┘ +``` + +В таблице есть индексы по полям `(FlightDate, (Year, FlightDate))`. 
+ +Выполним выборку по дате, где индекс не используется. + +Запрос: + +```sql +SELECT FlightDate AS k, count() FROM ontime GROUP BY k ORDER BY k +``` + +ClickHouse обработал всю таблицу (`Processed 4.28 million rows`). + +Результат: + +```text +┌──────────k─┬─count()─┐ +│ 2017-01-01 │ 13970 │ +│ 2017-01-02 │ 15882 │ +........................ +│ 2017-09-28 │ 16411 │ +│ 2017-09-29 │ 16384 │ +│ 2017-09-30 │ 12520 │ +└────────────┴─────────┘ +``` + +Для подключения индекса выбираем конкретную дату. + +Запрос: + +```sql +SELECT FlightDate AS k, count() FROM ontime WHERE k = '2017-09-15' GROUP BY k ORDER BY k +``` + +При использовании индекса ClickHouse обработал значительно меньшее количество строк (`Processed 32.74 thousand rows`). + +Результат: + +```text +┌──────────k─┬─count()─┐ +│ 2017-09-15 │ 16428 │ +└────────────┴─────────┘ +``` + +Передадим в функцию `indexHint` выражение `k = '2017-09-15'`. + +Запрос: + +```sql +SELECT + FlightDate AS k, + count() +FROM ontime +WHERE indexHint(k = '2017-09-15') +GROUP BY k +ORDER BY k ASC +``` + +ClickHouse применил индекс по аналогии с примером выше (`Processed 32.74 thousand rows`). +Выражение `k = '2017-09-15'` не используется при формировании результата. +Функция `indexHint` позволяет увидеть соседние данные. + +Результат: + +```text +┌──────────k─┬─count()─┐ +│ 2017-09-14 │ 7071 │ +│ 2017-09-15 │ 16428 │ +│ 2017-09-16 │ 1077 │ +│ 2017-09-30 │ 8167 │ +└────────────┴─────────┘ +``` + ## replicate {#other-functions-replicate} Создает массив, заполненный одним значением. diff --git a/docs/ru/sql-reference/functions/string-functions.md b/docs/ru/sql-reference/functions/string-functions.md index 6ef7dc01b6a..04af599c09a 100644 --- a/docs/ru/sql-reference/functions/string-functions.md +++ b/docs/ru/sql-reference/functions/string-functions.md @@ -645,3 +645,66 @@ SELECT decodeXMLComponent('< Σ >'); - [Мнемоники в HTML](https://ru.wikipedia.org/wiki/%D0%9C%D0%BD%D0%B5%D0%BC%D0%BE%D0%BD%D0%B8%D0%BA%D0%B8_%D0%B2_HTML) + + +## extractTextFromHTML {#extracttextfromhtml} + +Функция для извлечения текста из HTML или XHTML. +Она не соответствует всем HTML, XML или XHTML стандартам на 100%, но ее реализация достаточно точная и быстрая. Правила обработки следующие: + +1. Комментарии удаляются. Пример: ``. Комментарий должен оканчиваться символами `-->`. Вложенные комментарии недопустимы. +Примечание: конструкции наподобие `` и `` не являются допустимыми комментариями в HTML, но они будут удалены согласно другим правилам. +2. Содержимое CDATA вставляется дословно. Примечание: формат CDATA специфичен для XML/XHTML. Но он обрабатывается всегда по принципу "наилучшего возможного результата". +3. Элементы `script` и `style` удаляются вместе со всем содержимым. Примечание: предполагается, что закрывающий тег не может появиться внутри содержимого. Например, в JS строковый литерал должен быть экранирован как `"<\/script>"`. +Примечание: комментарии и CDATA возможны внутри `script` или `style` - тогда закрывающие теги не ищутся внутри CDATA. Пример: `]]>`. Но они ищутся внутри комментариев. Иногда возникают сложные случаи: ` var y = "-->"; alert(x + y);` +Примечание: `script` и `style` могут быть названиями пространств имен XML - тогда они не обрабатываются как обычные элементы `script` или `style`. Пример: `Hello`. +Примечание: пробелы возможны после имени закрывающего тега: ``, но не перед ним: `< / script>`. +4. Другие теги или элементы, подобные тегам, удаляются, а их внутреннее содержимое остается. 
Пример: `.` +Примечание: ожидается, что такой HTML является недопустимым: `` +Примечание: функция также удаляет подобные тегам элементы: `<>`, ``, и т. д. +Примечание: если встречается тег без завершающего символа `>`, то удаляется этот тег и весь следующий за ним текст: `world`, `Helloworld` — в HTML нет пробелов, но функция вставляет их. Также следует учитывать такие варианты написания: `Hello
<p>world</p>`, `Hello<br>world`. Подобные результаты выполнения функции могут использоваться для анализа данных, например, для преобразования HTML-текста в набор используемых слов.
+7. Также обратите внимание, что правильная обработка пробелов требует поддержки `<pre></pre>` и свойств CSS `display` и `white-space`.
+
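+Небольшой набросок, иллюстрирующий правило 6 (ожидаемый вывод является допущением, основанным на описанных выше правилах):
+
+``` sql
+-- Теги удаляются, а между фрагментами текста, которые они разделяли, вставляется пробел.
+SELECT extractTextFromHTML('Hello<p>world</p>');  -- ожидается 'Hello world'
+```
+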
+**Синтаксис**
+
+``` sql
+extractTextFromHTML(x)
+```
+
+**Аргументы**
+
+-   `x` — текст для обработки. [String](../../sql-reference/data-types/string.md).
+
+**Возвращаемое значение**
+
+-   Извлеченный текст.
+
+Тип: [String](../../sql-reference/data-types/string.md).
+
+**Пример**
+
+Первый пример содержит несколько тегов и комментарий. На этом примере также видно, как обрабатываются пробелы.
+Второй пример показывает обработку `CDATA` и тега `script`.
+В третьем примере текст выделяется из полного HTML-ответа, полученного с помощью функции [url](../../sql-reference/table-functions/url.md).
+
+Запрос:
+
+``` sql
+SELECT extractTextFromHTML(' <p> A text <i>with</i><b>tags</b>. <!-- comments --> </p> 
'); +SELECT extractTextFromHTML('CDATA]]> '); +SELECT extractTextFromHTML(html) FROM url('http://www.donothingfor2minutes.com/', RawBLOB, 'html String'); +``` + +Результат: + +``` text +A text with tags . +The content within CDATA +Do Nothing for 2 Minutes 2:00   +``` diff --git a/docs/ru/sql-reference/statements/alter/column.md b/docs/ru/sql-reference/statements/alter/column.md index 87fc1c78cd0..158ab2e7385 100644 --- a/docs/ru/sql-reference/statements/alter/column.md +++ b/docs/ru/sql-reference/statements/alter/column.md @@ -63,6 +63,9 @@ DROP COLUMN [IF EXISTS] name Запрос удаляет данные из файловой системы. Так как это представляет собой удаление целых файлов, запрос выполняется почти мгновенно. +!!! warning "Предупреждение" + Вы не можете удалить столбец, используемый в [материализованном представлениии](../../../sql-reference/statements/create/view.md#materialized). В противном случае будет ошибка. + Пример: ``` sql @@ -155,7 +158,7 @@ ALTER TABLE table_name MODIFY column_name REMOVE property; ALTER TABLE table_with_ttl MODIFY COLUMN column_ttl REMOVE TTL; ``` -## Смотрите также +**Смотрите также** - [REMOVE TTL](ttl.md). diff --git a/docs/ru/sql-reference/statements/alter/index/index.md b/docs/ru/sql-reference/statements/alter/index/index.md index 862def5cc04..632f11ed906 100644 --- a/docs/ru/sql-reference/statements/alter/index/index.md +++ b/docs/ru/sql-reference/statements/alter/index/index.md @@ -9,8 +9,9 @@ toc_title: "Манипуляции с индексами" Добавить или удалить индекс можно с помощью операций ``` sql -ALTER TABLE [db].name ADD INDEX name expression TYPE type GRANULARITY value [AFTER name] -ALTER TABLE [db].name DROP INDEX name +ALTER TABLE [db.]name ADD INDEX name expression TYPE type GRANULARITY value [AFTER name] +ALTER TABLE [db.]name DROP INDEX name +ALTER TABLE [db.]table MATERIALIZE INDEX name IN PARTITION partition_name ``` Поддерживается только таблицами семейства `*MergeTree`. @@ -18,6 +19,7 @@ ALTER TABLE [db].name DROP INDEX name Команда `ADD INDEX` добавляет описание индексов в метаданные, а `DROP INDEX` удаляет индекс из метаданных и стирает файлы индекса с диска, поэтому они легковесные и работают мгновенно. Если индекс появился в метаданных, то он начнет считаться в последующих слияниях и записях в таблицу, а не сразу после выполнения операции `ALTER`. +`MATERIALIZE INDEX` - перестраивает индекс в указанной партиции. Реализовано как мутация. Запрос на изменение индексов реплицируется, сохраняя новые метаданные в ZooKeeper и применяя изменения на всех репликах. diff --git a/docs/ru/sql-reference/statements/alter/partition.md b/docs/ru/sql-reference/statements/alter/partition.md index 3e7b069b066..02a87406e86 100644 --- a/docs/ru/sql-reference/statements/alter/partition.md +++ b/docs/ru/sql-reference/statements/alter/partition.md @@ -38,7 +38,7 @@ ALTER TABLE mt DETACH PART 'all_2_2_0'; После того как запрос будет выполнен, вы сможете производить любые операции с данными в директории `detached`. Например, можно удалить их из файловой системы. -Запрос реплицируется — данные будут перенесены в директорию `detached` и забыты на всех репликах. Обратите внимание, запрос может быть отправлен только на реплику-лидер. Чтобы узнать, является ли реплика лидером, выполните запрос `SELECT` к системной таблице [system.replicas](../../../operations/system-tables/replicas.md#system_tables-replicas). Либо можно выполнить запрос `DETACH` на всех репликах — тогда на всех репликах, кроме реплики-лидера, запрос вернет ошибку. 
+Запрос реплицируется — данные будут перенесены в директорию `detached` и забыты на всех репликах. Обратите внимание, запрос может быть отправлен только на реплику-лидер. Чтобы узнать, является ли реплика лидером, выполните запрос `SELECT` к системной таблице [system.replicas](../../../operations/system-tables/replicas.md#system_tables-replicas). Либо можно выполнить запрос `DETACH` на всех репликах — тогда на всех репликах, кроме реплик-лидеров (поскольку допускается несколько лидеров), запрос вернет ошибку. ## DROP PARTITION\|PART {#alter_drop-partition} @@ -83,9 +83,13 @@ ALTER TABLE visits ATTACH PART 201901_2_2_0; Как корректно задать имя партиции или куска, см. в разделе [Как задавать имя партиции в запросах ALTER](#alter-how-to-specify-part-expr). -Этот запрос реплицируется. Реплика-иницатор проверяет, есть ли данные в директории `detached`. Если данные есть, то запрос проверяет их целостность. В случае успеха данные добавляются в таблицу. Все остальные реплики загружают данные с реплики-инициатора запроса. +Этот запрос реплицируется. Реплика-иницатор проверяет, есть ли данные в директории `detached`. +Если данные есть, то запрос проверяет их целостность. В случае успеха данные добавляются в таблицу. -Это означает, что вы можете разместить данные в директории `detached` на одной реплике и с помощью запроса `ALTER ... ATTACH` добавить их в таблицу на всех репликах. +Если реплика, не являющаяся инициатором запроса, получив команду присоединения, находит кусок с правильными контрольными суммами в своей собственной папке `detached`, она присоединяет данные, не скачивая их с других реплик. +Если нет куска с правильными контрольными суммами, данные загружаются из любой реплики, имеющей этот кусок. + +Вы можете поместить данные в директорию `detached` на одной реплике и с помощью запроса `ALTER ... ATTACH` добавить их в таблицу на всех репликах. ## ATTACH PARTITION FROM {#alter_attach-partition-from} @@ -93,7 +97,8 @@ ALTER TABLE visits ATTACH PART 201901_2_2_0; ALTER TABLE table2 ATTACH PARTITION partition_expr FROM table1 ``` -Копирует партицию из таблицы `table1` в таблицу `table2` и добавляет к существующим данным `table2`. Данные из `table1` не удаляются. +Копирует партицию из таблицы `table1` в таблицу `table2`. +Обратите внимание, что данные не удаляются ни из `table1`, ни из `table2`. Следует иметь в виду: @@ -305,4 +310,3 @@ OPTIMIZE TABLE table_not_partitioned PARTITION tuple() FINAL; `IN PARTITION` указывает на партицию, для которой применяются выражения [UPDATE](../../../sql-reference/statements/alter/update.md#alter-table-update-statements) или [DELETE](../../../sql-reference/statements/alter/delete.md#alter-mutations) в результате запроса `ALTER TABLE`. Новые куски создаются только в указанной партиции. Таким образом, `IN PARTITION` помогает снизить нагрузку, когда таблица разбита на множество партиций, а вам нужно обновить данные лишь точечно. Примеры запросов `ALTER ... PARTITION` можно посмотреть в тестах: [`00502_custom_partitioning_local`](https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/00502_custom_partitioning_local.sql) и [`00502_custom_partitioning_replicated_zookeeper`](https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/00502_custom_partitioning_replicated_zookeeper.sql). 
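Примерная последовательность работы с каталогом `detached` (набросок; таблица `visits` и партиция `201901` взяты из примеров выше):

``` sql
ALTER TABLE visits DETACH PARTITION 201901;
-- куски партиции остаются в каталоге detached/ на репликах
ALTER TABLE visits ATTACH PARTITION 201901;
-- реплика сначала ищет куски с правильными контрольными суммами в собственном каталоге detached/
-- и скачивает их с другой реплики, только если локальной копии нет
```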
- diff --git a/docs/ru/sql-reference/statements/alter/ttl.md b/docs/ru/sql-reference/statements/alter/ttl.md index e949c992bbe..2a2d10b69de 100644 --- a/docs/ru/sql-reference/statements/alter/ttl.md +++ b/docs/ru/sql-reference/statements/alter/ttl.md @@ -82,4 +82,4 @@ SELECT * FROM table_with_ttl; ### Смотрите также - Подробнее о [свойстве TTL](../../../engines/table-engines/mergetree-family/mergetree.md#mergetree-column-ttl). - +- Изменить столбец [с TTL](../../../sql-reference/statements/alter/column.md#alter_modify-column). \ No newline at end of file diff --git a/docs/ru/sql-reference/statements/alter/user.md b/docs/ru/sql-reference/statements/alter/user.md index 53d090f8eab..bb57c3bb328 100644 --- a/docs/ru/sql-reference/statements/alter/user.md +++ b/docs/ru/sql-reference/statements/alter/user.md @@ -15,17 +15,29 @@ ALTER USER [IF EXISTS] name1 [ON CLUSTER cluster_name1] [RENAME TO new_name1] [NOT IDENTIFIED | IDENTIFIED {[WITH {no_password | plaintext_password | sha256_password | sha256_hash | double_sha1_password | double_sha1_hash}] BY {'password' | 'hash'}} | {WITH ldap SERVER 'server_name'} | {WITH kerberos [REALM 'realm']}] [[ADD | DROP] HOST {LOCAL | NAME 'name' | REGEXP 'name_regexp' | IP 'address' | LIKE 'pattern'} [,...] | ANY | NONE] [DEFAULT ROLE role [,...] | ALL | ALL EXCEPT role [,...] ] + [GRANTEES {user | role | ANY | NONE} [,...] [EXCEPT {user | role} [,...]]] [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY | WRITABLE] | PROFILE 'profile_name'] [,...] ``` Для выполнения `ALTER USER` необходима привилегия [ALTER USER](../grant.md#grant-access-management). +## Секция GRANTEES {#grantees} + +Определяет пользователей или роли, которым разрешено получать [привилегии](../../../sql-reference/statements/grant.md#grant-privileges) от указанного пользователя при условии, что этому пользователю также предоставлен весь необходимый доступ с использованием [GRANT OPTION](../../../sql-reference/statements/grant.md#grant-privigele-syntax). Параметры секции `GRANTEES`: + +- `user` — пользователь, которому разрешено получать привилегии от указанного пользователя. +- `role` — роль, которой разрешено получать привилегии от указанного пользователя. +- `ANY` — любому пользователю или любой роли разрешено получать привилегии от указанного пользователя. Используется по умолчанию. +- `NONE` — никому не разрешено получать привилегии от указанного пользователя. + +Вы можете исключить любого пользователя или роль, используя выражение `EXCEPT`. Например, `ALTER USER user1 GRANTEES ANY EXCEPT user2`. Это означает, что если `user1` имеет привилегии, предоставленные с использованием `GRANT OPTION`, он сможет предоставить их любому, кроме `user2`. + ## Примеры {#alter-user-examples} Установить ролями по умолчанию роли, назначенные пользователю: ``` sql -ALTER USER user DEFAULT ROLE role1, role2 +ALTER USER user DEFAULT ROLE role1, role2; ``` Если роли не были назначены пользователю, ClickHouse выбрасывает исключение. @@ -33,7 +45,7 @@ ALTER USER user DEFAULT ROLE role1, role2 Установить ролями по умолчанию все роли, назначенные пользователю: ``` sql -ALTER USER user DEFAULT ROLE ALL +ALTER USER user DEFAULT ROLE ALL; ``` Если роль будет впоследствии назначена пользователю, она автоматически станет ролью по умолчанию. 
@@ -41,6 +53,11 @@ ALTER USER user DEFAULT ROLE ALL Установить ролями по умолчанию все назначенные пользователю роли кроме `role1` и `role2`: ``` sql -ALTER USER user DEFAULT ROLE ALL EXCEPT role1, role2 +ALTER USER user DEFAULT ROLE ALL EXCEPT role1, role2; ``` +Разрешить пользователю с аккаунтом `john` предоставить свои привилегии пользователю с аккаунтом `jack`: + +``` sql +ALTER USER john GRANTEES jack; +``` diff --git a/docs/ru/sql-reference/statements/create/row-policy.md b/docs/ru/sql-reference/statements/create/row-policy.md index 88709598906..6fe1dc45815 100644 --- a/docs/ru/sql-reference/statements/create/row-policy.md +++ b/docs/ru/sql-reference/statements/create/row-policy.md @@ -5,7 +5,7 @@ toc_title: "Политика доступа" # CREATE ROW POLICY {#create-row-policy-statement} -Создает [фильтры для строк](../../../operations/access-rights.md#row-policy-management), которые пользователь может прочесть из таблицы. +Создает [политики доступа к строкам](../../../operations/access-rights.md#row-policy-management), т.е. фильтры, которые определяют, какие строки пользователь может читать из таблицы. Синтаксис: @@ -13,33 +13,74 @@ toc_title: "Политика доступа" CREATE [ROW] POLICY [IF NOT EXISTS | OR REPLACE] policy_name1 [ON CLUSTER cluster_name1] ON [db1.]table1 [, policy_name2 [ON CLUSTER cluster_name2] ON [db2.]table2 ...] [AS {PERMISSIVE | RESTRICTIVE}] - [FOR SELECT] - [USING condition] + [FOR SELECT] USING condition [TO {role [,...] | ALL | ALL EXCEPT role [,...]}] ``` -Секция `ON CLUSTER` позволяет создавать фильтры для строк на кластере, см. [Распределенные DDL запросы](../../../sql-reference/distributed-ddl.md). +## Секция USING {#create-row-policy-using} -## Секция AS {#create-row-policy-as} - -С помощью данной секции можно создать политику разрешения или ограничения. - -Политика разрешения предоставляет доступ к строкам. Разрешительные политики, которые применяются к одной таблице, объединяются с помощью логического оператора `OR`. Политики являются разрешительными по умолчанию. - -Политика ограничения запрещает доступ к строкам. Ограничительные политики, которые применяются к одной таблице, объединяются логическим оператором `AND`. - -Ограничительные политики применяются к строкам, прошедшим фильтр разрешительной политики. Если вы не зададите разрешительные политики, пользователь не сможет обращаться ни к каким строкам из таблицы. +Секция `USING` указывает условие для фильтрации строк. Пользователь может видеть строку, если это условие, вычисленное для строки, дает ненулевой результат. ## Секция TO {#create-row-policy-to} -В секции `TO` вы можете перечислить как роли, так и пользователей. Например, `CREATE ROW POLICY ... TO accountant, john@localhost`. +В секции `TO` перечисляются пользователи и роли, для которых должна действовать политика. Например, `CREATE ROW POLICY ... TO accountant, john@localhost`. Ключевым словом `ALL` обозначаются все пользователи, включая текущего. Ключевые слова `ALL EXCEPT` позволяют исключить пользователей из списка всех пользователей. Например, `CREATE ROW POLICY ... TO ALL EXCEPT accountant, john@localhost` +!!! note "Note" + Если для таблицы не задано ни одной политики доступа к строкам, то любой пользователь может выполнить команду SELECT и получить все строки таблицы. Если определить хотя бы одну политику для таблицы, до доступ к строкам будет управляться этими политиками, причем для всех пользователей (даже для тех, для кого политики не определялись). 
Например, следующая политика + + `CREATE ROW POLICY pol1 ON mydb.table1 USING b=1 TO mira, peter` + + запретит пользователям `mira` и `peter` видеть строки с `b != 1`, и еще запретит всем остальным пользователям (например, пользователю `paul`) видеть какие-либо строки вообще из таблицы `mydb.table1`. + + Если это нежелательно, такое поведение можно исправить, определив дополнительную политику: + + `CREATE ROW POLICY pol2 ON mydb.table1 USING 1 TO ALL EXCEPT mira, peter` + +## Секция AS {#create-row-policy-as} + +Может быть одновременно активно более одной политики для одной и той же таблицы и одного и того же пользователя. Поэтому нам нужен способ комбинировать политики. + +По умолчанию политики комбинируются с использованием логического оператора `OR`. Например, политики: + +``` sql +CREATE ROW POLICY pol1 ON mydb.table1 USING b=1 TO mira, peter +CREATE ROW POLICY pol2 ON mydb.table1 USING c=2 TO peter, antonio +``` + +разрешат пользователю с именем `peter` видеть строки, для которых будет верно `b=1` или `c=2`. + +Секция `AS` указывает, как политики должны комбинироваться с другими политиками. Политики могут быть или разрешительными (`PERMISSIVE`), или ограничительными (`RESTRICTIVE`). По умолчанию политики создаются разрешительными (`PERMISSIVE`); такие политики комбинируются с использованием логического оператора `OR`. + +Ограничительные (`RESTRICTIVE`) политики комбинируются с использованием логического оператора `AND`. + +Общая формула выглядит так: + +``` +строка_видима = (одна или больше permissive-политик дала ненулевой результат проверки условия) И + (все restrictive-политики дали ненулевой результат проверки условия) +``` + +Например, политики + +``` sql +CREATE ROW POLICY pol1 ON mydb.table1 USING b=1 TO mira, peter +CREATE ROW POLICY pol2 ON mydb.table1 USING c=2 AS RESTRICTIVE TO peter, antonio +``` + +разрешат пользователю с именем `peter` видеть только те строки, для которых будет одновременно `b=1` и `c=2`. + +## Секция ON CLUSTER {#create-row-policy-on-cluster} + +Секция `ON CLUSTER` позволяет создавать политики на кластере, см. [Распределенные DDL запросы](../../../sql-reference/distributed-ddl.md). + ## Примеры -`CREATE ROW POLICY filter ON mydb.mytable FOR SELECT USING a<1000 TO accountant, john@localhost` +`CREATE ROW POLICY filter1 ON mydb.mytable USING a<1000 TO accountant, john@localhost` -`CREATE ROW POLICY filter ON mydb.mytable FOR SELECT USING a<1000 TO ALL EXCEPT mira` +`CREATE ROW POLICY filter2 ON mydb.mytable USING a<1000 AND b=5 TO ALL EXCEPT mira` + +`CREATE ROW POLICY filter3 ON mydb.mytable USING 1 TO admin` \ No newline at end of file diff --git a/docs/ru/sql-reference/statements/create/table.md b/docs/ru/sql-reference/statements/create/table.md index b998435bcd8..1ccd0a600f3 100644 --- a/docs/ru/sql-reference/statements/create/table.md +++ b/docs/ru/sql-reference/statements/create/table.md @@ -46,15 +46,32 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name AS table_function() ### Из запроса SELECT {#from-select-query} ``` sql -CREATE TABLE [IF NOT EXISTS] [db.]table_name ENGINE = engine AS SELECT ... +CREATE TABLE [IF NOT EXISTS] [db.]table_name[(name1 [type1], name2 [type2], ...)] ENGINE = engine AS SELECT ... ``` -Создаёт таблицу со структурой, как результат запроса `SELECT`, с движком engine, и заполняет её данными из SELECT-а. +Создаёт таблицу со структурой, как результат запроса `SELECT`, с движком `engine`, и заполняет её данными из `SELECT`. Также вы можете явно задать описание столбцов. 
-Во всех случаях, если указано `IF NOT EXISTS`, то запрос не будет возвращать ошибку, если таблица уже существует. В этом случае, запрос будет ничего не делать. +Если таблица уже существует и указано `IF NOT EXISTS`, то запрос ничего не делает. После секции `ENGINE` в запросе могут использоваться и другие секции в зависимости от движка. Подробную документацию по созданию таблиц смотрите в описаниях [движков таблиц](../../../engines/table-engines/index.md#table_engines). +**Пример** + +Запрос: + +``` sql +CREATE TABLE t1 (x String) ENGINE = Memory AS SELECT 1; +SELECT x, toTypeName(x) FROM t1; +``` + +Результат: + +```text +┌─x─┬─toTypeName(x)─┐ +│ 1 │ String │ +└───┴───────────────┘ +``` + ## Модификатор NULL или NOT NULL {#null-modifiers} Модификатор `NULL` или `NOT NULL`, указанный после типа данных в определении столбца, позволяет или не позволяет типу данных быть [Nullable](../../../sql-reference/data-types/nullable.md#data_type-nullable). @@ -230,7 +247,7 @@ CREATE TABLE codec_example ) ENGINE = MergeTree() ``` -## Временные таблицы {#vremennye-tablitsy} +## Временные таблицы {#temporary-tables} ClickHouse поддерживает временные таблицы со следующими характеристиками: diff --git a/docs/ru/sql-reference/statements/create/user.md b/docs/ru/sql-reference/statements/create/user.md index a487d1ac593..ea64bff061b 100644 --- a/docs/ru/sql-reference/statements/create/user.md +++ b/docs/ru/sql-reference/statements/create/user.md @@ -15,6 +15,7 @@ CREATE USER [IF NOT EXISTS | OR REPLACE] name1 [ON CLUSTER cluster_name1] [NOT IDENTIFIED | IDENTIFIED {[WITH {no_password | plaintext_password | sha256_password | sha256_hash | double_sha1_password | double_sha1_hash}] BY {'password' | 'hash'}} | {WITH ldap SERVER 'server_name'} | {WITH kerberos [REALM 'realm']}] [HOST {LOCAL | NAME 'name' | REGEXP 'name_regexp' | IP 'address' | LIKE 'pattern'} [,...] | ANY | NONE] [DEFAULT ROLE role [,...]] + [GRANTEES {user | role | ANY | NONE} [,...] [EXCEPT {user | role} [,...]]] [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY | WRITABLE] | PROFILE 'profile_name'] [,...] ``` @@ -24,43 +25,52 @@ CREATE USER [IF NOT EXISTS | OR REPLACE] name1 [ON CLUSTER cluster_name1] Существует несколько способов идентификации пользователя: -- `IDENTIFIED WITH no_password` -- `IDENTIFIED WITH plaintext_password BY 'qwerty'` -- `IDENTIFIED WITH sha256_password BY 'qwerty'` or `IDENTIFIED BY 'password'` -- `IDENTIFIED WITH sha256_hash BY 'hash'` -- `IDENTIFIED WITH double_sha1_password BY 'qwerty'` -- `IDENTIFIED WITH double_sha1_hash BY 'hash'` -- `IDENTIFIED WITH ldap SERVER 'server_name'` -- `IDENTIFIED WITH kerberos` or `IDENTIFIED WITH kerberos REALM 'realm'` +- `IDENTIFIED WITH no_password` +- `IDENTIFIED WITH plaintext_password BY 'qwerty'` +- `IDENTIFIED WITH sha256_password BY 'qwerty'` or `IDENTIFIED BY 'password'` +- `IDENTIFIED WITH sha256_hash BY 'hash'` +- `IDENTIFIED WITH double_sha1_password BY 'qwerty'` +- `IDENTIFIED WITH double_sha1_hash BY 'hash'` +- `IDENTIFIED WITH ldap SERVER 'server_name'` +- `IDENTIFIED WITH kerberos` or `IDENTIFIED WITH kerberos REALM 'realm'` ## Пользовательский хост Пользовательский хост — это хост, с которого можно установить соединение с сервером ClickHouse. Хост задается в секции `HOST` следующими способами: -- `HOST IP 'ip_address_or_subnetwork'` — Пользователь может подключиться к серверу ClickHouse только с указанного IP-адреса или [подсети](https://ru.wikipedia.org/wiki/Подсеть). Примеры: `HOST IP '192.168.0.0/16'`, `HOST IP '2001:DB8::/32'`. 
При использовании в эксплуатации указывайте только элементы `HOST IP` (IP-адреса и маски подсети), так как использование `host` и `host_regexp` может привести к дополнительной задержке. -- `HOST ANY` — Пользователь может подключиться с любого хоста. Используется по умолчанию. -- `HOST LOCAL` — Пользователь может подключиться только локально. -- `HOST NAME 'fqdn'` — Хост задается через FQDN. Например, `HOST NAME 'mysite.com'`. -- `HOST NAME REGEXP 'regexp'` — Позволяет использовать регулярные выражения [pcre](http://www.pcre.org/), чтобы задать хосты. Например, `HOST NAME REGEXP '.*\.mysite\.com'`. -- `HOST LIKE 'template'` — Позволяет использовать оператор [LIKE](../../functions/string-search-functions.md#function-like) для фильтрации хостов. Например, `HOST LIKE '%'` эквивалентен `HOST ANY`; `HOST LIKE '%.mysite.com'` разрешает подключение со всех хостов в домене `mysite.com`. +- `HOST IP 'ip_address_or_subnetwork'` — Пользователь может подключиться к серверу ClickHouse только с указанного IP-адреса или [подсети](https://ru.wikipedia.org/wiki/Подсеть). Примеры: `HOST IP '192.168.0.0/16'`, `HOST IP '2001:DB8::/32'`. При использовании в эксплуатации указывайте только элементы `HOST IP` (IP-адреса и маски подсети), так как использование `host` и `host_regexp` может привести к дополнительной задержке. +- `HOST ANY` — Пользователь может подключиться с любого хоста. Используется по умолчанию. +- `HOST LOCAL` — Пользователь может подключиться только локально. +- `HOST NAME 'fqdn'` — Хост задается через FQDN. Например, `HOST NAME 'mysite.com'`. +- `HOST NAME REGEXP 'regexp'` — Позволяет использовать регулярные выражения [pcre](http://www.pcre.org/), чтобы задать хосты. Например, `HOST NAME REGEXP '.*\.mysite\.com'`. +- `HOST LIKE 'template'` — Позволяет использовать оператор [LIKE](../../functions/string-search-functions.md#function-like) для фильтрации хостов. Например, `HOST LIKE '%'` эквивалентен `HOST ANY`; `HOST LIKE '%.mysite.com'` разрешает подключение со всех хостов в домене `mysite.com`. Также, чтобы задать хост, вы можете использовать `@` вместе с именем пользователя. Примеры: -- `CREATE USER mira@'127.0.0.1'` — Эквивалентно `HOST IP`. -- `CREATE USER mira@'localhost'` — Эквивалентно `HOST LOCAL`. -- `CREATE USER mira@'192.168.%.%'` — Эквивалентно `HOST LIKE`. +- `CREATE USER mira@'127.0.0.1'` — Эквивалентно `HOST IP`. +- `CREATE USER mira@'localhost'` — Эквивалентно `HOST LOCAL`. +- `CREATE USER mira@'192.168.%.%'` — Эквивалентно `HOST LIKE`. !!! info "Внимание" ClickHouse трактует конструкцию `user_name@'address'` как имя пользователя целиком. То есть технически вы можете создать несколько пользователей с одинаковыми `user_name`, но разными частями конструкции после `@`, но лучше так не делать. + +## Секция GRANTEES {#grantees} +Указываются пользователи или роли, которым разрешено получать [привилегии](../../../sql-reference/statements/grant.md#grant-privileges) от создаваемого пользователя при условии, что этому пользователю также предоставлен весь необходимый доступ с использованием [GRANT OPTION](../../../sql-reference/statements/grant.md#grant-privigele-syntax). Параметры секции `GRANTEES`: + +- `user` — указывается пользователь, которому разрешено получать привилегии от создаваемого пользователя. +- `role` — указывается роль, которой разрешено получать привилегии от создаваемого пользователя. +- `ANY` — любому пользователю или любой роли разрешено получать привилегии от создаваемого пользователя. Используется по умолчанию. 
+- `NONE` — никому не разрешено получать привилегии от создаваемого пользователя. + +Вы можете исключить любого пользователя или роль, используя выражение `EXCEPT`. Например, `CREATE USER user1 GRANTEES ANY EXCEPT user2`. Это означает, что если `user1` имеет привилегии, предоставленные с использованием `GRANT OPTION`, он сможет предоставить их любому, кроме `user2`. ## Примеры {#create-user-examples} - Создать аккаунт `mira`, защищенный паролем `qwerty`: ```sql -CREATE USER mira HOST IP '127.0.0.1' IDENTIFIED WITH sha256_password BY 'qwerty' +CREATE USER mira HOST IP '127.0.0.1' IDENTIFIED WITH sha256_password BY 'qwerty'; ``` Пользователь `mira` должен запустить клиентское приложение на хосте, где запущен ClickHouse. @@ -68,13 +78,13 @@ CREATE USER mira HOST IP '127.0.0.1' IDENTIFIED WITH sha256_password BY 'qwerty' Создать аккаунт `john`, назначить на него роли, сделать данные роли ролями по умолчанию: ``` sql -CREATE USER john DEFAULT ROLE role1, role2 +CREATE USER john DEFAULT ROLE role1, role2; ``` Создать аккаунт `john` и установить ролями по умолчанию все его будущие роли: ``` sql -CREATE USER user DEFAULT ROLE ALL +CREATE USER john DEFAULT ROLE ALL; ``` Когда роль будет назначена аккаунту `john`, она автоматически станет ролью по умолчанию. @@ -82,7 +92,11 @@ CREATE USER user DEFAULT ROLE ALL Создать аккаунт `john` и установить ролями по умолчанию все его будущие роли, кроме `role1` и `role2`: ``` sql -CREATE USER john DEFAULT ROLE ALL EXCEPT role1, role2 +CREATE USER john DEFAULT ROLE ALL EXCEPT role1, role2; ``` - \ No newline at end of file +Создать пользователя с аккаунтом `john` и разрешить ему предоставить свои привилегии пользователю с аккаунтом `jack`: + +``` sql +CREATE USER john GRANTEES jack; +``` diff --git a/docs/ru/sql-reference/statements/detach.md b/docs/ru/sql-reference/statements/detach.md index d707acd7ccf..af915d38772 100644 --- a/docs/ru/sql-reference/statements/detach.md +++ b/docs/ru/sql-reference/statements/detach.md @@ -10,7 +10,7 @@ toc_title: DETACH Синтаксис: ``` sql -DETACH TABLE|VIEW [IF EXISTS] [db.]name [PERMANENTLY] [ON CLUSTER cluster] +DETACH TABLE|VIEW [IF EXISTS] [db.]name [ON CLUSTER cluster] [PERMANENTLY] ``` Но ни данные, ни метаданные таблицы или материализованного представления не удаляются. При следующем запуске сервера, если не было использовано `PERMANENTLY`, сервер прочитает метаданные и снова узнает о таблице/представлении. Если таблица или представление были отключены перманентно, сервер не подключит их обратно автоматически. diff --git a/docs/ru/sql-reference/statements/explain.md b/docs/ru/sql-reference/statements/explain.md new file mode 100644 index 00000000000..c2a35f1b925 --- /dev/null +++ b/docs/ru/sql-reference/statements/explain.md @@ -0,0 +1,168 @@ +--- +toc_priority: 39 +toc_title: EXPLAIN +--- + +# EXPLAIN {#explain} + +Выводит план выполнения запроса. + +Синтаксис: + +```sql +EXPLAIN [AST | SYNTAX | PLAN | PIPELINE] [setting = value, ...] SELECT ... [FORMAT ...] 
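+-- Пример вызова с настройками (набросок): тип EXPLAIN и настройки указываются перед самим запросом.
+-- EXPLAIN PLAN header = 1, actions = 1 SELECT 1;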
+``` + +Пример: + +```sql +EXPLAIN SELECT sum(number) FROM numbers(10) UNION ALL SELECT sum(number) FROM numbers(10) ORDER BY sum(number) ASC FORMAT TSV; +``` + +```sql +Union + Expression (Projection) + Expression (Before ORDER BY and SELECT) + Aggregating + Expression (Before GROUP BY) + SettingQuotaAndLimits (Set limits and quota after reading from storage) + ReadFromStorage (SystemNumbers) + Expression (Projection) + MergingSorted (Merge sorted streams for ORDER BY) + MergeSorting (Merge sorted blocks for ORDER BY) + PartialSorting (Sort each block for ORDER BY) + Expression (Before ORDER BY and SELECT) + Aggregating + Expression (Before GROUP BY) + SettingQuotaAndLimits (Set limits and quota after reading from storage) + ReadFromStorage (SystemNumbers) +``` + +## Типы EXPLAIN {#explain-types} + +- `AST` — абстрактное синтаксическое дерево. +- `SYNTAX` — текст запроса после оптимизации на уровне AST. +- `PLAN` — план выполнения запроса. +- `PIPELINE` — конвейер выполнения запроса. + +### EXPLAIN AST {#explain-ast} + +Дамп AST запроса. Поддерживает все типы запросов, не только `SELECT`. + +Примеры: + +```sql +EXPLAIN AST SELECT 1; +``` + +```sql +SelectWithUnionQuery (children 1) + ExpressionList (children 1) + SelectQuery (children 1) + ExpressionList (children 1) + Literal UInt64_1 +``` + +```sql +EXPLAIN AST ALTER TABLE t1 DELETE WHERE date = today(); +``` + +```sql + explain + AlterQuery t1 (children 1) + ExpressionList (children 1) + AlterCommand 27 (children 1) + Function equals (children 1) + ExpressionList (children 2) + Identifier date + Function today (children 1) + ExpressionList +``` + +### EXPLAIN SYNTAX {#explain-syntax} + +Возвращает текст запроса после применения синтаксических оптимизаций. + +Пример: + +```sql +EXPLAIN SYNTAX SELECT * FROM system.numbers AS a, system.numbers AS b, system.numbers AS c; +``` + +```sql +SELECT + `--a.number` AS `a.number`, + `--b.number` AS `b.number`, + number AS `c.number` +FROM +( + SELECT + number AS `--a.number`, + b.number AS `--b.number` + FROM system.numbers AS a + CROSS JOIN system.numbers AS b +) AS `--.s` +CROSS JOIN system.numbers AS c +``` + +### EXPLAIN PLAN {#explain-plan} + +Дамп шагов выполнения запроса. + +Настройки: + +- `header` — выводит выходной заголовок для шага. По умолчанию: 0. +- `description` — выводит описание шага. По умолчанию: 1. +- `actions` — выводит подробную информацию о действиях, выполняемых на данном шаге. По умолчанию: 0. + +Пример: + +```sql +EXPLAIN SELECT sum(number) FROM numbers(10) GROUP BY number % 4; +``` + +```sql +Union + Expression (Projection) + Expression (Before ORDER BY and SELECT) + Aggregating + Expression (Before GROUP BY) + SettingQuotaAndLimits (Set limits and quota after reading from storage) + ReadFromStorage (SystemNumbers) +``` + +!!! note "Примечание" + Оценка стоимости выполнения шага и запроса не поддерживается. + +### EXPLAIN PIPELINE {#explain-pipeline} + +Настройки: + +- `header` — выводит заголовок для каждого выходного порта. По умолчанию: 0. +- `graph` — выводит граф, описанный на языке [DOT](https://ru.wikipedia.org/wiki/DOT_(язык)). По умолчанию: 0. +- `compact` — выводит граф в компактном режиме, если включена настройка `graph`. По умолчанию: 1. +- `indexes` — показывает используемые индексы, количество отфильтрованных кусков и гранул для каждого примененного индекса. По умолчанию: 0. Поддерживается для таблиц семейства [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md). 
+ +Пример: + +```sql +EXPLAIN PIPELINE SELECT sum(number) FROM numbers_mt(100000) GROUP BY number % 4; +``` + +```sql +(Union) +(Expression) +ExpressionTransform + (Expression) + ExpressionTransform + (Aggregating) + Resize 2 → 1 + AggregatingTransform × 2 + (Expression) + ExpressionTransform × 2 + (SettingQuotaAndLimits) + (ReadFromStorage) + NumbersMt × 2 0 → 1 +``` + +[Оригинальная статья](https://clickhouse.tech/docs/ru/sql-reference/statements/explain/) diff --git a/docs/ru/sql-reference/statements/grant.md b/docs/ru/sql-reference/statements/grant.md index 7b2d26902ef..093e6eb3b93 100644 --- a/docs/ru/sql-reference/statements/grant.md +++ b/docs/ru/sql-reference/statements/grant.md @@ -93,7 +93,7 @@ GRANT SELECT(x,y) ON db.table TO john WITH GRANT OPTION - `ALTER ADD CONSTRAINT` - `ALTER DROP CONSTRAINT` - `ALTER TTL` - - `ALTER MATERIALIZE TTL` + - `ALTER MATERIALIZE TTL` - `ALTER SETTINGS` - `ALTER MOVE PARTITION` - `ALTER FETCH PARTITION` @@ -104,9 +104,9 @@ GRANT SELECT(x,y) ON db.table TO john WITH GRANT OPTION - [CREATE](#grant-create) - `CREATE DATABASE` - `CREATE TABLE` + - `CREATE TEMPORARY TABLE` - `CREATE VIEW` - `CREATE DICTIONARY` - - `CREATE TEMPORARY TABLE` - [DROP](#grant-drop) - `DROP DATABASE` - `DROP TABLE` @@ -152,7 +152,7 @@ GRANT SELECT(x,y) ON db.table TO john WITH GRANT OPTION - `SYSTEM RELOAD` - `SYSTEM RELOAD CONFIG` - `SYSTEM RELOAD DICTIONARY` - - `SYSTEM RELOAD EMBEDDED DICTIONARIES` + - `SYSTEM RELOAD EMBEDDED DICTIONARIES` - `SYSTEM MERGES` - `SYSTEM TTL MERGES` - `SYSTEM FETCHES` @@ -279,7 +279,7 @@ GRANT INSERT(x,y) ON db.table TO john - `ALTER ADD CONSTRAINT`. Уровень: `TABLE`. Алиасы: `ADD CONSTRAINT` - `ALTER DROP CONSTRAINT`. Уровень: `TABLE`. Алиасы: `DROP CONSTRAINT` - `ALTER TTL`. Уровень: `TABLE`. Алиасы: `ALTER MODIFY TTL`, `MODIFY TTL` - - `ALTER MATERIALIZE TTL`. Уровень: `TABLE`. Алиасы: `MATERIALIZE TTL` + - `ALTER MATERIALIZE TTL`. Уровень: `TABLE`. Алиасы: `MATERIALIZE TTL` - `ALTER SETTINGS`. Уровень: `TABLE`. Алиасы: `ALTER SETTING`, `ALTER MODIFY SETTING`, `MODIFY SETTING` - `ALTER MOVE PARTITION`. Уровень: `TABLE`. Алиасы: `ALTER MOVE PART`, `MOVE PARTITION`, `MOVE PART` - `ALTER FETCH PARTITION`. Уровень: `TABLE`. Алиасы: `FETCH PARTITION` @@ -307,9 +307,9 @@ GRANT INSERT(x,y) ON db.table TO john - `CREATE`. Уровень: `GROUP` - `CREATE DATABASE`. Уровень: `DATABASE` - `CREATE TABLE`. Уровень: `TABLE` + - `CREATE TEMPORARY TABLE`. Уровень: `GLOBAL` - `CREATE VIEW`. Уровень: `VIEW` - `CREATE DICTIONARY`. Уровень: `DICTIONARY` - - `CREATE TEMPORARY TABLE`. Уровень: `GLOBAL` **Дополнительно** @@ -407,7 +407,7 @@ GRANT INSERT(x,y) ON db.table TO john - `SYSTEM RELOAD`. Уровень: `GROUP` - `SYSTEM RELOAD CONFIG`. Уровень: `GLOBAL`. Алиасы: `RELOAD CONFIG` - `SYSTEM RELOAD DICTIONARY`. Уровень: `GLOBAL`. Алиасы: `SYSTEM RELOAD DICTIONARIES`, `RELOAD DICTIONARY`, `RELOAD DICTIONARIES` - - `SYSTEM RELOAD EMBEDDED DICTIONARIES`. Уровень: `GLOBAL`. Алиасы: `RELOAD EMBEDDED DICTIONARIES` + - `SYSTEM RELOAD EMBEDDED DICTIONARIES`. Уровень: `GLOBAL`. Алиасы: `RELOAD EMBEDDED DICTIONARIES` - `SYSTEM MERGES`. Уровень: `TABLE`. Алиасы: `SYSTEM STOP MERGES`, `SYSTEM START MERGES`, `STOP MERGES`, `START MERGES` - `SYSTEM TTL MERGES`. Уровень: `TABLE`. Алиасы: `SYSTEM STOP TTL MERGES`, `SYSTEM START TTL MERGES`, `STOP TTL MERGES`, `START TTL MERGES` - `SYSTEM FETCHES`. Уровень: `TABLE`. 
Алиасы: `SYSTEM STOP FETCHES`, `SYSTEM START FETCHES`, `STOP FETCHES`, `START FETCHES` diff --git a/docs/ru/sql-reference/statements/optimize.md b/docs/ru/sql-reference/statements/optimize.md index 44101910a6c..e1a9d613537 100644 --- a/docs/ru/sql-reference/statements/optimize.md +++ b/docs/ru/sql-reference/statements/optimize.md @@ -5,19 +5,83 @@ toc_title: OPTIMIZE # OPTIMIZE {#misc_operations-optimize} -``` sql -OPTIMIZE TABLE [db.]name [ON CLUSTER cluster] [PARTITION partition | PARTITION ID 'partition_id'] [FINAL] [DEDUPLICATE] -``` - -Запрос пытается запустить внеплановый мёрж кусков данных для таблиц семейства [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md). Другие движки таблиц не поддерживаются. - -Если `OPTIMIZE` применяется к таблицам семейства [ReplicatedMergeTree](../../engines/table-engines/mergetree-family/replication.md), ClickHouse создаёт задачу на мёрж и ожидает её исполнения на всех узлах (если активирована настройка `replication_alter_partitions_sync`). - -- Если `OPTIMIZE` не выполняет мёрж по любой причине, ClickHouse не оповещает об этом клиента. Чтобы включить оповещения, используйте настройку [optimize_throw_if_noop](../../operations/settings/settings.md#setting-optimize_throw_if_noop). -- Если указать `PARTITION`, то оптимизация выполняется только для указанной партиции. [Как задавать имя партиции в запросах](alter/index.md#alter-how-to-specify-part-expr). -- Если указать `FINAL`, то оптимизация выполняется даже в том случае, если все данные уже лежат в одном куске. Кроме того, слияние является принудительным, даже если выполняются параллельные слияния. -- Если указать `DEDUPLICATE`, то произойдет схлопывание полностью одинаковых строк (сравниваются значения во всех колонках), имеет смысл только для движка MergeTree. +Запрос пытается запустить внеплановое слияние кусков данных для таблиц. !!! warning "Внимание" - Запрос `OPTIMIZE` не может устранить причину появления ошибки «Too many parts». - + `OPTIMIZE` не устраняет причину появления ошибки `Too many parts`. + +**Синтаксис** + +``` sql +OPTIMIZE TABLE [db.]name [ON CLUSTER cluster] [PARTITION partition | PARTITION ID 'partition_id'] [FINAL] [DEDUPLICATE [BY expression]] +``` + +Может применяться к таблицам семейства [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md), [MaterializedView](../../engines/table-engines/special/materializedview.md) и [Buffer](../../engines/table-engines/special/buffer.md). Другие движки таблиц не поддерживаются. + +Если запрос `OPTIMIZE` применяется к таблицам семейства [ReplicatedMergeTree](../../engines/table-engines/mergetree-family/replication.md), ClickHouse создаёт задачу на слияние и ожидает её исполнения на всех узлах (если активирована настройка `replication_alter_partitions_sync`). + +- По умолчанию, если запросу `OPTIMIZE` не удалось выполнить слияние, то +ClickHouse не оповещает клиента. Чтобы включить оповещения, используйте настройку [optimize_throw_if_noop](../../operations/settings/settings.md#setting-optimize_throw_if_noop). +- Если указать `PARTITION`, то оптимизация выполняется только для указанной партиции. [Как задавать имя партиции в запросах](alter/index.md#alter-how-to-specify-part-expr). +- Если указать `FINAL`, то оптимизация выполняется даже в том случае, если все данные уже лежат в одном куске данных. Кроме того, слияние является принудительным, даже если выполняются параллельные слияния. 
+- Если указать `DEDUPLICATE`, то произойдет схлопывание полностью одинаковых строк (сравниваются значения во всех столбцах), имеет смысл только для движка MergeTree. + +## Выражение BY {#by-expression} + +Чтобы выполнить дедупликацию по произвольному набору столбцов, вы можете явно указать список столбцов или использовать любую комбинацию подстановки [`*`](../../sql-reference/statements/select/index.md#asterisk), выражений [`COLUMNS`](../../sql-reference/statements/select/index.md#columns-expression) и [`EXCEPT`](../../sql-reference/statements/select/index.md#except-modifier). + + Список столбцов для дедупликации должен включать все столбцы, указанные в условиях сортировки (первичный ключ и ключ сортировки), а также в условиях партиционирования (ключ партиционирования). + + !!! note "Примечание" + Обратите внимание, что символ подстановки `*` обрабатывается так же, как и в запросах `SELECT`: столбцы `MATERIALIZED` и `ALIAS` не включаются в результат. + Если указать пустой список или выражение, которое возвращает пустой список, или дедуплицировать столбец по псевдониму (`ALIAS`), то сервер вернет ошибку. + + +**Примеры** + +Рассмотрим таблицу: + +``` sql +CREATE TABLE example ( + primary_key Int32, + secondary_key Int32, + value UInt32, + partition_key UInt32, + materialized_value UInt32 MATERIALIZED 12345, + aliased_value UInt32 ALIAS 2, + PRIMARY KEY primary_key +) ENGINE=MergeTree +PARTITION BY partition_key; +``` + +Прежний способ дедупликации, когда учитываются все столбцы. Строка удаляется только в том случае, если все значения во всех столбцах равны соответствующим значениям в предыдущей строке. + +``` sql +OPTIMIZE TABLE example FINAL DEDUPLICATE; +``` + +Дедупликация по всем столбцам, кроме `ALIAS` и `MATERIALIZED`: `primary_key`, `secondary_key`, `value`, `partition_key` и `materialized_value`. + + +``` sql +OPTIMIZE TABLE example FINAL DEDUPLICATE BY *; +``` + +Дедупликация по всем столбцам, кроме `ALIAS`, `MATERIALIZED` и `materialized_value`: столбцы `primary_key`, `secondary_key`, `value` и `partition_key`. + + +``` sql +OPTIMIZE TABLE example FINAL DEDUPLICATE BY * EXCEPT materialized_value; +``` + +Дедупликация по столбцам `primary_key`, `secondary_key` и `partition_key`. + +``` sql +OPTIMIZE TABLE example FINAL DEDUPLICATE BY primary_key, secondary_key, partition_key; +``` + +Дедупликация по любому столбцу, соответствующему регулярному выражению: столбцам `primary_key`, `secondary_key` и `partition_key`. + +``` sql +OPTIMIZE TABLE example FINAL DEDUPLICATE BY COLUMNS('.*_key'); +``` diff --git a/docs/ru/sql-reference/statements/rename.md b/docs/ru/sql-reference/statements/rename.md index 104918c1a73..192426dbafa 100644 --- a/docs/ru/sql-reference/statements/rename.md +++ b/docs/ru/sql-reference/statements/rename.md @@ -3,8 +3,16 @@ toc_priority: 48 toc_title: RENAME --- -# RENAME {#misc_operations-rename} +# RENAME Statement {#misc_operations-rename} +## RENAME DATABASE {#misc_operations-rename_database} +Переименование базы данных + +``` +RENAME DATABASE atomic_database1 TO atomic_database2 [ON CLUSTER cluster] +``` + +## RENAME TABLE {#misc_operations-rename_table} Переименовывает одну или несколько таблиц. ``` sql @@ -12,5 +20,3 @@ RENAME TABLE [db11.]name11 TO [db12.]name12, [db21.]name21 TO [db22.]name22, ... ``` Переименовывание таблицы является лёгкой операцией. Если вы указали после `TO` другую базу данных, то таблица будет перенесена в эту базу данных. 
diff --git a/docs/ru/sql-reference/statements/rename.md b/docs/ru/sql-reference/statements/rename.md
index 104918c1a73..192426dbafa 100644
--- a/docs/ru/sql-reference/statements/rename.md
+++ b/docs/ru/sql-reference/statements/rename.md
@@ -3,8 +3,16 @@ toc_priority: 48
 toc_title: RENAME
 ---
 
-# RENAME {#misc_operations-rename}
+# RENAME Statement {#misc_operations-rename}
 
+## RENAME DATABASE {#misc_operations-rename_database}
+Переименовывает базу данных.
+
+``` sql
+RENAME DATABASE atomic_database1 TO atomic_database2 [ON CLUSTER cluster]
+```
+
+## RENAME TABLE {#misc_operations-rename_table}
 Переименовывает одну или несколько таблиц.
 
 ``` sql
@@ -12,5 +20,3 @@ RENAME TABLE [db11.]name11 TO [db12.]name12, [db21.]name21 TO [db22.]name22, ...
 ```
 
 Переименовывание таблицы является лёгкой операцией. Если вы указали после `TO` другую базу данных, то таблица будет перенесена в эту базу данных. При этом директории с базами данных должны быть расположены в одной файловой системе (иначе возвращается ошибка). Переименование нескольких таблиц в одном запросе — неатомарная операция: она может выполниться частично, и запросы в других сессиях могут получить ошибку `Table ... doesn't exist...`.
-
-
diff --git a/docs/ru/sql-reference/statements/select/index.md b/docs/ru/sql-reference/statements/select/index.md
index 886952ea5cf..a3b4e889397 100644
--- a/docs/ru/sql-reference/statements/select/index.md
+++ b/docs/ru/sql-reference/statements/select/index.md
@@ -45,6 +45,7 @@ SELECT [DISTINCT] expr_list
 - [Секция SELECT](#select-clause)
 - [Секция DISTINCT](distinct.md)
 - [Секция LIMIT](limit.md)
+- [Секция OFFSET](offset.md)
 - [Секция UNION ALL](union.md)
 - [Секция INTO OUTFILE](into-outfile.md)
 - [Секция FORMAT](format.md)
diff --git a/docs/ru/sql-reference/statements/select/limit.md b/docs/ru/sql-reference/statements/select/limit.md
index 03b720226f0..e4012e89556 100644
--- a/docs/ru/sql-reference/statements/select/limit.md
+++ b/docs/ru/sql-reference/statements/select/limit.md
@@ -12,13 +12,16 @@ toc_title: LIMIT
 
 При отсутствии секции [ORDER BY](order-by.md), однозначно сортирующей результат, результат может быть произвольным и может являться недетерминированным.
 
+!!! note "Примечание"
+    Количество возвращаемых строк может зависеть также от настройки [limit](../../../operations/settings/settings.md#limit).
+
 ## Модификатор LIMIT ... WITH TIES {#limit-with-ties}
 
 Когда вы устанавливаете модификатор WITH TIES для `LIMIT n[,m]` и указываете `ORDER BY expr_list`, вы получаете первые `n` или `n,m` строк и дополнительно все строки, в которых значения полей, указанных в `ORDER BY`, равны значениям в строке на позиции `n` для `LIMIT n` или `m` для `LIMIT n,m`.
 
 Этот модификатор также может быть скомбинирован с модификатором [ORDER BY ... WITH FILL](../../../sql-reference/statements/select/order-by.md#orderby-with-fill).
 
-Для примера следующий запрос
+Для примера следующий запрос:
 
 ```sql
 SELECT * FROM (
     SELECT number%50 AS n FROM numbers(100)
diff --git a/docs/ru/sql-reference/statements/select/offset.md b/docs/ru/sql-reference/statements/select/offset.md
new file mode 100644
index 00000000000..31ff1d6ea8b
--- /dev/null
+++ b/docs/ru/sql-reference/statements/select/offset.md
@@ -0,0 +1,86 @@
+---
+toc_title: OFFSET
+---
+
+# Секция OFFSET FETCH {#offset-fetch}
+
+`OFFSET` и `FETCH` позволяют извлекать данные по частям. Они указывают строки, которые вы хотите получить в результате запроса.
+
+``` sql
+OFFSET offset_row_count {ROW | ROWS} [FETCH {FIRST | NEXT} fetch_row_count {ROW | ROWS} {ONLY | WITH TIES}]
+```
+
+`offset_row_count` и `fetch_row_count` могут быть числом или литеральной константой. Если вы не задаете `fetch_row_count` явно, используется значение по умолчанию, равное 1.
+
+`OFFSET` указывает количество строк, которые необходимо пропустить перед началом возврата строк из запроса.
+
+`FETCH` указывает максимальное количество строк, которые могут быть получены в результате запроса.
+
+Опция `ONLY` используется для возврата строк, которые следуют сразу же за строками, пропущенными секцией `OFFSET`. В этом случае `FETCH` — это альтернатива [LIMIT](../../../sql-reference/statements/select/limit.md).
Например, следующий запрос + +``` sql +SELECT * FROM test_fetch ORDER BY a OFFSET 1 ROW FETCH FIRST 3 ROWS ONLY; +``` + +идентичен запросу + +``` sql +SELECT * FROM test_fetch ORDER BY a LIMIT 3 OFFSET 1; +``` + +Опция `WITH TIES` используется для возврата дополнительных строк, которые привязываются к последней в результате запроса. Например, если `fetch_row_count` имеет значение 5 и существуют еще 2 строки с такими же значениями столбцов, указанных в `ORDER BY`, что и у пятой строки результата, то финальный набор будет содержать 7 строк. + +!!! note "Примечание" + Секция `OFFSET` должна находиться перед секцией `FETCH`, если обе присутствуют. + +!!! note "Примечание" + Общее количество пропущенных строк может зависеть также от настройки [offset](../../../operations/settings/settings.md#offset). + +## Примеры {#examples} + +Входная таблица: + +``` text +┌─a─┬─b─┐ +│ 1 │ 1 │ +│ 2 │ 1 │ +│ 3 │ 4 │ +│ 1 │ 3 │ +│ 5 │ 4 │ +│ 0 │ 6 │ +│ 5 │ 7 │ +└───┴───┘ +``` + +Использование опции `ONLY`: + +``` sql +SELECT * FROM test_fetch ORDER BY a OFFSET 3 ROW FETCH FIRST 3 ROWS ONLY; +``` + +Результат: + +``` text +┌─a─┬─b─┐ +│ 2 │ 1 │ +│ 3 │ 4 │ +│ 5 │ 4 │ +└───┴───┘ +``` + +Использование опции `WITH TIES`: + +``` sql +SELECT * FROM test_fetch ORDER BY a OFFSET 3 ROW FETCH FIRST 3 ROWS WITH TIES; +``` + +Результат: + +``` text +┌─a─┬─b─┐ +│ 2 │ 1 │ +│ 3 │ 4 │ +│ 5 │ 4 │ +│ 5 │ 7 │ +└───┴───┘ +``` diff --git a/docs/ru/sql-reference/statements/select/order-by.md b/docs/ru/sql-reference/statements/select/order-by.md index 9ddec923701..cb49d167b13 100644 --- a/docs/ru/sql-reference/statements/select/order-by.md +++ b/docs/ru/sql-reference/statements/select/order-by.md @@ -392,84 +392,3 @@ ORDER BY │ 1970-03-12 │ 1970-01-08 │ original │ └────────────┴────────────┴──────────┘ ``` - -## Секция OFFSET FETCH {#offset-fetch} - -`OFFSET` и `FETCH` позволяют извлекать данные по частям. Они указывают строки, которые вы хотите получить в результате запроса. - -``` sql -OFFSET offset_row_count {ROW | ROWS}] [FETCH {FIRST | NEXT} fetch_row_count {ROW | ROWS} {ONLY | WITH TIES}] -``` - -`offset_row_count` или `fetch_row_count` может быть числом или литеральной константой. Если вы не используете `fetch_row_count`, то его значение равно 1. - -`OFFSET` указывает количество строк, которые необходимо пропустить перед началом возврата строк из запроса. - -`FETCH` указывает максимальное количество строк, которые могут быть получены в результате запроса. - -Опция `ONLY` используется для возврата строк, которые следуют сразу же за строками, пропущенными секцией `OFFSET`. В этом случае `FETCH` — это альтернатива [LIMIT](../../../sql-reference/statements/select/limit.md). Например, следующий запрос - -``` sql -SELECT * FROM test_fetch ORDER BY a OFFSET 1 ROW FETCH FIRST 3 ROWS ONLY; -``` - -идентичен запросу - -``` sql -SELECT * FROM test_fetch ORDER BY a LIMIT 3 OFFSET 1; -``` - -Опция `WITH TIES` используется для возврата дополнительных строк, которые привязываются к последней в результате запроса. Например, если `fetch_row_count` имеет значение 5 и существуют еще 2 строки с такими же значениями столбцов, указанных в `ORDER BY`, что и у пятой строки результата, то финальный набор будет содержать 7 строк. - -!!! note "Примечание" - Секция `OFFSET` должна находиться перед секцией `FETCH`, если обе присутствуют. 
- -### Примеры {#examples} - -Входная таблица: - -``` text -┌─a─┬─b─┐ -│ 1 │ 1 │ -│ 2 │ 1 │ -│ 3 │ 4 │ -│ 1 │ 3 │ -│ 5 │ 4 │ -│ 0 │ 6 │ -│ 5 │ 7 │ -└───┴───┘ -``` - -Использование опции `ONLY`: - -``` sql -SELECT * FROM test_fetch ORDER BY a OFFSET 3 ROW FETCH FIRST 3 ROWS ONLY; -``` - -Результат: - -``` text -┌─a─┬─b─┐ -│ 2 │ 1 │ -│ 3 │ 4 │ -│ 5 │ 4 │ -└───┴───┘ -``` - -Использование опции `WITH TIES`: - -``` sql -SELECT * FROM test_fetch ORDER BY a OFFSET 3 ROW FETCH FIRST 3 ROWS WITH TIES; -``` - -Результат: - -``` text -┌─a─┬─b─┐ -│ 2 │ 1 │ -│ 3 │ 4 │ -│ 5 │ 4 │ -│ 5 │ 7 │ -└───┴───┘ -``` - diff --git a/docs/ru/sql-reference/statements/system.md b/docs/ru/sql-reference/statements/system.md index ab68033d4f3..f0f9b77b5ba 100644 --- a/docs/ru/sql-reference/statements/system.md +++ b/docs/ru/sql-reference/statements/system.md @@ -204,6 +204,7 @@ SYSTEM STOP MOVES [[db.]merge_tree_family_table_name] ClickHouse может управлять фоновыми процессами связанными c репликацией в таблицах семейства [ReplicatedMergeTree](../../engines/table-engines/mergetree-family/replacingmergetree.md). ### STOP FETCHES {#query_language-system-stop-fetches} + Позволяет остановить фоновые процессы синхронизации новыми вставленными кусками данных с другими репликами в кластере для таблиц семейства `ReplicatedMergeTree`: Всегда возвращает `Ok.` вне зависимости от типа таблицы и даже если таблица или база данных не существет. @@ -212,6 +213,7 @@ SYSTEM STOP FETCHES [[db.]replicated_merge_tree_family_table_name] ``` ### START FETCHES {#query_language-system-start-fetches} + Позволяет запустить фоновые процессы синхронизации новыми вставленными кусками данных с другими репликами в кластере для таблиц семейства `ReplicatedMergeTree`: Всегда возвращает `Ok.` вне зависимости от типа таблицы и даже если таблица или база данных не существет. @@ -220,6 +222,7 @@ SYSTEM START FETCHES [[db.]replicated_merge_tree_family_table_name] ``` ### STOP REPLICATED SENDS {#query_language-system-start-replicated-sends} + Позволяет остановить фоновые процессы отсылки новых вставленных кусков данных другим репликам в кластере для таблиц семейства `ReplicatedMergeTree`: ``` sql @@ -227,6 +230,7 @@ SYSTEM STOP REPLICATED SENDS [[db.]replicated_merge_tree_family_table_name] ``` ### START REPLICATED SENDS {#query_language-system-start-replicated-sends} + Позволяет запустить фоновые процессы отсылки новых вставленных кусков данных другим репликам в кластере для таблиц семейства `ReplicatedMergeTree`: ``` sql @@ -234,6 +238,7 @@ SYSTEM START REPLICATED SENDS [[db.]replicated_merge_tree_family_table_name] ``` ### STOP REPLICATION QUEUES {#query_language-system-stop-replication-queues} + Останавливает фоновые процессы разбора заданий из очереди репликации которая хранится в Zookeeper для таблиц семейства `ReplicatedMergeTree`. Возможные типы заданий - merges, fetches, mutation, DDL запросы с ON CLUSTER: ``` sql @@ -241,6 +246,7 @@ SYSTEM STOP REPLICATION QUEUES [[db.]replicated_merge_tree_family_table_name] ``` ### START REPLICATION QUEUES {#query_language-system-start-replication-queues} + Запускает фоновые процессы разбора заданий из очереди репликации которая хранится в Zookeeper для таблиц семейства `ReplicatedMergeTree`. 
 Возможные типы заданий - merges, fetches, mutation, DDL запросы с ON CLUSTER:
 
 ``` sql
@@ -248,20 +254,24 @@ SYSTEM START REPLICATION QUEUES [[db.]replicated_merge_tree_family_table_name]
 ```
 
 ### SYNC REPLICA {#query_language-system-sync-replica}
+
 Ждет, когда таблица семейства `ReplicatedMergeTree` будет синхронизирована с другими репликами в кластере; запрос работает до достижения `receive_timeout`, если синхронизация для таблицы в данный момент отключена:
 
 ``` sql
 SYSTEM SYNC REPLICA [db.]replicated_merge_tree_family_table_name
 ```
 
+После выполнения этого запроса таблица `[db.]replicated_merge_tree_family_table_name` синхронизирует команды из общего реплицированного лога в свою собственную очередь репликации. Затем запрос ждет, пока реплика не обработает все синхронизированные команды.
+
 ### RESTART REPLICA {#query_language-system-restart-replica}
-Реинициализация состояния Zookeeper сессий для таблицы семейства `ReplicatedMergeTree`, сравнивает текущее состояние с тем что хранится в Zookeeper как источник правды и добавляет задачи Zookeeper очередь если необходимо
-Инициализация очереди репликации на основе данных ZooKeeper, происходит так же как при attach table. На короткое время таблица станет недоступной для любых операций.
+
+Реинициализация состояния ZooKeeper-сессий для таблицы семейства `ReplicatedMergeTree`. Сравнивает текущее состояние с тем, что хранится в ZooKeeper (как с источником правды), и при необходимости добавляет задачи в очередь репликации в ZooKeeper.
+Инициализация очереди репликации на основе данных ZooKeeper происходит так же, как при выполнении `ATTACH TABLE`. На короткое время таблица станет недоступной для любых операций.
 
 ``` sql
 SYSTEM RESTART REPLICA [db.]replicated_merge_tree_family_table_name
 ```
 
 ### RESTART REPLICAS {#query_language-system-restart-replicas}
-Реинициализация состояния Zookeeper сессий для всех `ReplicatedMergeTree` таблиц, сравнивает текущее состояние с тем что хранится в Zookeeper как источник правды и добавляет задачи Zookeeper очередь если необходимо
+Реинициализация состояния ZooKeeper-сессий для всех таблиц семейства `ReplicatedMergeTree`. Сравнивает текущее состояние реплики с тем, что хранится в ZooKeeper (как с источником правды), и при необходимости добавляет задачи в очередь репликации в ZooKeeper.
diff --git a/docs/ru/sql-reference/statements/truncate.md b/docs/ru/sql-reference/statements/truncate.md
index b23d96d5b08..63f7fa86ea5 100644
--- a/docs/ru/sql-reference/statements/truncate.md
+++ b/docs/ru/sql-reference/statements/truncate.md
@@ -11,6 +11,6 @@ TRUNCATE TABLE [IF EXISTS] [db.]name [ON CLUSTER cluster]
 
 Удаляет все данные из таблицы. Если условие `IF EXISTS` не указано, запрос вернет ошибку, если таблицы не существует.
 
-Запрос `TRUNCATE` не поддерживается для следующих движков: [View](../../engines/table-engines/special/view.md), [File](../../engines/table-engines/special/file.md), [URL](../../engines/table-engines/special/url.md) и [Null](../../engines/table-engines/special/null.md).
+Запрос `TRUNCATE` не поддерживается для следующих движков: [View](../../engines/table-engines/special/view.md), [File](../../engines/table-engines/special/file.md), [URL](../../engines/table-engines/special/url.md), [Buffer](../../engines/table-engines/special/buffer.md) и [Null](../../engines/table-engines/special/null.md).
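A quick sketch of the `TRUNCATE` behavior documented in the hunk above; the table name and values are illustrative:

```sql
-- TRUNCATE removes all rows but keeps the table definition.
CREATE TABLE hits_local (id UInt64, url String) ENGINE = MergeTree ORDER BY id;
INSERT INTO hits_local VALUES (1, 'https://example.com/a'), (2, 'https://example.com/b');

TRUNCATE TABLE IF EXISTS hits_local;

SELECT count() FROM hits_local;  -- returns 0; the table itself still exists
```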
diff --git a/docs/ru/sql-reference/syntax.md b/docs/ru/sql-reference/syntax.md
index 6a923fd6b58..dbbf5f92612 100644
--- a/docs/ru/sql-reference/syntax.md
+++ b/docs/ru/sql-reference/syntax.md
@@ -128,7 +128,7 @@ expr AS alias
 
     Например, `SELECT table_name_alias.column_name FROM table_name table_name_alias`.
 
-    В функции [CAST](sql_reference/syntax.md#type_conversion_function-cast), ключевое слово `AS` имеет другое значение. Смотрите описание функции.
+    В функции [CAST](functions/type-conversion-functions.md#type_conversion_function-cast) ключевое слово `AS` имеет другое значение. Смотрите описание функции.
 
 - `expr` — любое выражение, которое поддерживает ClickHouse.
 
@@ -138,7 +138,7 @@ expr AS alias
 
     Например, `SELECT "table t".column_name FROM table_name AS "table t"`.
 
-### Примечания по использованию {#primechaniia-po-ispolzovaniiu}
+### Примечания по использованию {#notes-on-usage}
 
 Синонимы являются глобальными для запроса или подзапроса, и вы можете определить синоним в любой части запроса для любого выражения. Например, `SELECT (1 AS n) + 2, n`.
 
@@ -169,9 +169,9 @@ Received exception from server (version 18.14.17):
 Code: 184. DB::Exception: Received from localhost:9000, 127.0.0.1. DB::Exception: Aggregate function sum(b) is found inside another aggregate function in query.
 ```
 
-В этом примере мы объявили таблицу `t` со столбцом `b`. Затем, при выборе данных, мы определили синоним `sum(b) AS b`. Поскольку синонимы глобальные, то ClickHouse заменил литерал `b` в выражении `argMax(a, b)` выражением `sum(b)`. Эта замена вызвала исключение.
+В этом примере мы объявили таблицу `t` со столбцом `b`. Затем, при выборе данных, мы определили синоним `sum(b) AS b`. Поскольку синонимы глобальные, ClickHouse заменил литерал `b` в выражении `argMax(a, b)` выражением `sum(b)`. Эта замена вызвала исключение. Это поведение можно изменить, установив настройку [prefer_column_name_to_alias](../operations/settings/settings.md#prefer_column_name_to_alias) в значение `1`.
 
-## Звёздочка {#zviozdochka}
+## Звёздочка {#asterisk}
 
 В запросе `SELECT` вместо выражения может стоять звёздочка. Подробнее смотрите раздел «SELECT».
 
@@ -180,4 +180,3 @@ Code: 184. DB::Exception: Received from localhost:9000, 127.0.0.1. DB::Exception
 
 Выражение представляет собой функцию, идентификатор, литерал, применение оператора, выражение в скобках, подзапрос или звёздочку, а также может содержать синоним.
 
 Список выражений — одно выражение или несколько выражений через запятую.
 Функции и операторы, в свою очередь, могут иметь в качестве аргументов произвольные выражения.
-
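A runnable sketch of the alias collision discussed in the hunk above, and of the `prefer_column_name_to_alias` escape hatch; the schema and values are illustrative:

```sql
CREATE TABLE t (a UInt32, b UInt32) ENGINE = Memory;
INSERT INTO t VALUES (1, 10), (2, 20);

-- Fails with Code: 184: the global alias `b` replaces the column inside argMax.
SELECT argMax(a, b), sum(b) AS b FROM t;

-- With the setting enabled, column names take priority over aliases.
SET prefer_column_name_to_alias = 1;
SELECT argMax(a, b), sum(b) AS b FROM t;  -- returns 2, 30
```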
diff --git a/docs/ru/sql-reference/table-functions/postgresql.md b/docs/ru/sql-reference/table-functions/postgresql.md
index 66637276726..2d8afe28f1e 100644
--- a/docs/ru/sql-reference/table-functions/postgresql.md
+++ b/docs/ru/sql-reference/table-functions/postgresql.md
@@ -65,10 +65,10 @@ postgres=# INSERT INTO test (int_id, str, "float") VALUES (1,'test',2);
 INSERT 0 1
 
 postgresql> SELECT * FROM test;
-  int_id | int_nullable | float | str  | float_nullable
- --------+--------------+-------+------+----------------
-       1 |              |     2 | test |
- (1 row)
+   int_id | int_nullable | float | str  | float_nullable
+  --------+--------------+-------+------+----------------
+        1 |              |     2 | test |
+  (1 row)
 ```
 
 Получение данных в ClickHouse:
diff --git a/docs/ru/sql-reference/table-functions/s3.md b/docs/ru/sql-reference/table-functions/s3.md
index 1d3fc8cfdb7..e062e59c67c 100644
--- a/docs/ru/sql-reference/table-functions/s3.md
+++ b/docs/ru/sql-reference/table-functions/s3.md
@@ -18,7 +18,7 @@ s3(path, [aws_access_key_id, aws_secret_access_key,] format, structure, [compression])
 
 - `path` — URL-адрес бакета с указанием пути к файлу. Поддерживает следующие подстановочные знаки в режиме «только чтение»: `*`, `?`, `{abc,def}` и `{N..M}`, где `N`, `M` — числа, `'abc'`, `'def'` — строки. Подробнее смотрите [здесь](../../engines/table-engines/integrations/s3.md#wildcards-in-path).
 - `format` — [формат](../../interfaces/formats.md#formats) файла.
 - `structure` — структура таблицы. Формат: `'column1_name column1_type, column2_name column2_type, ...'`.
-- `compression` — автоматически обнаруживает сжатие по расширению файла. Возможные значения: none, gzip/gz, brotli/br, xz/LZMA, zstd/zst. Необязательный параметр.
+- `compression` — метод сжатия. Необязательный параметр; по умолчанию определяется автоматически по расширению файла. Возможные значения: `none`, `gzip/gz`, `brotli/br`, `xz/LZMA`, `zstd/zst`.
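A usage sketch for the `s3` parameters documented above; the bucket URL and schema are illustrative placeholders:

```sql
-- Compression is normally inferred from the .gz extension; here it is passed explicitly.
SELECT *
FROM s3(
    'https://my-bucket.s3.amazonaws.com/data/visits-*.csv.gz',
    'CSV',
    'user_id UInt64, visit_date Date',
    'gzip'
)
LIMIT 5;
```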
**Возвращаемые значения** diff --git a/docs/tools/single_page.py b/docs/tools/single_page.py index b88df5a03cb..a1e650d3ad3 100644 --- a/docs/tools/single_page.py +++ b/docs/tools/single_page.py @@ -109,7 +109,8 @@ def build_single_page_version(lang, args, nav, cfg): extra['single_page'] = True extra['is_amp'] = False - with open(os.path.join(args.docs_dir, lang, 'single.md'), 'w') as single_md: + single_md_path = os.path.join(args.docs_dir, lang, 'single.md') + with open(single_md_path, 'w') as single_md: concatenate(lang, args.docs_dir, single_md, nav) with util.temp_dir() as site_temp: @@ -221,3 +222,7 @@ def build_single_page_version(lang, args, nav, cfg): subprocess.check_call(' '.join(create_pdf_command), shell=True) logging.info(f'Finished building single page version for {lang}') + + if os.path.exists(single_md_path): + os.unlink(single_md_path) + \ No newline at end of file diff --git a/docs/zh/commercial/cloud.md b/docs/zh/commercial/cloud.md index c74ffa93e9a..e0a297f51c8 100644 --- a/docs/zh/commercial/cloud.md +++ b/docs/zh/commercial/cloud.md @@ -31,7 +31,7 @@ toc_title: 云 ## 阿里云 {#alibaba-cloud} -阿里云的 ClickHouse 托管服务 [中国站](https://www.aliyun.com/product/clickhouse) (国际站于2021年5月初开放) 提供以下主要功能: +[阿里云的 ClickHouse 托管服务](https://www.alibabacloud.com/zh/product/clickhouse) 提供以下主要功能: - 基于阿里飞天分布式系统的高可靠云盘存储引擎 - 按需扩容,无需手动进行数据搬迁 diff --git a/docs/zh/development/build.md b/docs/zh/development/build.md index 1aa5c1c97b7..01e0740bfa4 100644 --- a/docs/zh/development/build.md +++ b/docs/zh/development/build.md @@ -35,28 +35,12 @@ sudo apt-get install git cmake ninja-build 或cmake3而不是旧系统上的cmake。 或者在早期版本的系统中用 cmake3 替代 cmake -## 安装 GCC 10 {#an-zhuang-gcc-10} +## 安装 Clang -有几种方法可以做到这一点。 +On Ubuntu/Debian you can use the automatic installation script (check [official webpage](https://apt.llvm.org/)) -### 安装 PPA 包 {#an-zhuang-ppa-bao} - -``` bash -sudo apt-get install software-properties-common -sudo apt-add-repository ppa:ubuntu-toolchain-r/test -sudo apt-get update -sudo apt-get install gcc-10 g++-10 -``` - -### 源码安装 gcc {#yuan-ma-an-zhuang-gcc} - -请查看 [utils/ci/build-gcc-from-sources.sh](https://github.com/ClickHouse/ClickHouse/blob/master/utils/ci/build-gcc-from-sources.sh) - -## 使用 GCC 10 来编译 {#shi-yong-gcc-10-lai-bian-yi} - -``` bash -export CC=gcc-10 -export CXX=g++-10 +```bash +sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)" ``` ## 拉取 ClickHouse 源码 {#la-qu-clickhouse-yuan-ma-1} diff --git a/docs/zh/development/developer-instruction.md b/docs/zh/development/developer-instruction.md index 53aab5dc086..04950c11521 100644 --- a/docs/zh/development/developer-instruction.md +++ b/docs/zh/development/developer-instruction.md @@ -123,17 +123,13 @@ ClickHouse使用多个外部库进行构建。大多数外部库不需要单独 # C++ 编译器 {#c-bian-yi-qi} -GCC编译器从版本9开始,以及Clang版本\>=8都可支持构建ClickHouse。 +We support clang starting from version 11. 
-Yandex官方当前使用GCC构建ClickHouse,因为它生成的机器代码性能较好(根据测评,最多可以相差几个百分点)。Clang通常可以更加便捷的开发。我们的持续集成(CI)平台会运行大约十二种构建组合的检查。 +On Ubuntu/Debian you can use the automatic installation script (check [official webpage](https://apt.llvm.org/)) -在Ubuntu上安装GCC,请执行:`sudo apt install gcc g++` - -请使用`gcc --version`查看gcc的版本。如果gcc版本低于9,请参考此处的指示:https://clickhouse.tech/docs/zh/development/build/#an-zhuang-gcc-10 。 - -在Mac OS X上安装GCC,请执行:`brew install gcc` - -如果您决定使用Clang,还可以同时安装 `libc++`以及`lld`,前提是您也熟悉它们。此外,也推荐使用`ccache`。 +```bash +sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)" +``` # 构建的过程 {#gou-jian-de-guo-cheng} @@ -146,7 +142,7 @@ Yandex官方当前使用GCC构建ClickHouse,因为它生成的机器代码性 在`build`目录下,通过运行CMake配置构建。 在第一次运行之前,请定义用于指定编译器的环境变量(本示例中为gcc 9 编译器)。 - export CC=gcc-10 CXX=g++-10 + export CC=clang CXX=clang++ cmake .. `CC`变量指代C的编译器(C Compiler的缩写),而`CXX`变量指代要使用哪个C++编译器进行编译。 diff --git a/docs/zh/development/style.md b/docs/zh/development/style.md index c8e883920dd..bb9bfde7b9b 100644 --- a/docs/zh/development/style.md +++ b/docs/zh/development/style.md @@ -696,7 +696,7 @@ auto s = std::string{"Hello"}; **2.** 语言: C++20. -**3.** 编译器: `gcc`。 此时(2020年08月),代码使用9.3版编译。(它也可以使用`clang 8` 编译) +**3.** 编译器: `clang`。 此时(2021年03月),代码使用11版编译。(它也可以使用`gcc` 编译 but it is not suitable for production) 使用标准库 (`libc++`)。 diff --git a/docs/zh/getting-started/example-datasets/ontime.md b/docs/zh/getting-started/example-datasets/ontime.md index 3921f71fc7e..6d888b2196c 100644 --- a/docs/zh/getting-started/example-datasets/ontime.md +++ b/docs/zh/getting-started/example-datasets/ontime.md @@ -29,126 +29,127 @@ done 创建表结构: ``` sql -CREATE TABLE `ontime` ( - `Year` UInt16, - `Quarter` UInt8, - `Month` UInt8, - `DayofMonth` UInt8, - `DayOfWeek` UInt8, - `FlightDate` Date, - `UniqueCarrier` FixedString(7), - `AirlineID` Int32, - `Carrier` FixedString(2), - `TailNum` String, - `FlightNum` String, - `OriginAirportID` Int32, - `OriginAirportSeqID` Int32, - `OriginCityMarketID` Int32, - `Origin` FixedString(5), - `OriginCityName` String, - `OriginState` FixedString(2), - `OriginStateFips` String, - `OriginStateName` String, - `OriginWac` Int32, - `DestAirportID` Int32, - `DestAirportSeqID` Int32, - `DestCityMarketID` Int32, - `Dest` FixedString(5), - `DestCityName` String, - `DestState` FixedString(2), - `DestStateFips` String, - `DestStateName` String, - `DestWac` Int32, - `CRSDepTime` Int32, - `DepTime` Int32, - `DepDelay` Int32, - `DepDelayMinutes` Int32, - `DepDel15` Int32, - `DepartureDelayGroups` String, - `DepTimeBlk` String, - `TaxiOut` Int32, - `WheelsOff` Int32, - `WheelsOn` Int32, - `TaxiIn` Int32, - `CRSArrTime` Int32, - `ArrTime` Int32, - `ArrDelay` Int32, - `ArrDelayMinutes` Int32, - `ArrDel15` Int32, - `ArrivalDelayGroups` Int32, - `ArrTimeBlk` String, - `Cancelled` UInt8, - `CancellationCode` FixedString(1), - `Diverted` UInt8, - `CRSElapsedTime` Int32, - `ActualElapsedTime` Int32, - `AirTime` Int32, - `Flights` Int32, - `Distance` Int32, - `DistanceGroup` UInt8, - `CarrierDelay` Int32, - `WeatherDelay` Int32, - `NASDelay` Int32, - `SecurityDelay` Int32, - `LateAircraftDelay` Int32, - `FirstDepTime` String, - `TotalAddGTime` String, - `LongestAddGTime` String, - `DivAirportLandings` String, - `DivReachedDest` String, - `DivActualElapsedTime` String, - `DivArrDelay` String, - `DivDistance` String, - `Div1Airport` String, - `Div1AirportID` Int32, - `Div1AirportSeqID` Int32, - `Div1WheelsOn` String, - `Div1TotalGTime` String, - `Div1LongestGTime` String, - `Div1WheelsOff` String, - `Div1TailNum` String, - `Div2Airport` String, - 
`Div2AirportID` Int32, - `Div2AirportSeqID` Int32, - `Div2WheelsOn` String, - `Div2TotalGTime` String, - `Div2LongestGTime` String, - `Div2WheelsOff` String, - `Div2TailNum` String, - `Div3Airport` String, - `Div3AirportID` Int32, - `Div3AirportSeqID` Int32, - `Div3WheelsOn` String, - `Div3TotalGTime` String, - `Div3LongestGTime` String, - `Div3WheelsOff` String, - `Div3TailNum` String, - `Div4Airport` String, - `Div4AirportID` Int32, - `Div4AirportSeqID` Int32, - `Div4WheelsOn` String, - `Div4TotalGTime` String, - `Div4LongestGTime` String, - `Div4WheelsOff` String, - `Div4TailNum` String, - `Div5Airport` String, - `Div5AirportID` Int32, - `Div5AirportSeqID` Int32, - `Div5WheelsOn` String, - `Div5TotalGTime` String, - `Div5LongestGTime` String, - `Div5WheelsOff` String, - `Div5TailNum` String +CREATE TABLE `ontime` +( + `Year` UInt16, + `Quarter` UInt8, + `Month` UInt8, + `DayofMonth` UInt8, + `DayOfWeek` UInt8, + `FlightDate` Date, + `Reporting_Airline` String, + `DOT_ID_Reporting_Airline` Int32, + `IATA_CODE_Reporting_Airline` String, + `Tail_Number` Int32, + `Flight_Number_Reporting_Airline` String, + `OriginAirportID` Int32, + `OriginAirportSeqID` Int32, + `OriginCityMarketID` Int32, + `Origin` FixedString(5), + `OriginCityName` String, + `OriginState` FixedString(2), + `OriginStateFips` String, + `OriginStateName` String, + `OriginWac` Int32, + `DestAirportID` Int32, + `DestAirportSeqID` Int32, + `DestCityMarketID` Int32, + `Dest` FixedString(5), + `DestCityName` String, + `DestState` FixedString(2), + `DestStateFips` String, + `DestStateName` String, + `DestWac` Int32, + `CRSDepTime` Int32, + `DepTime` Int32, + `DepDelay` Int32, + `DepDelayMinutes` Int32, + `DepDel15` Int32, + `DepartureDelayGroups` String, + `DepTimeBlk` String, + `TaxiOut` Int32, + `WheelsOff` Int32, + `WheelsOn` Int32, + `TaxiIn` Int32, + `CRSArrTime` Int32, + `ArrTime` Int32, + `ArrDelay` Int32, + `ArrDelayMinutes` Int32, + `ArrDel15` Int32, + `ArrivalDelayGroups` Int32, + `ArrTimeBlk` String, + `Cancelled` UInt8, + `CancellationCode` FixedString(1), + `Diverted` UInt8, + `CRSElapsedTime` Int32, + `ActualElapsedTime` Int32, + `AirTime` Nullable(Int32), + `Flights` Int32, + `Distance` Int32, + `DistanceGroup` UInt8, + `CarrierDelay` Int32, + `WeatherDelay` Int32, + `NASDelay` Int32, + `SecurityDelay` Int32, + `LateAircraftDelay` Int32, + `FirstDepTime` String, + `TotalAddGTime` String, + `LongestAddGTime` String, + `DivAirportLandings` String, + `DivReachedDest` String, + `DivActualElapsedTime` String, + `DivArrDelay` String, + `DivDistance` String, + `Div1Airport` String, + `Div1AirportID` Int32, + `Div1AirportSeqID` Int32, + `Div1WheelsOn` String, + `Div1TotalGTime` String, + `Div1LongestGTime` String, + `Div1WheelsOff` String, + `Div1TailNum` String, + `Div2Airport` String, + `Div2AirportID` Int32, + `Div2AirportSeqID` Int32, + `Div2WheelsOn` String, + `Div2TotalGTime` String, + `Div2LongestGTime` String, + `Div2WheelsOff` String, + `Div2TailNum` String, + `Div3Airport` String, + `Div3AirportID` Int32, + `Div3AirportSeqID` Int32, + `Div3WheelsOn` String, + `Div3TotalGTime` String, + `Div3LongestGTime` String, + `Div3WheelsOff` String, + `Div3TailNum` String, + `Div4Airport` String, + `Div4AirportID` Int32, + `Div4AirportSeqID` Int32, + `Div4WheelsOn` String, + `Div4TotalGTime` String, + `Div4LongestGTime` String, + `Div4WheelsOff` String, + `Div4TailNum` String, + `Div5Airport` String, + `Div5AirportID` Int32, + `Div5AirportSeqID` Int32, + `Div5WheelsOn` String, + `Div5TotalGTime` String, + `Div5LongestGTime` 
String, + `Div5WheelsOff` String, + `Div5TailNum` String ) ENGINE = MergeTree -PARTITION BY Year -ORDER BY (Carrier, FlightDate) -SETTINGS index_granularity = 8192; + PARTITION BY Year + ORDER BY (IATA_CODE_Reporting_Airline, FlightDate) + SETTINGS index_granularity = 8192; ``` 加载数据: ``` bash -$ for i in *.zip; do echo $i; unzip -cq $i '*.csv' | sed 's/\.00//g' | clickhouse-client --host=example-perftest01j --query="INSERT INTO ontime FORMAT CSVWithNames"; done +ls -1 *.zip | xargs -I{} -P $(nproc) bash -c "echo {}; unzip -cq {} '*.csv' | sed 's/\.00//g' | clickhouse-client --input_format_with_names_use_header=0 --query='INSERT INTO ontime FORMAT CSVWithNames'" ``` ## 下载预处理好的分区数据 {#xia-zai-yu-chu-li-hao-de-fen-qu-shu-ju} @@ -212,7 +213,7 @@ LIMIT 10; Q4. 查询2007年各航空公司延误超过10分钟以上的次数 ``` sql -SELECT Carrier, count(*) +SELECT IATA_CODE_Reporting_Airline AS Carrier, count(*) FROM ontime WHERE DepDelay>10 AND Year=2007 GROUP BY Carrier @@ -226,29 +227,29 @@ SELECT Carrier, c, c2, c*100/c2 as c3 FROM ( SELECT - Carrier, + IATA_CODE_Reporting_Airline AS Carrier, count(*) AS c FROM ontime WHERE DepDelay>10 AND Year=2007 GROUP BY Carrier -) +) q JOIN ( SELECT - Carrier, + IATA_CODE_Reporting_Airline AS Carrier, count(*) AS c2 FROM ontime WHERE Year=2007 GROUP BY Carrier -) USING Carrier +) qq USING Carrier ORDER BY c3 DESC; ``` 更好的查询版本: ``` sql -SELECT Carrier, avg(DepDelay>10)*100 AS c3 +SELECT IATA_CODE_Reporting_Airline AS Carrier, avg(DepDelay>10)*100 AS c3 FROM ontime WHERE Year=2007 GROUP BY Carrier @@ -262,29 +263,29 @@ SELECT Carrier, c, c2, c*100/c2 as c3 FROM ( SELECT - Carrier, + IATA_CODE_Reporting_Airline AS Carrier, count(*) AS c FROM ontime WHERE DepDelay>10 AND Year>=2000 AND Year<=2008 GROUP BY Carrier -) +) q JOIN ( SELECT - Carrier, + IATA_CODE_Reporting_Airline AS Carrier, count(*) AS c2 FROM ontime WHERE Year>=2000 AND Year<=2008 GROUP BY Carrier -) USING Carrier +) qq USING Carrier ORDER BY c3 DESC; ``` 更好的查询版本: ``` sql -SELECT Carrier, avg(DepDelay>10)*100 AS c3 +SELECT IATA_CODE_Reporting_Airline AS Carrier, avg(DepDelay>10)*100 AS c3 FROM ontime WHERE Year>=2000 AND Year<=2008 GROUP BY Carrier @@ -303,7 +304,7 @@ FROM from ontime WHERE DepDelay>10 GROUP BY Year -) +) q JOIN ( select @@ -311,7 +312,7 @@ JOIN count(*) as c2 from ontime GROUP BY Year -) USING (Year) +) qq USING (Year) ORDER BY Year; ``` @@ -346,7 +347,7 @@ Q10. ``` sql SELECT - min(Year), max(Year), Carrier, count(*) AS cnt, + min(Year), max(Year), IATA_CODE_Reporting_Airline AS Carrier, count(*) AS cnt, sum(ArrDelayMinutes>30) AS flights_delayed, round(sum(ArrDelayMinutes>30)/count(*),2) AS rate FROM ontime diff --git a/docs/zh/operations/system-tables/functions.md b/docs/zh/operations/system-tables/functions.md index ff716b0bc6c..8229a94cd5c 100644 --- a/docs/zh/operations/system-tables/functions.md +++ b/docs/zh/operations/system-tables/functions.md @@ -1,13 +1,30 @@ ---- -machine_translated: true -machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3 ---- +# system.functions {#system-functions} -# 系统。功能 {#system-functions} - -包含有关正常函数和聚合函数的信息。 +包含有关常规函数和聚合函数的信息。 列: - `name`(`String`) – The name of the function. - `is_aggregate`(`UInt8`) — Whether the function is aggregate. 
+ +**举例** +``` + SELECT * FROM system.functions LIMIT 10; +``` + +``` +┌─name─────────────────────┬─is_aggregate─┬─case_insensitive─┬─alias_to─┐ +│ sumburConsistentHash │ 0 │ 0 │ │ +│ yandexConsistentHash │ 0 │ 0 │ │ +│ demangle │ 0 │ 0 │ │ +│ addressToLine │ 0 │ 0 │ │ +│ JSONExtractRaw │ 0 │ 0 │ │ +│ JSONExtractKeysAndValues │ 0 │ 0 │ │ +│ JSONExtract │ 0 │ 0 │ │ +│ JSONExtractString │ 0 │ 0 │ │ +│ JSONExtractFloat │ 0 │ 0 │ │ +│ JSONExtractInt │ 0 │ 0 │ │ +└──────────────────────────┴──────────────┴──────────────────┴──────────┘ + +10 rows in set. Elapsed: 0.002 sec. +``` diff --git a/docs/zh/sql-reference/functions/array-functions.md b/docs/zh/sql-reference/functions/array-functions.md index ac5dae3a97e..4f6dbc0d87d 100644 --- a/docs/zh/sql-reference/functions/array-functions.md +++ b/docs/zh/sql-reference/functions/array-functions.md @@ -606,7 +606,7 @@ SELECT arrayReverseSort((x, y) -> -y, [4, 3, 5], [1, 2, 3]) AS res; 如果要获取数组中唯一项的列表,可以使用arrayReduce(‘groupUniqArray’,arr)。 -## arryjoin(arr) {#array-functions-join} +## arrayJoin(arr) {#array-functions-join} 一个特殊的功能。请参见[«ArrayJoin函数»](array-join.md#functions_arrayjoin)部分。 diff --git a/docs/zh/sql-reference/functions/other-functions.md b/docs/zh/sql-reference/functions/other-functions.md index b17a5e89332..c58c4bd1510 100644 --- a/docs/zh/sql-reference/functions/other-functions.md +++ b/docs/zh/sql-reference/functions/other-functions.md @@ -477,6 +477,103 @@ FROM 1 rows in set. Elapsed: 0.002 sec. + +## indexHint {#indexhint} +输出符合索引选择范围内的所有数据,同时不实用参数中的表达式进行过滤。 + +传递给函数的表达式参数将不会被计算,但ClickHouse使用参数中的表达式进行索引过滤。 + +**返回值** + +- 1。 + +**示例** + +这是一个包含[ontime](../../getting-started/example-datasets/ontime.md)测试数据集的测试表。 + +``` +SELECT count() FROM ontime + +┌─count()─┐ +│ 4276457 │ +└─────────┘ +``` + +该表使用`(FlightDate, (Year, FlightDate))`作为索引。 + +对该表进行如下的查询: + +``` +:) SELECT FlightDate AS k, count() FROM ontime GROUP BY k ORDER BY k + +SELECT + FlightDate AS k, + count() +FROM ontime +GROUP BY k +ORDER BY k ASC + +┌──────────k─┬─count()─┐ +│ 2017-01-01 │ 13970 │ +│ 2017-01-02 │ 15882 │ +........................ +│ 2017-09-28 │ 16411 │ +│ 2017-09-29 │ 16384 │ +│ 2017-09-30 │ 12520 │ +└────────────┴─────────┘ + +273 rows in set. Elapsed: 0.072 sec. Processed 4.28 million rows, 8.55 MB (59.00 million rows/s., 118.01 MB/s.) +``` + +在这个查询中,由于没有使用索引,所以ClickHouse将处理整个表的所有数据(`Processed 4.28 million rows`)。使用下面的查询尝试使用索引进行查询: + +``` +:) SELECT FlightDate AS k, count() FROM ontime WHERE k = '2017-09-15' GROUP BY k ORDER BY k + +SELECT + FlightDate AS k, + count() +FROM ontime +WHERE k = '2017-09-15' +GROUP BY k +ORDER BY k ASC + +┌──────────k─┬─count()─┐ +│ 2017-09-15 │ 16428 │ +└────────────┴─────────┘ + +1 rows in set. Elapsed: 0.014 sec. Processed 32.74 thousand rows, 65.49 KB (2.31 million rows/s., 4.63 MB/s.) +``` + +在最后一行的显示中,通过索引ClickHouse处理的行数明显减少(`Processed 32.74 thousand rows`)。 + +现在将表达式`k = '2017-09-15'`传递给`indexHint`函数: + +``` +:) SELECT FlightDate AS k, count() FROM ontime WHERE indexHint(k = '2017-09-15') GROUP BY k ORDER BY k + +SELECT + FlightDate AS k, + count() +FROM ontime +WHERE indexHint(k = '2017-09-15') +GROUP BY k +ORDER BY k ASC + +┌──────────k─┬─count()─┐ +│ 2017-09-14 │ 7071 │ +│ 2017-09-15 │ 16428 │ +│ 2017-09-16 │ 1077 │ +│ 2017-09-30 │ 8167 │ +└────────────┴─────────┘ + +4 rows in set. Elapsed: 0.004 sec. Processed 32.74 thousand rows, 65.49 KB (8.97 million rows/s., 17.94 MB/s.) 
+``` + +对于这个请求,根据ClickHouse显示ClickHouse与上一次相同的方式应用了索引(`Processed 32.74 thousand rows`)。但是,最终返回的结果集中并没有根据`k = '2017-09-15'`表达式进行过滤结果。 + +由于ClickHouse中使用稀疏索引,因此在读取范围时(本示例中为相邻日期),"额外"的数据将包含在索引结果中。使用`indexHint`函数可以查看到它们。 + ## 复制 {#replicate} 使用单个值填充一个数组。 diff --git a/docs/zh/sql-reference/statements/select/join.md b/docs/zh/sql-reference/statements/select/join.md index 2976484e09a..407c8ca6101 100644 --- a/docs/zh/sql-reference/statements/select/join.md +++ b/docs/zh/sql-reference/statements/select/join.md @@ -43,15 +43,15 @@ ClickHouse中提供的其他联接类型: Also the behavior of ClickHouse server for `ANY JOIN` operations depends on the [any_join_distinct_right_table_keys](../../../operations/settings/settings.md#any_join_distinct_right_table_keys) setting. -### ASOF加入使用 {#asof-join-usage} +### ASOF JOIN使用 {#asof-join-usage} `ASOF JOIN` 当您需要连接没有完全匹配的记录时非常有用。 -算法需要表中的特殊列。 本专栏: +该算法需要表中的特殊列。 该列需要满足: - 必须包含有序序列。 -- 可以是以下类型之一: [Int*,UInt*](../../../sql-reference/data-types/int-uint.md), [浮动\*](../../../sql-reference/data-types/float.md), [日期](../../../sql-reference/data-types/date.md), [日期时间](../../../sql-reference/data-types/datetime.md), [十进制\*](../../../sql-reference/data-types/decimal.md). -- 不能是唯一的列 `JOIN` +- 可以是以下类型之一: [Int*,UInt*](../../../sql-reference/data-types/int-uint.md), [Float\*](../../../sql-reference/data-types/float.md), [Date](../../../sql-reference/data-types/date.md), [DateTime](../../../sql-reference/data-types/datetime.md), [Decimal\*](../../../sql-reference/data-types/decimal.md). +- 不能是`JOIN`子句中唯一的列 语法 `ASOF JOIN ... ON`: @@ -62,9 +62,9 @@ ASOF LEFT JOIN table_2 ON equi_cond AND closest_match_cond ``` -您可以使用任意数量的相等条件和恰好一个最接近的匹配条件。 例如, `SELECT count() FROM table_1 ASOF LEFT JOIN table_2 ON table_1.a == table_2.b AND table_2.t <= table_1.t`. +您可以使用任意数量的相等条件和一个且只有一个最接近的匹配条件。 例如, `SELECT count() FROM table_1 ASOF LEFT JOIN table_2 ON table_1.a == table_2.b AND table_2.t <= table_1.t`. -支持最接近匹配的条件: `>`, `>=`, `<`, `<=`. +支持最接近匹配的运算符: `>`, `>=`, `<`, `<=`. 语法 `ASOF JOIN ... USING`: @@ -75,9 +75,9 @@ ASOF JOIN table_2 USING (equi_column1, ... equi_columnN, asof_column) ``` -`ASOF JOIN` 用途 `equi_columnX` 对于加入平等和 `asof_column` 用于加入与最接近的比赛 `table_1.asof_column >= table_2.asof_column` 条件。 该 `asof_column` 列总是在最后一个 `USING` 条款 +`table_1.asof_column >= table_2.asof_column` 中, `ASOF JOIN` 使用 `equi_columnX` 来进行条件匹配, `asof_column` 用于JOIN最接近匹配。 `asof_column` 列总是在最后一个 `USING` 条件中。 -例如,请考虑下表: +例如,参考下表: table_1 table_2 event | ev_time | user_id event | ev_time | user_id @@ -88,10 +88,10 @@ USING (equi_column1, ... equi_columnN, asof_column) event_1_2 | 13:00 | 42 event_2_3 | 13:00 | 42 ... ... -`ASOF JOIN` 可以从用户事件的时间戳 `table_1` 并找到一个事件 `table_2` 其中时间戳最接近事件的时间戳 `table_1` 对应于最接近的匹配条件。 如果可用,则相等的时间戳值是最接近的值。 在这里,该 `user_id` 列可用于连接相等和 `ev_time` 列可用于在最接近的匹配加入。 在我们的例子中, `event_1_1` 可以加入 `event_2_1` 和 `event_1_2` 可以加入 `event_2_3`,但是 `event_2_2` 不能加入。 +`ASOF JOIN`会从 `table_2` 中的用户事件时间戳找出和 `table_1` 中用户事件时间戳中最近的一个时间戳,来满足最接近匹配的条件。如果有得话,则相等的时间戳值是最接近的值。在此例中,`user_id` 列可用于条件匹配,`ev_time` 列可用于最接近匹配。在此例中,`event_1_1` 可以 JOIN `event_2_1`,`event_1_2` 可以JOIN `event_2_3`,但是 `event_2_2` 不能被JOIN。 !!! 
note "注" - `ASOF` 加入是 **不** 支持在 [加入我们](../../../engines/table-engines/special/join.md) 表引擎。 + `ASOF JOIN`在 [JOIN](../../../engines/table-engines/special/join.md) 表引擎中 **不受** 支持。 ## 分布式联接 {#global-join} diff --git a/programs/CMakeLists.txt b/programs/CMakeLists.txt index c917dbe30a3..09199e83026 100644 --- a/programs/CMakeLists.txt +++ b/programs/CMakeLists.txt @@ -33,8 +33,12 @@ option (ENABLE_CLICKHOUSE_OBFUSCATOR "Table data obfuscator (convert real data t ${ENABLE_CLICKHOUSE_ALL}) # https://clickhouse.tech/docs/en/operations/utilities/odbc-bridge/ -option (ENABLE_CLICKHOUSE_ODBC_BRIDGE "HTTP-server working like a proxy to ODBC driver" - ${ENABLE_CLICKHOUSE_ALL}) +if (ENABLE_ODBC) + option (ENABLE_CLICKHOUSE_ODBC_BRIDGE "HTTP-server working like a proxy to ODBC driver" + ${ENABLE_CLICKHOUSE_ALL}) +else () + option (ENABLE_CLICKHOUSE_ODBC_BRIDGE "HTTP-server working like a proxy to ODBC driver" OFF) +endif () option (ENABLE_CLICKHOUSE_LIBRARY_BRIDGE "HTTP-server working like a proxy to Library dictionary source" ${ENABLE_CLICKHOUSE_ALL}) @@ -280,52 +284,52 @@ else () set (CLICKHOUSE_BUNDLE) if (ENABLE_CLICKHOUSE_SERVER) add_custom_target (clickhouse-server ALL COMMAND ${CMAKE_COMMAND} -E create_symlink clickhouse clickhouse-server DEPENDS clickhouse) - install (FILES ${CMAKE_CURRENT_BINARY_DIR}/clickhouse-server DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse) + install (FILES "${CMAKE_CURRENT_BINARY_DIR}/clickhouse-server" DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse) list(APPEND CLICKHOUSE_BUNDLE clickhouse-server) endif () if (ENABLE_CLICKHOUSE_CLIENT) add_custom_target (clickhouse-client ALL COMMAND ${CMAKE_COMMAND} -E create_symlink clickhouse clickhouse-client DEPENDS clickhouse) - install (FILES ${CMAKE_CURRENT_BINARY_DIR}/clickhouse-client DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse) + install (FILES "${CMAKE_CURRENT_BINARY_DIR}/clickhouse-client" DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse) list(APPEND CLICKHOUSE_BUNDLE clickhouse-client) endif () if (ENABLE_CLICKHOUSE_LOCAL) add_custom_target (clickhouse-local ALL COMMAND ${CMAKE_COMMAND} -E create_symlink clickhouse clickhouse-local DEPENDS clickhouse) - install (FILES ${CMAKE_CURRENT_BINARY_DIR}/clickhouse-local DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse) + install (FILES "${CMAKE_CURRENT_BINARY_DIR}/clickhouse-local" DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse) list(APPEND CLICKHOUSE_BUNDLE clickhouse-local) endif () if (ENABLE_CLICKHOUSE_BENCHMARK) add_custom_target (clickhouse-benchmark ALL COMMAND ${CMAKE_COMMAND} -E create_symlink clickhouse clickhouse-benchmark DEPENDS clickhouse) - install (FILES ${CMAKE_CURRENT_BINARY_DIR}/clickhouse-benchmark DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse) + install (FILES "${CMAKE_CURRENT_BINARY_DIR}/clickhouse-benchmark" DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse) list(APPEND CLICKHOUSE_BUNDLE clickhouse-benchmark) endif () if (ENABLE_CLICKHOUSE_COPIER) add_custom_target (clickhouse-copier ALL COMMAND ${CMAKE_COMMAND} -E create_symlink clickhouse clickhouse-copier DEPENDS clickhouse) - install (FILES ${CMAKE_CURRENT_BINARY_DIR}/clickhouse-copier DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse) + install (FILES "${CMAKE_CURRENT_BINARY_DIR}/clickhouse-copier" DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse) list(APPEND CLICKHOUSE_BUNDLE clickhouse-copier) endif () if (ENABLE_CLICKHOUSE_EXTRACT_FROM_CONFIG) add_custom_target (clickhouse-extract-from-config ALL COMMAND 
${CMAKE_COMMAND} -E create_symlink clickhouse clickhouse-extract-from-config DEPENDS clickhouse) - install (FILES ${CMAKE_CURRENT_BINARY_DIR}/clickhouse-extract-from-config DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse) + install (FILES "${CMAKE_CURRENT_BINARY_DIR}/clickhouse-extract-from-config" DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse) list(APPEND CLICKHOUSE_BUNDLE clickhouse-extract-from-config) endif () if (ENABLE_CLICKHOUSE_COMPRESSOR) add_custom_target (clickhouse-compressor ALL COMMAND ${CMAKE_COMMAND} -E create_symlink clickhouse clickhouse-compressor DEPENDS clickhouse) - install (FILES ${CMAKE_CURRENT_BINARY_DIR}/clickhouse-compressor DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse) + install (FILES "${CMAKE_CURRENT_BINARY_DIR}/clickhouse-compressor" DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse) list(APPEND CLICKHOUSE_BUNDLE clickhouse-compressor) endif () if (ENABLE_CLICKHOUSE_FORMAT) add_custom_target (clickhouse-format ALL COMMAND ${CMAKE_COMMAND} -E create_symlink clickhouse clickhouse-format DEPENDS clickhouse) - install (FILES ${CMAKE_CURRENT_BINARY_DIR}/clickhouse-format DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse) + install (FILES "${CMAKE_CURRENT_BINARY_DIR}/clickhouse-format" DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse) list(APPEND CLICKHOUSE_BUNDLE clickhouse-format) endif () if (ENABLE_CLICKHOUSE_OBFUSCATOR) add_custom_target (clickhouse-obfuscator ALL COMMAND ${CMAKE_COMMAND} -E create_symlink clickhouse clickhouse-obfuscator DEPENDS clickhouse) - install (FILES ${CMAKE_CURRENT_BINARY_DIR}/clickhouse-obfuscator DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse) + install (FILES "${CMAKE_CURRENT_BINARY_DIR}/clickhouse-obfuscator" DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse) list(APPEND CLICKHOUSE_BUNDLE clickhouse-obfuscator) endif () if (ENABLE_CLICKHOUSE_GIT_IMPORT) add_custom_target (clickhouse-git-import ALL COMMAND ${CMAKE_COMMAND} -E create_symlink clickhouse clickhouse-git-import DEPENDS clickhouse) - install (FILES ${CMAKE_CURRENT_BINARY_DIR}/clickhouse-git-import DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse) + install (FILES "${CMAKE_CURRENT_BINARY_DIR}/clickhouse-git-import" DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse) list(APPEND CLICKHOUSE_BUNDLE clickhouse-git-import) endif () diff --git a/programs/bash-completion/completions/clickhouse b/programs/bash-completion/completions/clickhouse index c4b77cf3f7a..fc55398dcf1 100644 --- a/programs/bash-completion/completions/clickhouse +++ b/programs/bash-completion/completions/clickhouse @@ -23,19 +23,9 @@ function _complete_for_clickhouse_entrypoint_bin() fi util="${words[1]}" - case "$prev" in - -C|--config-file|--config) - return - ;; - # Argh... This looks like a bash bug... - # Redirections are passed to the completion function - # although it is managed by the shell directly... 
- '<'|'>'|'>>'|[12]'>'|[12]'>>') - return - ;; - esac - - COMPREPLY=( $(compgen -W "$(_clickhouse_get_options "$cmd" "$util")" -- "$cur") ) + if _complete_for_clickhouse_generic_bin_impl "$prev"; then + COMPREPLY=( $(compgen -W "$(_clickhouse_get_options "$cmd" "$util")" -- "$cur") ) + fi return 0 } diff --git a/programs/bash-completion/completions/clickhouse-bootstrap b/programs/bash-completion/completions/clickhouse-bootstrap index dc8dcd5ad8d..7109148a192 100644 --- a/programs/bash-completion/completions/clickhouse-bootstrap +++ b/programs/bash-completion/completions/clickhouse-bootstrap @@ -15,6 +15,76 @@ shopt -s extglob export _CLICKHOUSE_COMPLETION_LOADED=1 +CLICKHOUSE_QueryProcessingStage=( + complete + fetch_columns + with_mergeable_state + with_mergeable_state_after_aggregation +) + +CLICKHOUSE_Format=( + CapnProto + PostgreSQLWire + MySQLWire + JSONStringsEachRowWithProgress + JSONEachRowWithProgress + JSONCompact + JSON + CSV + Vertical + ODBCDriver2 + PrettySpaceNoEscapes + Pretty + JSONCompactStrings + PrettyNoEscapes + ArrowStream + TabSeparatedWithNames + Parquet + Arrow + PrettyCompact + AvroConfluent + ORC + PrettyCompactNoEscapes + RawBLOB + Template + MsgPack + JSONCompactEachRow + CustomSeparated + TemplateIgnoreSpaces + Markdown + XML + ProtobufSingle + JSONCompactStringsEachRowWithNamesAndTypes + TSKV + TabSeparated + JSONStringEachRow + JSONStringsEachRow + TSVRaw + Values + TabSeparatedWithNamesAndTypes + PrettyCompactMonoBlock + TSVWithNamesAndTypes + Avro + RowBinaryWithNamesAndTypes + LineAsString + Native + JSONCompactEachRowWithNamesAndTypes + PrettySpace + Regexp + TSV + JSONEachRow + CustomSeparatedIgnoreSpaces + CSVWithNames + JSONStrings + Null + TabSeparatedRaw + TSVWithNames + Protobuf + RowBinary + JSONAsString + JSONCompactStringsEachRow +) + function _clickhouse_bin_exist() { [ -x "$1" ] || command -v "$1" >& /dev/null; } @@ -30,6 +100,37 @@ function _clickhouse_get_options() "$@" --help 2>&1 | awk -F '[ ,=<>]' '{ for (i=1; i <= NF; ++i) { if (substr($i, 0, 1) == "-" && length($i) > 1) print $i; } }' | sort -u } +function _complete_for_clickhouse_generic_bin_impl() +{ + local prev=$1 && shift + + case "$prev" in + -C|--config-file|--config) + return 1 + ;; + --stage) + COMPREPLY=( $(compgen -W "${CLICKHOUSE_QueryProcessingStage[*]}" -- "$cur") ) + return 1 + ;; + --format|--input-format|--output-format) + COMPREPLY=( $(compgen -W "${CLICKHOUSE_Format[*]}" -- "$cur") ) + return 1 + ;; + --host) + COMPREPLY=( $(compgen -A hostname -- "$cur") ) + return 1 + ;; + # Argh... This looks like a bash bug... + # Redirections are passed to the completion function + # although it is managed by the shell directly... + '<'|'>'|'>>'|[12]'>'|[12]'>>') + return 1 + ;; + esac + + return 0 +} + function _complete_for_clickhouse_generic_bin() { local cur prev @@ -39,19 +140,9 @@ function _complete_for_clickhouse_generic_bin() COMPREPLY=() _get_comp_words_by_ref cur prev - case "$prev" in - -C|--config-file|--config) - return - ;; - # Argh... This looks like a bash bug... - # Redirections are passed to the completion function - # although it is managed by the shell directly... 
- '<'|'>'|'>>'|[12]'>'|[12]'>>') - return - ;; - esac - - COMPREPLY=( $(compgen -W "$(_clickhouse_get_options "$cmd")" -- "$cur") ) + if _complete_for_clickhouse_generic_bin_impl "$prev"; then + COMPREPLY=( $(compgen -W "$(_clickhouse_get_options "$cmd")" -- "$cur") ) + fi return 0 } diff --git a/programs/benchmark/Benchmark.cpp b/programs/benchmark/Benchmark.cpp index a0e2ea155ba..1d2b579db3a 100644 --- a/programs/benchmark/Benchmark.cpp +++ b/programs/benchmark/Benchmark.cpp @@ -95,8 +95,8 @@ public: comparison_info_total.emplace_back(std::make_shared()); } - global_context.makeGlobalContext(); - global_context.setSettings(settings); + global_context->makeGlobalContext(); + global_context->setSettings(settings); std::cerr << std::fixed << std::setprecision(3); @@ -159,7 +159,7 @@ private: bool print_stacktrace; const Settings & settings; SharedContextHolder shared_context; - Context global_context; + ContextPtr global_context; QueryProcessingStage::Enum query_processing_stage; /// Don't execute new queries after timelimit or SIGINT or exception diff --git a/programs/client/CMakeLists.txt b/programs/client/CMakeLists.txt index 72b5caf9784..084e1b45911 100644 --- a/programs/client/CMakeLists.txt +++ b/programs/client/CMakeLists.txt @@ -21,4 +21,4 @@ list(APPEND CLICKHOUSE_CLIENT_LINK PRIVATE readpassphrase) clickhouse_program_add(client) -install (FILES clickhouse-client.xml DESTINATION ${CLICKHOUSE_ETC_DIR}/clickhouse-client COMPONENT clickhouse-client RENAME config.xml) +install (FILES clickhouse-client.xml DESTINATION "${CLICKHOUSE_ETC_DIR}/clickhouse-client" COMPONENT clickhouse-client RENAME config.xml) diff --git a/programs/client/Client.cpp b/programs/client/Client.cpp index 0c5bbaf3edd..ccf92ebc419 100644 --- a/programs/client/Client.cpp +++ b/programs/client/Client.cpp @@ -1,7 +1,7 @@ -#include "TestHint.h" #include "ConnectionParameters.h" #include "QueryFuzzer.h" #include "Suggest.h" +#include "TestHint.h" #if USE_REPLXX # include @@ -21,7 +21,7 @@ #include #include #include -#include +#include #include #include #include @@ -86,6 +86,7 @@ #include #include #include +#include #if !defined(ARCADIA_BUILD) # include @@ -95,13 +96,9 @@ #pragma GCC optimize("-fno-var-tracking-assignments") #endif -/// http://en.wikipedia.org/wiki/ANSI_escape_code -#define CLEAR_TO_END_OF_LINE "\033[K" - namespace DB { - namespace ErrorCodes { extern const int NETWORK_ERROR; @@ -119,8 +116,7 @@ namespace ErrorCodes static bool queryHasWithClause(const IAST * ast) { - if (const auto * select = dynamic_cast(ast); - select && select->with()) + if (const auto * select = dynamic_cast(ast); select && select->with()) { return true; } @@ -152,28 +148,22 @@ public: private: using StringSet = std::unordered_set; - StringSet exit_strings - { - "exit", "quit", "logout", - "учше", "йгше", "дщпщге", - "exit;", "quit;", "logout;", - "учшеж", "йгшеж", "дщпщгеж", - "q", "й", "\\q", "\\Q", "\\й", "\\Й", ":q", "Жй" - }; - bool is_interactive = true; /// Use either interactive line editing interface or batch mode. - bool need_render_progress = true; /// Render query execution progress. - bool echo_queries = false; /// Print queries before execution in batch mode. - bool ignore_error = false; /// In case of errors, don't print error message, continue to next query. Only applicable for non-interactive mode. - bool print_time_to_stderr = false; /// Output execution time to stderr in batch mode. - bool stdin_is_a_tty = false; /// stdin is a terminal. - bool stdout_is_a_tty = false; /// stdout is a terminal. 
+ StringSet exit_strings{"exit", "quit", "logout", "учше", "йгше", "дщпщге", "exit;", "quit;", "logout;", "учшеж", + "йгшеж", "дщпщгеж", "q", "й", "\\q", "\\Q", "\\й", "\\Й", ":q", "Жй"}; + bool is_interactive = true; /// Use either interactive line editing interface or batch mode. + bool echo_queries = false; /// Print queries before execution in batch mode. + bool ignore_error + = false; /// In case of errors, don't print error message, continue to next query. Only applicable for non-interactive mode. + bool print_time_to_stderr = false; /// Output execution time to stderr in batch mode. + bool stdin_is_a_tty = false; /// stdin is a terminal. + bool stdout_is_a_tty = false; /// stdout is a terminal. /// If not empty, queries will be read from these files std::vector queries_files; /// If not empty, run queries from these files before processing every file from 'queries_files'. std::vector interleave_queries_files; - std::unique_ptr connection; /// Connection to DB. + std::unique_ptr connection; /// Connection to DB. String full_query; /// Current query as it was given to the client. // Current query as it will be sent to the server. It may differ from the @@ -181,23 +171,23 @@ private: // is stripped and sent separately. String query_to_send; - String format; /// Query results output format. - bool is_default_format = true; /// false, if format is set in the config or command line. - size_t format_max_block_size = 0; /// Max block size for console output. - String insert_format; /// Format of INSERT data that is read from stdin in batch mode. + String format; /// Query results output format. + bool is_default_format = true; /// false, if format is set in the config or command line. + size_t format_max_block_size = 0; /// Max block size for console output. + String insert_format; /// Format of INSERT data that is read from stdin in batch mode. size_t insert_format_max_block_size = 0; /// Max block size when reading INSERT data. size_t max_client_network_bandwidth = 0; /// The maximum speed of data exchange over the network for the client in bytes per second. bool has_vertical_output_suffix = false; /// Is \G present at the end of the query string? SharedContextHolder shared_context = Context::createShared(); - Context context = Context::createGlobal(shared_context.get()); + ContextPtr context = Context::createGlobal(shared_context.get()); /// Buffer that reads from stdin in batch mode. - ReadBufferFromFileDescriptor std_in {STDIN_FILENO}; + ReadBufferFromFileDescriptor std_in{STDIN_FILENO}; /// Console output. - WriteBufferFromFileDescriptor std_out {STDOUT_FILENO}; + WriteBufferFromFileDescriptor std_out{STDOUT_FILENO}; std::unique_ptr pager_cmd; /// The user can specify to redirect query output to a file. @@ -242,10 +232,9 @@ private: /// The server periodically sends information about how much data was read since last time. Progress progress; - bool show_progress_bar = false; - size_t written_progress_chars = 0; - bool written_first_block = false; + /// Progress bar + ProgressBar progress_bar; /// External tables info. 
std::list external_tables; @@ -274,20 +263,20 @@ private: configReadClient(config(), home_path); - context.setApplicationType(Context::ApplicationType::CLIENT); - context.setQueryParameters(query_parameters); + context->setApplicationType(Context::ApplicationType::CLIENT); + context->setQueryParameters(query_parameters); /// settings and limits could be specified in config file, but passed settings has higher priority - for (const auto & setting : context.getSettingsRef().allUnchanged()) + for (const auto & setting : context->getSettingsRef().allUnchanged()) { const auto & name = setting.getName(); if (config().has(name)) - context.setSetting(name, config().getString(name)); + context->setSetting(name, config().getString(name)); } /// Set path for format schema files if (config().has("format_schema_path")) - context.setFormatSchemaPath(Poco::Path(config().getString("format_schema_path")).toString()); + context->setFormatSchemaPath(Poco::Path(config().getString("format_schema_path")).toString()); /// Initialize query_id_formats if any if (config().has("query_id_formats")) @@ -322,16 +311,13 @@ private: if (std::string::npos != embedded_stack_trace_pos && !print_stack_trace) text.resize(embedded_stack_trace_pos); - std::cerr << "Code: " << e.code() << ". " << text << std::endl << std::endl; + std::cerr << "Code: " << e.code() << ". " << text << std::endl << std::endl; /// Don't print the stack trace on the client if it was logged on the server. /// Also don't print the stack trace in case of network errors. - if (print_stack_trace - && e.code() != ErrorCodes::NETWORK_ERROR - && std::string::npos == embedded_stack_trace_pos) + if (print_stack_trace && e.code() != ErrorCodes::NETWORK_ERROR && std::string::npos == embedded_stack_trace_pos) { - std::cerr << "Stack trace:" << std::endl - << e.getStackTraceString(); + std::cerr << "Stack trace:" << std::endl << e.getStackTraceString(); } /// If exception code isn't zero, we should return non-zero return code anyway. @@ -354,8 +340,7 @@ private: return false; LocalDate now(current_time); - return (now.month() == 12 && now.day() >= 20) - || (now.month() == 1 && now.day() <= 5); + return (now.month() == 12 && now.day() >= 20) || (now.month() == 1 && now.day() <= 5); } static bool isChineseNewYearMode(const String & local_tz) @@ -406,9 +391,9 @@ private: if (chineseNewYearTimeZoneIndicators + M == std::find_if(chineseNewYearTimeZoneIndicators, chineseNewYearTimeZoneIndicators + M, [&local_tz](const char * tz) - { - return tz == local_tz; - })) + { + return tz == local_tz; + })) return false; /// It's bad to be intrusive. 
@@ -432,52 +417,51 @@ private: { using namespace replxx; - static const std::unordered_map token_to_color = - { - { TokenType::Whitespace, Replxx::Color::DEFAULT }, - { TokenType::Comment, Replxx::Color::GRAY }, - { TokenType::BareWord, Replxx::Color::DEFAULT }, - { TokenType::Number, Replxx::Color::GREEN }, - { TokenType::StringLiteral, Replxx::Color::CYAN }, - { TokenType::QuotedIdentifier, Replxx::Color::MAGENTA }, - { TokenType::OpeningRoundBracket, Replxx::Color::BROWN }, - { TokenType::ClosingRoundBracket, Replxx::Color::BROWN }, - { TokenType::OpeningSquareBracket, Replxx::Color::BROWN }, - { TokenType::ClosingSquareBracket, Replxx::Color::BROWN }, - { TokenType::OpeningCurlyBrace, Replxx::Color::INTENSE }, - { TokenType::ClosingCurlyBrace, Replxx::Color::INTENSE }, + static const std::unordered_map token_to_color + = {{TokenType::Whitespace, Replxx::Color::DEFAULT}, + {TokenType::Comment, Replxx::Color::GRAY}, + {TokenType::BareWord, Replxx::Color::DEFAULT}, + {TokenType::Number, Replxx::Color::GREEN}, + {TokenType::StringLiteral, Replxx::Color::CYAN}, + {TokenType::QuotedIdentifier, Replxx::Color::MAGENTA}, + {TokenType::OpeningRoundBracket, Replxx::Color::BROWN}, + {TokenType::ClosingRoundBracket, Replxx::Color::BROWN}, + {TokenType::OpeningSquareBracket, Replxx::Color::BROWN}, + {TokenType::ClosingSquareBracket, Replxx::Color::BROWN}, + {TokenType::OpeningCurlyBrace, Replxx::Color::INTENSE}, + {TokenType::ClosingCurlyBrace, Replxx::Color::INTENSE}, - { TokenType::Comma, Replxx::Color::INTENSE }, - { TokenType::Semicolon, Replxx::Color::INTENSE }, - { TokenType::Dot, Replxx::Color::INTENSE }, - { TokenType::Asterisk, Replxx::Color::INTENSE }, - { TokenType::Plus, Replxx::Color::INTENSE }, - { TokenType::Minus, Replxx::Color::INTENSE }, - { TokenType::Slash, Replxx::Color::INTENSE }, - { TokenType::Percent, Replxx::Color::INTENSE }, - { TokenType::Arrow, Replxx::Color::INTENSE }, - { TokenType::QuestionMark, Replxx::Color::INTENSE }, - { TokenType::Colon, Replxx::Color::INTENSE }, - { TokenType::Equals, Replxx::Color::INTENSE }, - { TokenType::NotEquals, Replxx::Color::INTENSE }, - { TokenType::Less, Replxx::Color::INTENSE }, - { TokenType::Greater, Replxx::Color::INTENSE }, - { TokenType::LessOrEquals, Replxx::Color::INTENSE }, - { TokenType::GreaterOrEquals, Replxx::Color::INTENSE }, - { TokenType::Concatenation, Replxx::Color::INTENSE }, - { TokenType::At, Replxx::Color::INTENSE }, - { TokenType::DoubleAt, Replxx::Color::MAGENTA }, + {TokenType::Comma, Replxx::Color::INTENSE}, + {TokenType::Semicolon, Replxx::Color::INTENSE}, + {TokenType::Dot, Replxx::Color::INTENSE}, + {TokenType::Asterisk, Replxx::Color::INTENSE}, + {TokenType::Plus, Replxx::Color::INTENSE}, + {TokenType::Minus, Replxx::Color::INTENSE}, + {TokenType::Slash, Replxx::Color::INTENSE}, + {TokenType::Percent, Replxx::Color::INTENSE}, + {TokenType::Arrow, Replxx::Color::INTENSE}, + {TokenType::QuestionMark, Replxx::Color::INTENSE}, + {TokenType::Colon, Replxx::Color::INTENSE}, + {TokenType::Equals, Replxx::Color::INTENSE}, + {TokenType::NotEquals, Replxx::Color::INTENSE}, + {TokenType::Less, Replxx::Color::INTENSE}, + {TokenType::Greater, Replxx::Color::INTENSE}, + {TokenType::LessOrEquals, Replxx::Color::INTENSE}, + {TokenType::GreaterOrEquals, Replxx::Color::INTENSE}, + {TokenType::Concatenation, Replxx::Color::INTENSE}, + {TokenType::At, Replxx::Color::INTENSE}, + {TokenType::DoubleAt, Replxx::Color::MAGENTA}, - { TokenType::EndOfStream, Replxx::Color::DEFAULT }, + {TokenType::EndOfStream, 
Replxx::Color::DEFAULT}, - { TokenType::Error, Replxx::Color::RED }, - { TokenType::ErrorMultilineCommentIsNotClosed, Replxx::Color::RED }, - { TokenType::ErrorSingleQuoteIsNotClosed, Replxx::Color::RED }, - { TokenType::ErrorDoubleQuoteIsNotClosed, Replxx::Color::RED }, - { TokenType::ErrorSinglePipeMark, Replxx::Color::RED }, - { TokenType::ErrorWrongNumber, Replxx::Color::RED }, - { TokenType::ErrorMaxQuerySizeExceeded, Replxx::Color::RED } - }; + {TokenType::Error, Replxx::Color::RED}, + {TokenType::ErrorMultilineCommentIsNotClosed, Replxx::Color::RED}, + {TokenType::ErrorSingleQuoteIsNotClosed, Replxx::Color::RED}, + {TokenType::ErrorDoubleQuoteIsNotClosed, Replxx::Color::RED}, + {TokenType::ErrorSinglePipeMark, Replxx::Color::RED}, + {TokenType::ErrorWrongNumber, Replxx::Color::RED}, + { TokenType::ErrorMaxQuerySizeExceeded, + Replxx::Color::RED }}; const Replxx::Color unknown_token_color = Replxx::Color::RED; @@ -538,24 +522,24 @@ private: else format = config().getString("format", is_interactive ? "PrettyCompact" : "TabSeparated"); - format_max_block_size = config().getInt("format_max_block_size", context.getSettingsRef().max_block_size); + format_max_block_size = config().getInt("format_max_block_size", context->getSettingsRef().max_block_size); insert_format = "Values"; /// Setting value from cmd arg overrides one from config - if (context.getSettingsRef().max_insert_block_size.changed) - insert_format_max_block_size = context.getSettingsRef().max_insert_block_size; + if (context->getSettingsRef().max_insert_block_size.changed) + insert_format_max_block_size = context->getSettingsRef().max_insert_block_size; else - insert_format_max_block_size = config().getInt("insert_format_max_block_size", context.getSettingsRef().max_insert_block_size); + insert_format_max_block_size = config().getInt("insert_format_max_block_size", context->getSettingsRef().max_insert_block_size); if (!is_interactive) { - need_render_progress = config().getBool("progress", false); + progress_bar.need_render_progress = config().getBool("progress", false); echo_queries = config().getBool("echo", false); ignore_error = config().getBool("ignore-error", false); } - ClientInfo & client_info = context.getClientInfo(); + ClientInfo & client_info = context->getClientInfo(); client_info.setInitialQuery(); client_info.quota_key = config().getString("quota_key", ""); @@ -563,7 +547,7 @@ private: /// Initialize DateLUT here to avoid counting time spent here as query execution time. const auto local_tz = DateLUT::instance().getTimeZone(); - if (!context.getSettingsRef().use_client_time_zone) + if (!context->getSettingsRef().use_client_time_zone) { const auto & time_zone = connection->getServerTimezone(connection_parameters.timeouts); if (!time_zone.empty()) @@ -575,16 +559,16 @@ private: catch (...) { std::cerr << "Warning: could not switch to server time zone: " << time_zone - << ", reason: " << getCurrentExceptionMessage(/* with_stacktrace = */ false) << std::endl - << "Proceeding with local time zone." - << std::endl << std::endl; + << ", reason: " << getCurrentExceptionMessage(/* with_stacktrace = */ false) << std::endl + << "Proceeding with local time zone." << std::endl + << std::endl; } } else { std::cerr << "Warning: could not determine server time zone. " - << "Proceeding with local time zone." - << std::endl << std::endl; + << "Proceeding with local time zone." << std::endl + << std::endl; } } @@ -612,8 +596,7 @@ private: } /// Prompt may contain the following substitutions in a form of {name}. 
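The comment above introduces the `{name}` prompt placeholders; the hunk that follows only reflows the substitution map and loop. A self-contained sketch of the same expansion, using a hand-rolled replace-all in place of `boost::replace_all` and hypothetical connection values:

#include <iostream>
#include <map>
#include <string>

// Replace every occurrence of `from` in `s` with `to`.
static void replaceAll(std::string & s, const std::string & from, const std::string & to)
{
    for (size_t pos = 0; (pos = s.find(from, pos)) != std::string::npos; pos += to.size())
        s.replace(pos, from.size(), to);
}

int main()
{
    std::string prompt = "{user}@{host}:{port} :) ";   // hypothetical prompt template

    const std::map<std::string, std::string> prompt_substitutions
        = {{"host", "localhost"}, {"port", "9000"}, {"user", "default"}};

    for (const auto & [key, value] : prompt_substitutions)
        replaceAll(prompt, "{" + key + "}", value);

    std::cout << prompt << '\n';   // prints: default@localhost:9000 :)
}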
- std::map prompt_substitutions - { + std::map prompt_substitutions{ {"host", connection_parameters.host}, {"port", toString(connection_parameters.port)}, {"user", connection_parameters.user}, @@ -621,7 +604,7 @@ private: }; /// Quite suboptimal. - for (const auto & [key, value]: prompt_substitutions) + for (const auto & [key, value] : prompt_substitutions) boost::replace_all(prompt_by_server_display_name, "{" + key + "}", value); if (is_interactive) @@ -661,13 +644,7 @@ private: if (config().getBool("highlight")) highlight_callback = highlight; - ReplxxLineReader lr( - *suggest, - history_file, - config().has("multiline"), - query_extenders, - query_delimiters, - highlight_callback); + ReplxxLineReader lr(*suggest, history_file, config().has("multiline"), query_extenders, query_delimiters, highlight_callback); #elif defined(USE_READLINE) && USE_READLINE ReadlineLineReader lr(*suggest, history_file, config().has("multiline"), query_extenders, query_delimiters); @@ -704,8 +681,8 @@ private: // We don't need to handle the test hints in the interactive // mode. std::cerr << std::endl - << "Exception on client:" << std::endl - << "Code: " << e.code() << ". " << e.displayText() << std::endl; + << "Exception on client:" << std::endl + << "Code: " << e.code() << ". " << e.displayText() << std::endl; if (config().getBool("stacktrace", false)) std::cerr << "Stack trace:" << std::endl << e.getStackTraceString() << std::endl; @@ -723,8 +700,7 @@ private: /// So we reconnect and allow to enter the next query. connect(); } - } - while (true); + } while (true); if (isNewYearMode()) std::cout << "Happy new year." << std::endl; @@ -738,14 +714,13 @@ private: { auto query_id = config().getString("query_id", ""); if (!query_id.empty()) - context.setCurrentQueryId(query_id); + context->setCurrentQueryId(query_id); nonInteractive(); // If exception code isn't zero, we should return non-zero return // code anyway. - const auto * exception = server_exception - ? server_exception.get() : client_exception.get(); + const auto * exception = server_exception ? server_exception.get() : client_exception.get(); if (exception) { return exception->code() != 0 ? exception->code() : -1; @@ -768,10 +743,10 @@ private: if (is_interactive) std::cout << "Connecting to " - << (!connection_parameters.default_database.empty() ? "database " + connection_parameters.default_database + " at " : "") - << connection_parameters.host << ":" << connection_parameters.port - << (!connection_parameters.user.empty() ? " as user " + connection_parameters.user : "") - << "." << std::endl; + << (!connection_parameters.default_database.empty() ? "database " + connection_parameters.default_database + " at " + : "") + << connection_parameters.host << ":" << connection_parameters.port + << (!connection_parameters.user.empty() ? " as user " + connection_parameters.user : "") << "." << std::endl; connection = std::make_unique( connection_parameters.host, @@ -796,8 +771,8 @@ private: connection->setThrottler(throttler); } - connection->getServerVersion(connection_parameters.timeouts, - server_name, server_version_major, server_version_minor, server_version_patch, server_revision); + connection->getServerVersion( + connection_parameters.timeouts, server_name, server_version_major, server_version_minor, server_version_patch, server_revision); server_version = toString(server_version_major) + "." + toString(server_version_minor) + "." 
+ toString(server_version_patch); @@ -808,10 +783,9 @@ private: if (is_interactive) { - std::cout << "Connected to " << server_name - << " server version " << server_version - << " revision " << server_revision - << "." << std::endl << std::endl; + std::cout << "Connected to " << server_name << " server version " << server_version << " revision " << server_revision << "." + << std::endl + << std::endl; auto client_version_tuple = std::make_tuple(VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH); auto server_version_tuple = std::make_tuple(server_version_major, server_version_minor, server_version_patch); @@ -819,14 +793,14 @@ private: if (client_version_tuple < server_version_tuple) { std::cout << "ClickHouse client version is older than ClickHouse server. " - << "It may lack support for new features." - << std::endl << std::endl; + << "It may lack support for new features." << std::endl + << std::endl; } else if (client_version_tuple > server_version_tuple) { std::cout << "ClickHouse server version is older than ClickHouse client. " - << "It may indicate that the server is out of date and can be upgraded." - << std::endl << std::endl; + << "It may indicate that the server is out of date and can be upgraded." << std::endl + << std::endl; } } } @@ -883,7 +857,7 @@ private: bool processQueryText(const String & text) { - if (exit_strings.end() != exit_strings.find(trim(text, [](char c){ return isWhitespaceASCII(c) || c == ';'; }))) + if (exit_strings.end() != exit_strings.find(trim(text, [](char c) { return isWhitespaceASCII(c) || c == ';'; }))) return false; if (!config().has("multiquery")) @@ -904,15 +878,13 @@ private: // Consumes trailing semicolons and tries to consume the same-line trailing // comment. - static void adjustQueryEnd(const char *& this_query_end, - const char * all_queries_end, int max_parser_depth) + static void adjustQueryEnd(const char *& this_query_end, const char * all_queries_end, int max_parser_depth) { // We have to skip the trailing semicolon that might be left // after VALUES parsing or just after a normal semicolon-terminated query. Tokens after_query_tokens(this_query_end, all_queries_end); IParser::Pos after_query_iterator(after_query_tokens, max_parser_depth); - while (after_query_iterator.isValid() - && after_query_iterator->type == TokenType::Semicolon) + while (after_query_iterator.isValid() && after_query_iterator->type == TokenType::Semicolon) { this_query_end = after_query_iterator->end; ++after_query_iterator; @@ -929,8 +901,7 @@ private: // newline is some trailing whitespace or comment, and we should // add it to our query. There are also several special cases // that are described below. - const auto * newline = find_first_symbols<'\n'>(this_query_end, - all_queries_end); + const auto * newline = find_first_symbols<'\n'>(this_query_end, all_queries_end); const char * next_query_begin = after_query_iterator->begin; // We include the entire line if the next query starts after @@ -956,14 +927,12 @@ private: { std::string text = server_exception->displayText(); auto embedded_stack_trace_pos = text.find("Stack trace"); - if (std::string::npos != embedded_stack_trace_pos - && !config().getBool("stacktrace", false)) + if (std::string::npos != embedded_stack_trace_pos && !config().getBool("stacktrace", false)) { text.resize(embedded_stack_trace_pos); } - std::cerr << "Received exception from server (version " - << server_version << "):" << std::endl << "Code: " - << server_exception->code() << ". 
" << text << std::endl; + std::cerr << "Received exception from server (version " << server_version << "):" << std::endl + << "Code: " << server_exception->code() << ". " << text << std::endl; if (is_interactive) { std::cerr << std::endl; @@ -972,9 +941,7 @@ private: if (client_exception) { - fmt::print(stderr, - "Error on processing query '{}':\n{}\n", - full_query, client_exception->message()); + fmt::print(stderr, "Error on processing query '{}':\n{}\n", full_query, client_exception->message()); if (is_interactive) { fmt::print(stderr, "\n"); @@ -1018,8 +985,7 @@ private: // Remove leading empty newlines and other whitespace, because they // are annoying to filter in query log. This is mostly relevant for // the tests. - while (this_query_begin < all_queries_end - && isWhitespaceASCII(*this_query_begin)) + while (this_query_begin < all_queries_end && isWhitespaceASCII(*this_query_begin)) { ++this_query_begin; } @@ -1037,8 +1003,7 @@ private: // and it makes more sense to treat them as such. { Tokens tokens(this_query_begin, all_queries_end); - IParser::Pos token_iterator(tokens, - context.getSettingsRef().max_parser_depth); + IParser::Pos token_iterator(tokens, context->getSettingsRef().max_parser_depth); if (!token_iterator.isValid()) { break; @@ -1056,17 +1021,14 @@ private: // Try to find test hint for syntax error. We don't know where // the query ends because we failed to parse it, so we consume // the entire line. - this_query_end = find_first_symbols<'\n'>(this_query_end, - all_queries_end); + this_query_end = find_first_symbols<'\n'>(this_query_end, all_queries_end); - TestHint hint(test_mode, - String(this_query_begin, this_query_end - this_query_begin)); + TestHint hint(test_mode, String(this_query_begin, this_query_end - this_query_begin)); if (hint.serverError()) { // Syntax errors are considered as client errors - e.addMessage("\nExpected server error '{}'.", - hint.serverError()); + e.addMessage("\nExpected server error '{}'.", hint.serverError()); throw; } @@ -1087,7 +1049,7 @@ private: if (ignore_error) { Tokens tokens(this_query_begin, all_queries_end); - IParser::Pos token_iterator(tokens, context.getSettingsRef().max_parser_depth); + IParser::Pos token_iterator(tokens, context->getSettingsRef().max_parser_depth); while (token_iterator->type != TokenType::Semicolon && token_iterator.isValid()) ++token_iterator; this_query_begin = token_iterator->end; @@ -1112,18 +1074,13 @@ private: auto * insert_ast = parsed_query->as(); if (insert_ast && insert_ast->data) { - this_query_end = find_first_symbols<'\n'>(insert_ast->data, - all_queries_end); + this_query_end = find_first_symbols<'\n'>(insert_ast->data, all_queries_end); insert_ast->end = this_query_end; - query_to_send = all_queries_text.substr( - this_query_begin - all_queries_text.data(), - insert_ast->data - this_query_begin); + query_to_send = all_queries_text.substr(this_query_begin - all_queries_text.data(), insert_ast->data - this_query_begin); } else { - query_to_send = all_queries_text.substr( - this_query_begin - all_queries_text.data(), - this_query_end - this_query_begin); + query_to_send = all_queries_text.substr(this_query_begin - all_queries_text.data(), this_query_end - this_query_begin); } // Try to include the trailing comment with test hints. It is just @@ -1132,14 +1089,11 @@ private: // after we have processed the query. But even this guess is // beneficial so that we see proper trailing comments in "echo" and // server log. 
- adjustQueryEnd(this_query_end, all_queries_end, - context.getSettingsRef().max_parser_depth); + adjustQueryEnd(this_query_end, all_queries_end, context->getSettingsRef().max_parser_depth); // full_query is the query + inline INSERT data + trailing comments // (the latter is our best guess for now). - full_query = all_queries_text.substr( - this_query_begin - all_queries_text.data(), - this_query_end - this_query_begin); + full_query = all_queries_text.substr(this_query_begin - all_queries_text.data(), this_query_end - this_query_begin); if (query_fuzzer_runs) { @@ -1158,8 +1112,7 @@ private: { // Surprisingly, this is a client error. A server error would // have been reported w/o throwing (see onReceiveSeverException()). - client_exception = std::make_unique( - getCurrentExceptionMessage(true), getCurrentExceptionCode()); + client_exception = std::make_unique(getCurrentExceptionMessage(true), getCurrentExceptionCode()); have_error = true; } @@ -1172,8 +1125,7 @@ private: if (insert_ast && insert_ast->data) { this_query_end = insert_ast->end; - adjustQueryEnd(this_query_end, all_queries_end, - context.getSettingsRef().max_parser_depth); + adjustQueryEnd(this_query_end, all_queries_end, context->getSettingsRef().max_parser_depth); } // Now we know for sure where the query ends. @@ -1181,8 +1133,7 @@ private: // comments, // e.g. insert into t format CSV 'a' -- { serverError 123 }. // Use the updated query boundaries we just calculated. - TestHint test_hint(test_mode, std::string(this_query_begin, - this_query_end - this_query_begin)); + TestHint test_hint(test_mode, std::string(this_query_begin, this_query_end - this_query_begin)); // Check whether the error (or its absence) matches the test hints // (or their absence). @@ -1194,16 +1145,13 @@ private: if (!server_exception) { error_matches_hint = false; - fmt::print(stderr, - "Expected server error code '{}' but got no server error.\n", - test_hint.serverError()); + fmt::print(stderr, "Expected server error code '{}' but got no server error.\n", test_hint.serverError()); } else if (server_exception->code() != test_hint.serverError()) { error_matches_hint = false; - std::cerr << "Expected server error code: " << - test_hint.serverError() << " but got: " << - server_exception->code() << "." << std::endl; + std::cerr << "Expected server error code: " << test_hint.serverError() << " but got: " << server_exception->code() + << "." 
<< std::endl; } } @@ -1212,17 +1160,13 @@ private: if (!client_exception) { error_matches_hint = false; - fmt::print(stderr, - "Expected client error code '{}' but got no client error.\n", - test_hint.clientError()); + fmt::print(stderr, "Expected client error code '{}' but got no client error.\n", test_hint.clientError()); } else if (client_exception->code() != test_hint.clientError()) { error_matches_hint = false; - fmt::print(stderr, - "Expected client error code '{}' but got '{}'.\n", - test_hint.clientError(), - client_exception->code()); + fmt::print( + stderr, "Expected client error code '{}' but got '{}'.\n", test_hint.clientError(), client_exception->code()); } } @@ -1238,17 +1182,13 @@ private: { if (test_hint.clientError()) { - fmt::print(stderr, - "The query succeeded but the client error '{}' was expected.\n", - test_hint.clientError()); + fmt::print(stderr, "The query succeeded but the client error '{}' was expected.\n", test_hint.clientError()); error_matches_hint = false; } if (test_hint.serverError()) { - fmt::print(stderr, - "The query succeeded but the server error '{}' was expected.\n", - test_hint.serverError()); + fmt::print(stderr, "The query succeeded but the server error '{}' was expected.\n", test_hint.serverError()); error_matches_hint = false; } } @@ -1290,7 +1230,7 @@ private: // Prints changed settings to stderr. Useful for debugging fuzzing failures. void printChangedSettings() const { - const auto & changes = context.getSettingsRef().changes(); + const auto & changes = context->getSettingsRef().changes(); if (!changes.empty()) { fmt::print(stderr, "Changed settings: "); @@ -1300,8 +1240,7 @@ private: { fmt::print(stderr, ", "); } - fmt::print(stderr, "{} = '{}'", changes[i].name, - toString(changes[i].value)); + fmt::print(stderr, "{} = '{}'", changes[i].name, toString(changes[i].value)); } fmt::print(stderr, "\n"); } @@ -1349,8 +1288,7 @@ private: ASTPtr fuzz_base = orig_ast; for (size_t fuzz_step = 0; fuzz_step < this_query_runs; ++fuzz_step) { - fmt::print(stderr, "Fuzzing step {} out of {}\n", - fuzz_step, this_query_runs); + fmt::print(stderr, "Fuzzing step {} out of {}\n", fuzz_step, this_query_runs); ASTPtr ast_to_process; try @@ -1380,10 +1318,12 @@ private: { printChangedSettings(); - fmt::print(stderr, + fmt::print( + stderr, "Base before fuzz: {}\n" "Base after fuzz: {}\n", - base_before_fuzz, base_after_fuzz); + base_before_fuzz, + base_after_fuzz); fmt::print(stderr, "Dump before fuzz:\n{}\n", dump_before_fuzz.str()); fmt::print(stderr, "Dump of cloned AST:\n{}\n", dump_of_cloned_ast.str()); fmt::print(stderr, "Dump after fuzz:\n"); @@ -1392,7 +1332,9 @@ private: fuzz_base->dumpTree(cerr_buf); cerr_buf.next(); - fmt::print(stderr, "IAST::clone() is broken for some AST node. This is a bug. The original AST ('dump before fuzz') and its cloned copy ('dump of cloned AST') refer to the same nodes, which must never happen. This means that their parent node doesn't implement clone() correctly."); + fmt::print( + stderr, + "IAST::clone() is broken for some AST node. This is a bug. The original AST ('dump before fuzz') and its cloned copy ('dump of cloned AST') refer to the same nodes, which must never happen. This means that their parent node doesn't implement clone() correctly."); exit(1); } @@ -1416,18 +1358,14 @@ private: // uniformity. // Surprisingly, this is a client exception, because we get the // server exception w/o throwing (see onReceiveException()). 
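As the comment above says, the line that follows wraps the in-flight exception into a stored object and sets `have_error`, so reporting happens only after the bookkeeping for the current query is done. A minimal sketch of that capture-now-report-later pattern, with a hypothetical `Exception` type:

#include <exception>
#include <iostream>
#include <memory>
#include <string>

// Hypothetical stand-in for DB::Exception: a message plus a numeric code.
struct Exception
{
    std::string message;
    int code;
};

int main()
{
    std::unique_ptr<Exception> client_exception;

    try
    {
        throw std::runtime_error("connection reset");   // hypothetical failure
    }
    catch (const std::exception & e)
    {
        // Capture now, report later: in multiquery mode the client finishes
        // handling the current query before it prints errors.
        client_exception = std::make_unique<Exception>(Exception{e.what(), 1 /* hypothetical code */});
    }

    if (client_exception)
        std::cerr << "Code: " << client_exception->code << ". " << client_exception->message << '\n';
}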
- client_exception = std::make_unique( - getCurrentExceptionMessage(true), getCurrentExceptionCode()); + client_exception = std::make_unique(getCurrentExceptionMessage(true), getCurrentExceptionCode()); have_error = true; } if (have_error) { - const auto * exception = server_exception - ? server_exception.get() : client_exception.get(); - fmt::print(stderr, "Error on processing query '{}': {}\n", - ast_to_process->formatForErrorMessage(), - exception->message()); + const auto * exception = server_exception ? server_exception.get() : client_exception.get(); + fmt::print(stderr, "Error on processing query '{}': {}\n", ast_to_process->formatForErrorMessage(), exception->message()); } if (!connection->isConnected()) @@ -1461,18 +1399,26 @@ private: // when `lambda()` function gets substituted into a wrong place. // To avoid dealing with these cases, run the check only for the // queries we were able to successfully execute. - // The final caveat is that sometimes WITH queries are not executed, + // Another caveat is that sometimes WITH queries are not executed, // if they are not referenced by the main SELECT, so they can still // have the aforementioned problems. Disable this check for such // queries, for lack of a better solution. - if (!have_error && queryHasWithClause(parsed_query.get())) + // There is also a problem that fuzzer substitutes positive Int64 + // literals or Decimal literals, which are then parsed back as + // UInt64, and suddenly duplicate alias substitition starts or stops + // working (ASTWithAlias::formatImpl) or something like that. + // So we compare not even the first and second formatting of the + // query, but second and third. + // If you have to add any more workarounds to this check, just remove + // it altogether, it's not so useful. 
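In other words, the check in the hunk below only relies on formatting being idempotent from the first round-trip onward: Text-1 may legitimately differ from Text-2, but Text-2 and Text-3 must match. A toy sketch of the invariant, where `parseAndFormat` stands in for ClickHouse's parse-then-`formatForErrorMessage` round-trip (here it merely normalizes whitespace, which is enough to show the shape):

#include <cassert>
#include <sstream>
#include <string>

// Hypothetical stand-in: "parse" a query and render it back in canonical form.
static std::string parseAndFormat(const std::string & query)
{
    std::istringstream in(query);
    std::string token, out;
    while (in >> token)
        out += (out.empty() ? "" : " ") + token;
    return out;
}

int main()
{
    const std::string text_1 = "SELECT   1,\n   2";   // fuzzer output, arbitrary layout

    // text_1 -> AST-2 -> text_2 -> AST-3 -> text_3
    const std::string text_2 = parseAndFormat(text_1);
    const std::string text_3 = parseAndFormat(text_2);

    // text_1 != text_2 is fine (first pass may canonicalize),
    // but formatting must be stable after that.
    assert(text_3 == text_2);
}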
+ if (!have_error && !queryHasWithClause(parsed_query.get())) { - ASTPtr parsed_formatted_query; + ASTPtr ast_2; try { const auto * tmp_pos = query_to_send.c_str(); - parsed_formatted_query = parseQuery(tmp_pos, - tmp_pos + query_to_send.size(), + + ast_2 = parseQuery(tmp_pos, tmp_pos + query_to_send.size(), false /* allow_multi_statements */); } catch (Exception & e) @@ -1483,25 +1429,30 @@ private: } } - if (parsed_formatted_query) + if (ast_2) { - const auto formatted_twice - = parsed_formatted_query->formatForErrorMessage(); - - if (formatted_twice != query_to_send) + const auto text_2 = ast_2->formatForErrorMessage(); + const auto * tmp_pos = text_2.c_str(); + const auto ast_3 = parseQuery(tmp_pos, tmp_pos + text_2.size(), + false /* allow_multi_statements */); + const auto text_3 = ast_3->formatForErrorMessage(); + if (text_3 != text_2) { fmt::print(stderr, "The query formatting is broken.\n"); printChangedSettings(); - fmt::print(stderr, "Got the following (different) text after formatting the fuzzed query and parsing it back:\n'{}'\n, expected:\n'{}'\n", - formatted_twice, query_to_send); + fmt::print(stderr, + "Got the following (different) text after formatting the fuzzed query and parsing it back:\n'{}'\n, expected:\n'{}'\n", + text_3, text_2); fmt::print(stderr, "In more detail:\n"); - fmt::print(stderr, "AST-1:\n'{}'\n", parsed_query->dumpTree()); + fmt::print(stderr, "AST-1 (generated by fuzzer):\n'{}'\n", parsed_query->dumpTree()); fmt::print(stderr, "Text-1 (AST-1 formatted):\n'{}'\n", query_to_send); - fmt::print(stderr, "AST-2 (Text-1 parsed):\n'{}'\n", parsed_formatted_query->dumpTree()); - fmt::print(stderr, "Text-2 (AST-2 formatted):\n'{}'\n", formatted_twice); - fmt::print(stderr, "Text-1 must be equal to Text-2, but it is not.\n"); + fmt::print(stderr, "AST-2 (Text-1 parsed):\n'{}'\n", ast_2->dumpTree()); + fmt::print(stderr, "Text-2 (AST-2 formatted):\n'{}'\n", text_2); + fmt::print(stderr, "AST-3 (Text-2 parsed):\n'{}'\n", ast_3->dumpTree()); + fmt::print(stderr, "Text-3 (AST-3 formatted):\n'{}'\n", text_3); + fmt::print(stderr, "Text-3 must be equal to Text-2, but it is not.\n"); exit(1); } @@ -1518,6 +1469,11 @@ private: server_exception.reset(); client_exception.reset(); have_error = false; + + // We have to reinitialize connection after errors, because it + // might have gotten into a wrong state and we'll get false + // positives about "Unknown packet from server". + connection->forceConnected(connection_parameters.timeouts); } else if (ast_to_process->formatForErrorMessage().size() > 500) { @@ -1590,11 +1546,11 @@ private: if (is_interactive) { // Generate a new query_id - context.setCurrentQueryId(""); + context->setCurrentQueryId(""); for (const auto & query_id_format : query_id_formats) { writeString(query_id_format.first, std_out); - writeString(fmt::format(query_id_format.second, fmt::arg("query_id", context.getCurrentQueryId())), std_out); + writeString(fmt::format(query_id_format.second, fmt::arg("query_id", context->getCurrentQueryId())), std_out); writeChar('\n', std_out); std_out.next(); } @@ -1603,19 +1559,22 @@ private: watch.restart(); processed_rows = 0; progress.reset(); - show_progress_bar = false; - written_progress_chars = 0; - written_first_block = false; + progress_bar.show_progress_bar = false; + progress_bar.written_progress_chars = 0; + progress_bar.written_first_block = false; { /// Temporarily apply query settings to context. 
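The code below saves the current settings into `old_settings` and relies on `SCOPE_EXIT_SAFE` to put them back whichever way the block exits. The same restore-on-exit idea with a plain RAII guard and hypothetical names:

#include <iostream>
#include <map>
#include <optional>
#include <string>

using Settings = std::map<std::string, std::string>;   // hypothetical stand-in

int main()
{
    Settings context_settings = {{"max_threads", "8"}};

    {
        std::optional<Settings> old_settings;

        // RAII guard: restores the saved settings on any exit path,
        // including exceptions.
        struct Restore
        {
            std::optional<Settings> & saved;
            Settings & target;
            Restore(std::optional<Settings> & s, Settings & t) : saved(s), target(t) {}
            ~Restore() { if (saved) target = *saved; }
        };
        Restore restore(old_settings, context_settings);

        // Apply per-query SETTINGS, remembering the originals first.
        old_settings.emplace(context_settings);
        context_settings["max_threads"] = "1";

        std::cout << "inside: " << context_settings["max_threads"] << '\n';   // 1
    }

    std::cout << "after: " << context_settings["max_threads"] << '\n';        // 8
}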
std::optional old_settings; - SCOPE_EXIT({ if (old_settings) context.setSettings(*old_settings); }); + SCOPE_EXIT_SAFE({ + if (old_settings) + context->setSettings(*old_settings); + }); auto apply_query_settings = [&](const IAST & settings_ast) { if (!old_settings) - old_settings.emplace(context.getSettingsRef()); - context.applySettingsChanges(settings_ast.as()->changes); + old_settings.emplace(context->getSettingsRef()); + context->applySettingsChanges(settings_ast.as()->changes); }; const auto * insert = parsed_query->as(); if (insert && insert->settings_ast) @@ -1653,7 +1612,7 @@ private: if (change.name == "profile") current_profile = change.value.safeGet(); else - context.applySettingChange(change); + context->applySettingChange(change); } } @@ -1669,8 +1628,7 @@ private: if (is_interactive) { - std::cout << std::endl - << processed_rows << " rows in set. Elapsed: " << watch.elapsedSeconds() << " sec. "; + std::cout << std::endl << processed_rows << " rows in set. Elapsed: " << watch.elapsedSeconds() << " sec. "; if (progress.read_rows >= 1000) writeFinalProgress(); @@ -1725,10 +1683,10 @@ private: connection->sendQuery( connection_parameters.timeouts, query_to_send, - context.getCurrentQueryId(), + context->getCurrentQueryId(), query_processing_stage, - &context.getSettingsRef(), - &context.getClientInfo(), + &context->getSettingsRef(), + &context->getClientInfo(), true); sendExternalTables(); @@ -1740,12 +1698,10 @@ private: { /// Retry when the server said "Client should retry" and no rows /// has been received yet. - if (processed_rows == 0 - && e.code() == ErrorCodes::DEADLOCK_AVOIDED - && --retries_left) + if (processed_rows == 0 && e.code() == ErrorCodes::DEADLOCK_AVOIDED && --retries_left) { std::cerr << "Got a transient error from the server, will" - << " retry (" << retries_left << " retries left)"; + << " retry (" << retries_left << " retries left)"; } else { @@ -1766,10 +1722,10 @@ private: connection->sendQuery( connection_parameters.timeouts, query_to_send, - context.getCurrentQueryId(), + context->getCurrentQueryId(), query_processing_stage, - &context.getSettingsRef(), - &context.getClientInfo(), + &context->getSettingsRef(), + &context->getClientInfo(), true); sendExternalTables(); @@ -1787,12 +1743,12 @@ private: } - ASTPtr parseQuery(const char * & pos, const char * end, bool allow_multi_statements) + ASTPtr parseQuery(const char *& pos, const char * end, bool allow_multi_statements) { ParserQuery parser(end); ASTPtr res; - const auto & settings = context.getSettingsRef(); + const auto & settings = context->getSettingsRef(); size_t max_length = 0; if (!allow_multi_statements) max_length = settings.max_query_size; @@ -1880,8 +1836,7 @@ private: current_format = insert->format; } - BlockInputStreamPtr block_input = context.getInputFormat( - current_format, buf, sample, insert_format_max_block_size); + BlockInputStreamPtr block_input = context->getInputFormat(current_format, buf, sample, insert_format_max_block_size); if (columns_description.hasDefaults()) block_input = std::make_shared(block_input, columns_description, context); @@ -1974,13 +1929,12 @@ private: /// to avoid losing sync. if (!cancelled) { - auto cancel_query = [&] - { + auto cancel_query = [&] { connection->sendCancel(); cancelled = true; if (is_interactive) { - clearProgress(); + progress_bar.clearProgress(); std::cout << "Cancelling query." 
<< std::endl; } @@ -2069,8 +2023,8 @@ private: return false; default: - throw Exception(ErrorCodes::UNKNOWN_PACKET_FROM_SERVER, "Unknown packet {} from server {}", - packet.type, connection->getDescription()); + throw Exception( + ErrorCodes::UNKNOWN_PACKET_FROM_SERVER, "Unknown packet {} from server {}", packet.type, connection->getDescription()); } } @@ -2101,8 +2055,10 @@ private: return receiveSampleBlock(out, columns_description); default: - throw NetException("Unexpected packet from server (expected Data, Exception or Log, got " - + String(Protocol::Server::toString(packet.type)) + ")", ErrorCodes::UNEXPECTED_PACKET_FROM_SERVER); + throw NetException( + "Unexpected packet from server (expected Data, Exception or Log, got " + + String(Protocol::Server::toString(packet.type)) + ")", + ErrorCodes::UNEXPECTED_PACKET_FROM_SERVER); } } } @@ -2130,8 +2086,10 @@ private: break; default: - throw NetException("Unexpected packet from server (expected Exception, EndOfStream or Log, got " - + String(Protocol::Server::toString(packet.type)) + ")", ErrorCodes::UNEXPECTED_PACKET_FROM_SERVER); + throw NetException( + "Unexpected packet from server (expected Exception, EndOfStream or Log, got " + + String(Protocol::Server::toString(packet.type)) + ")", + ErrorCodes::UNEXPECTED_PACKET_FROM_SERVER); } } } @@ -2203,10 +2161,10 @@ private: current_format = "Vertical"; /// It is not clear how to write progress with parallel formatting. It may increase code complexity significantly. - if (!need_render_progress) - block_out_stream = context.getOutputStreamParallelIfPossible(current_format, *out_buf, block); + if (!progress_bar.need_render_progress) + block_out_stream = context->getOutputStreamParallelIfPossible(current_format, *out_buf, block); else - block_out_stream = context.getOutputStream(current_format, *out_buf, block); + block_out_stream = context->getOutputStream(current_format, *out_buf, block); block_out_stream->writePrefix(); } @@ -2234,8 +2192,8 @@ private: } else { - out_logs_buf = std::make_unique( - server_logs_file, DBMS_DEFAULT_BUFFER_SIZE, O_WRONLY | O_APPEND | O_CREAT); + out_logs_buf + = std::make_unique(server_logs_file, DBMS_DEFAULT_BUFFER_SIZE, O_WRONLY | O_APPEND | O_CREAT); wb = out_logs_buf.get(); } } @@ -2252,38 +2210,35 @@ private: return; processed_rows += block.rows(); + + /// Even if all blocks are empty, we still need to initialize the output stream to write empty resultset. initBlockOutputStream(block); /// The header block containing zero rows was used to initialize /// block_out_stream, do not output it. /// Also do not output too much data if we're fuzzing. - if (block.rows() != 0 - && (query_fuzzer_runs == 0 || processed_rows < 100)) - { - block_out_stream->write(block); - written_first_block = true; - } + if (block.rows() == 0 || (query_fuzzer_runs != 0 && processed_rows >= 100)) + return; - bool clear_progress = false; - if (need_render_progress) - clear_progress = std_out.offset() > 0; + if (progress_bar.need_render_progress) + progress_bar.clearProgress(); - if (clear_progress) - clearProgress(); + block_out_stream->write(block); + progress_bar.written_first_block = true; /// Received data block is immediately displayed to the user. block_out_stream->flush(); /// Restore progress bar after data block. 
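The fields and methods used below (`need_render_progress`, `clearProgress`, `writeProgress`, ...) now live in a `ProgressBar` helper instead of directly in the client. A simplified stand-in showing the clear-before-block, redraw-after-block dance on an ANSI terminal (the real signatures take a `Progress` object and elapsed time):

#include <iostream>
#include <string>

// Simplified stand-in for the ProgressBar helper this PR introduces.
struct ProgressBar
{
    bool need_render_progress = true;
    size_t written_progress_chars = 0;

    void clearProgress()
    {
        if (written_progress_chars)
        {
            written_progress_chars = 0;
            std::cerr << "\r\033[K";   // carriage return + clear to end of line
        }
    }

    void writeProgress(size_t rows_read)
    {
        if (!need_render_progress)
            return;
        std::string line = "Progress: " + std::to_string(rows_read) + " rows";
        std::cerr << '\r' << line;
        written_progress_chars = line.size();
    }
};

int main()
{
    ProgressBar progress_bar;
    progress_bar.writeProgress(1000);

    progress_bar.clearProgress();     // make room for the data block
    std::cout << "1\t2\t3\n";         // the block itself goes to stdout
    progress_bar.writeProgress(2000); // restore the bar afterwards
    progress_bar.clearProgress();
}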
- if (clear_progress) - writeProgress(); + if (progress_bar.need_render_progress) + progress_bar.writeProgress(progress, watch.elapsed()); } void onLogData(Block & block) { initLogsOutputStream(); - clearProgress(); + progress_bar.clearProgress(); logs_out_stream->write(block); logs_out_stream->flush(); } @@ -2304,128 +2259,26 @@ private: void onProgress(const Progress & value) { - if (!progress.incrementPiecewiseAtomically(value)) + if (!progress_bar.updateProgress(progress, value)) { // Just a keep-alive update. return; } if (block_out_stream) block_out_stream->onProgress(value); - - writeProgress(); - } - - - void clearProgress() - { - if (written_progress_chars) - { - written_progress_chars = 0; - std::cerr << "\r" CLEAR_TO_END_OF_LINE; - } - } - - - void writeProgress() - { - if (!need_render_progress) - return; - - /// Output all progress bar commands to stderr at once to avoid flicker. - WriteBufferFromFileDescriptor message(STDERR_FILENO, 1024); - - static size_t increment = 0; - static const char * indicators[8] = - { - "\033[1;30m→\033[0m", - "\033[1;31m↘\033[0m", - "\033[1;32m↓\033[0m", - "\033[1;33m↙\033[0m", - "\033[1;34m←\033[0m", - "\033[1;35m↖\033[0m", - "\033[1;36m↑\033[0m", - "\033[1m↗\033[0m", - }; - - const char * indicator = indicators[increment % 8]; - - size_t terminal_width = getTerminalWidth(); - - if (!written_progress_chars) - { - /// If the current line is not empty, the progress must be output on the next line. - /// The trick is found here: https://www.vidarholen.net/contents/blog/?p=878 - message << std::string(terminal_width, ' '); - } - message << '\r'; - - size_t prefix_size = message.count(); - - message << indicator << " Progress: "; - - message - << formatReadableQuantity(progress.read_rows) << " rows, " - << formatReadableSizeWithDecimalSuffix(progress.read_bytes); - - size_t elapsed_ns = watch.elapsed(); - if (elapsed_ns) - message << " (" - << formatReadableQuantity(progress.read_rows * 1000000000.0 / elapsed_ns) << " rows/s., " - << formatReadableSizeWithDecimalSuffix(progress.read_bytes * 1000000000.0 / elapsed_ns) << "/s.) "; - else - message << ". "; - - written_progress_chars = message.count() - prefix_size - (strlen(indicator) - 2); /// Don't count invisible output (escape sequences). - - /// If the approximate number of rows to process is known, we can display a progress bar and percentage. - if (progress.total_rows_to_read > 0) - { - size_t total_rows_corrected = std::max(progress.read_rows, progress.total_rows_to_read); - - /// To avoid flicker, display progress bar only if .5 seconds have passed since query execution start - /// and the query is less than halfway done. - - if (elapsed_ns > 500000000) - { - /// Trigger to start displaying progress bar. If query is mostly done, don't display it. - if (progress.read_rows * 2 < total_rows_corrected) - show_progress_bar = true; - - if (show_progress_bar) - { - ssize_t width_of_progress_bar = static_cast(terminal_width) - written_progress_chars - strlen(" 99%"); - if (width_of_progress_bar > 0) - { - std::string bar = UnicodeBar::render(UnicodeBar::getWidth(progress.read_rows, 0, total_rows_corrected, width_of_progress_bar)); - message << "\033[0;32m" << bar << "\033[0m"; - if (width_of_progress_bar > static_cast(bar.size() / UNICODE_BAR_CHAR_SIZE)) - message << std::string(width_of_progress_bar - bar.size() / UNICODE_BAR_CHAR_SIZE, ' '); - } - } - } - - /// Underestimate percentage a bit to avoid displaying 100%. 
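Concretely, the removed line below renders the percentage as `99 * read_rows / total`, so with integer division the display is pinned at 99% until the query actually finishes. A two-line check of that arithmetic:

#include <cassert>
#include <cstddef>

int main()
{
    // Integer division floors, and the numerator uses 99 rather than 100,
    // so the displayed percentage can never reach 100% while reading.
    size_t total_rows = 1000;

    assert(99 * 999  / total_rows == 98);   // almost done still shows 98%
    assert(99 * 1000 / total_rows == 99);   // even fully read caps at 99%
}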
- message << ' ' << (99 * progress.read_rows / total_rows_corrected) << '%'; - } - - message << CLEAR_TO_END_OF_LINE; - ++increment; - - message.next(); + progress_bar.writeProgress(progress, watch.elapsed()); } void writeFinalProgress() { - std::cout << "Processed " - << formatReadableQuantity(progress.read_rows) << " rows, " - << formatReadableSizeWithDecimalSuffix(progress.read_bytes); + std::cout << "Processed " << formatReadableQuantity(progress.read_rows) << " rows, " + << formatReadableSizeWithDecimalSuffix(progress.read_bytes); size_t elapsed_ns = watch.elapsed(); if (elapsed_ns) - std::cout << " (" - << formatReadableQuantity(progress.read_rows * 1000000000.0 / elapsed_ns) << " rows/s., " - << formatReadableSizeWithDecimalSuffix(progress.read_bytes * 1000000000.0 / elapsed_ns) << "/s.) "; + std::cout << " (" << formatReadableQuantity(progress.read_rows * 1000000000.0 / elapsed_ns) << " rows/s., " + << formatReadableSizeWithDecimalSuffix(progress.read_bytes * 1000000000.0 / elapsed_ns) << "/s.)"; else std::cout << ". "; } @@ -2448,7 +2301,7 @@ private: void onEndOfStream() { - clearProgress(); + progress_bar.clearProgress(); if (block_out_stream) block_out_stream->writeSuffix(); @@ -2458,9 +2311,9 @@ private: resetOutput(); - if (is_interactive && !written_first_block) + if (is_interactive && !progress_bar.written_first_block) { - clearProgress(); + progress_bar.clearProgress(); std::cout << "Ok." << std::endl; } } @@ -2476,9 +2329,8 @@ private: /// It is needed if garbage is left in terminal. /// Show cursor. It can be left hidden by invocation of previous programs. /// A test for this feature: perl -e 'print "x"x100000'; echo -ne '\033[0;0H\033[?25l'; clickhouse-client - std::cout << - "\033[0J" - "\033[?25h"; + std::cout << "\033[0J" + "\033[?25h"; } public: @@ -2496,7 +2348,7 @@ public: */ using Arguments = std::vector; - Arguments common_arguments{""}; /// 0th argument is ignored. + Arguments common_arguments{""}; /// 0th argument is ignored. std::vector external_tables_arguments; bool in_external_group = false; @@ -2510,22 +2362,19 @@ public: external_tables_arguments.emplace_back(Arguments{""}); } /// Options with value after equal sign. - else if (in_external_group - && (0 == strncmp(arg, "--file=", strlen("--file=")) - || 0 == strncmp(arg, "--name=", strlen("--name=")) - || 0 == strncmp(arg, "--format=", strlen("--format=")) - || 0 == strncmp(arg, "--structure=", strlen("--structure=")) - || 0 == strncmp(arg, "--types=", strlen("--types=")))) + else if ( + in_external_group + && (0 == strncmp(arg, "--file=", strlen("--file=")) || 0 == strncmp(arg, "--name=", strlen("--name=")) + || 0 == strncmp(arg, "--format=", strlen("--format=")) || 0 == strncmp(arg, "--structure=", strlen("--structure=")) + || 0 == strncmp(arg, "--types=", strlen("--types=")))) { external_tables_arguments.back().emplace_back(arg); } /// Options with value after whitespace. - else if (in_external_group - && (0 == strcmp(arg, "--file") - || 0 == strcmp(arg, "--name") - || 0 == strcmp(arg, "--format") - || 0 == strcmp(arg, "--structure") - || 0 == strcmp(arg, "--types"))) + else if ( + in_external_group + && (0 == strcmp(arg, "--file") || 0 == strcmp(arg, "--name") || 0 == strcmp(arg, "--format") + || 0 == strcmp(arg, "--structure") || 0 == strcmp(arg, "--types"))) { if (arg_num + 1 < argc) { @@ -2636,13 +2485,10 @@ public: /// Commandline options related to external tables. 
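The reflowed `add_options()` chain in the next hunk is ordinary `boost::program_options` usage; written one option per line it declares the same five per-table options. A standalone sketch with those option names (the surrounding program is hypothetical):

#include <boost/program_options.hpp>
#include <iostream>
#include <string>

namespace po = boost::program_options;

int main(int argc, char ** argv)
{
    // One external-table option per line, as it read before reformatting.
    po::options_description external_description("External tables options");
    external_description.add_options()
        ("file", po::value<std::string>(), "data file or - for stdin")
        ("name", po::value<std::string>()->default_value("_data"), "name of the table")
        ("format", po::value<std::string>()->default_value("TabSeparated"), "data format")
        ("structure", po::value<std::string>(), "structure")
        ("types", po::value<std::string>(), "types");

    po::variables_map options;
    po::store(po::parse_command_line(argc, argv, external_description), options);

    if (options.count("file"))
        std::cout << "file: " << options["file"].as<std::string>() << '\n';
    std::cout << "name: " << options["name"].as<std::string>() << '\n';   // "_data" by default
}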
po::options_description external_description = createOptionsDescription("External tables options", terminal_width); - external_description.add_options() - ("file", po::value(), "data file or - for stdin") - ("name", po::value()->default_value("_data"), "name of the table") - ("format", po::value()->default_value("TabSeparated"), "data format") - ("structure", po::value(), "structure") - ("types", po::value(), "types") - ; + external_description.add_options()("file", po::value(), "data file or - for stdin")( + "name", + po::value()->default_value("_data"), + "name of the table")("format", po::value()->default_value("TabSeparated"), "data format")("structure", po::value(), "structure")("types", po::value(), "types"); /// Parse main commandline options. po::parsed_options parsed = po::command_line_parser(common_arguments).options(main_description).run(); @@ -2670,7 +2516,7 @@ public: /// Output of help message. if (options.count("help") - || (options.count("host") && options["host"].as() == "elp")) /// If user writes -help instead of --help. + || (options.count("host") && options["host"].as() == "elp")) /// If user writes -help instead of --help. { std::cout << main_description << "\n"; std::cout << external_description << "\n"; @@ -2710,12 +2556,12 @@ public: } } - context.makeGlobalContext(); - context.setSettings(cmd_settings); + context->makeGlobalContext(); + context->setSettings(cmd_settings); /// Copy settings-related program options to config. /// TODO: Is this code necessary? - for (const auto & setting : context.getSettingsRef().all()) + for (const auto & setting : context->getSettingsRef().all()) { const auto & name = setting.getName(); if (options.count(name)) @@ -2807,19 +2653,15 @@ public: { std::string traceparent = options["opentelemetry-traceparent"].as(); std::string error; - if (!context.getClientInfo().client_trace_context.parseTraceparentHeader( - traceparent, error)) + if (!context->getClientInfo().client_trace_context.parseTraceparentHeader(traceparent, error)) { - throw Exception(ErrorCodes::BAD_ARGUMENTS, - "Cannot parse OpenTelemetry traceparent '{}': {}", - traceparent, error); + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Cannot parse OpenTelemetry traceparent '{}': {}", traceparent, error); } } if (options.count("opentelemetry-tracestate")) { - context.getClientInfo().client_trace_context.tracestate = - options["opentelemetry-tracestate"].as(); + context->getClientInfo().client_trace_context.tracestate = options["opentelemetry-tracestate"].as(); } argsToConfig(common_arguments, config(), 100); @@ -2827,7 +2669,6 @@ public: clearPasswordFromCommandLine(argc, argv); } }; - } #pragma GCC diagnostic ignored "-Wunused-function" diff --git a/programs/client/ConnectionParameters.cpp b/programs/client/ConnectionParameters.cpp index 19734dd5ffa..6faf43759df 100644 --- a/programs/client/ConnectionParameters.cpp +++ b/programs/client/ConnectionParameters.cpp @@ -7,6 +7,8 @@ #include #include #include +#include +#include #include #include @@ -60,7 +62,9 @@ ConnectionParameters::ConnectionParameters(const Poco::Util::AbstractConfigurati #endif } - compression = config.getBool("compression", true) ? Protocol::Compression::Enable : Protocol::Compression::Disable; + /// By default compression is disabled if address looks like localhost. + compression = config.getBool("compression", !isLocalAddress(DNSResolver::instance().resolveHost(host))) + ? 
Protocol::Compression::Enable : Protocol::Compression::Disable; timeouts = ConnectionTimeouts( Poco::Timespan(config.getInt("connect_timeout", DBMS_DEFAULT_CONNECT_TIMEOUT_SEC), 0), diff --git a/programs/client/QueryFuzzer.cpp b/programs/client/QueryFuzzer.cpp index 0c8dc0731f9..721e5acb991 100644 --- a/programs/client/QueryFuzzer.cpp +++ b/programs/client/QueryFuzzer.cpp @@ -27,6 +27,7 @@ #include #include + namespace DB { @@ -37,34 +38,33 @@ namespace ErrorCodes Field QueryFuzzer::getRandomField(int type) { + static constexpr Int64 bad_int64_values[] + = {-2, -1, 0, 1, 2, 3, 7, 10, 100, 255, 256, 257, 1023, 1024, + 1025, 65535, 65536, 65537, 1024 * 1024 - 1, 1024 * 1024, + 1024 * 1024 + 1, INT_MIN - 1ll, INT_MIN, INT_MIN + 1, + INT_MAX - 1, INT_MAX, INT_MAX + 1ll, INT64_MIN, INT64_MIN + 1, + INT64_MAX - 1, INT64_MAX}; switch (type) { case 0: { - static constexpr Int64 values[] - = {-2, -1, 0, 1, 2, 3, 7, 10, 100, 255, 256, 257, 1023, 1024, - 1025, 65535, 65536, 65537, 1024 * 1024 - 1, 1024 * 1024, - 1024 * 1024 + 1, INT64_MIN, INT64_MAX}; - return values[fuzz_rand() % (sizeof(values) / sizeof(*values))]; + return bad_int64_values[fuzz_rand() % (sizeof(bad_int64_values) + / sizeof(*bad_int64_values))]; } case 1: { static constexpr float values[] - = {NAN, INFINITY, -INFINITY, 0., 0.0001, 0.5, 0.9999, - 1., 1.0001, 2., 10.0001, 100.0001, 1000.0001}; - return values[fuzz_rand() % (sizeof(values) / sizeof(*values))]; + = {NAN, INFINITY, -INFINITY, 0., -0., 0.0001, 0.5, 0.9999, + 1., 1.0001, 2., 10.0001, 100.0001, 1000.0001, 1e10, 1e20, + FLT_MIN, FLT_MIN + FLT_EPSILON, FLT_MAX, FLT_MAX + FLT_EPSILON}; return values[fuzz_rand() % (sizeof(values) / sizeof(*values))]; } case 2: { - static constexpr Int64 values[] - = {-2, -1, 0, 1, 2, 3, 7, 10, 100, 255, 256, 257, 1023, 1024, - 1025, 65535, 65536, 65537, 1024 * 1024 - 1, 1024 * 1024, - 1024 * 1024 + 1, INT64_MIN, INT64_MAX}; static constexpr UInt64 scales[] = {0, 1, 2, 10}; return DecimalField( - values[fuzz_rand() % (sizeof(values) / sizeof(*values))], - scales[fuzz_rand() % (sizeof(scales) / sizeof(*scales))] - ); + bad_int64_values[fuzz_rand() % (sizeof(bad_int64_values) + / sizeof(*bad_int64_values))], + scales[fuzz_rand() % (sizeof(scales) / sizeof(*scales))]); } default: assert(false); diff --git a/programs/client/QueryFuzzer.h b/programs/client/QueryFuzzer.h index 38714205967..7c79e683eb4 100644 --- a/programs/client/QueryFuzzer.h +++ b/programs/client/QueryFuzzer.h @@ -4,11 +4,14 @@ #include #include +#include + #include #include #include #include + namespace DB { @@ -50,7 +53,7 @@ struct QueryFuzzer // Some debug fields for detecting problematic ASTs with loops. // These are reset for each fuzzMain call. 
std::unordered_set debug_visited_nodes; - ASTPtr * debug_top_ast; + ASTPtr * debug_top_ast = nullptr; // This is the only function you have to call -- it will modify the passed diff --git a/programs/client/Suggest.cpp b/programs/client/Suggest.cpp index dfa7048349e..8d4c0fdbd5a 100644 --- a/programs/client/Suggest.cpp +++ b/programs/client/Suggest.cpp @@ -108,14 +108,6 @@ void Suggest::loadImpl(Connection & connection, const ConnectionTimeouts & timeo " UNION ALL " "SELECT cluster FROM system.clusters" " UNION ALL " - "SELECT name FROM system.errors" - " UNION ALL " - "SELECT event FROM system.events" - " UNION ALL " - "SELECT metric FROM system.asynchronous_metrics" - " UNION ALL " - "SELECT metric FROM system.metrics" - " UNION ALL " "SELECT macro FROM system.macros" " UNION ALL " "SELECT policy_name FROM system.storage_policies" @@ -139,17 +131,12 @@ void Suggest::loadImpl(Connection & connection, const ConnectionTimeouts & timeo query << ") WHERE notEmpty(res)"; - Settings settings; - /// To show all rows from: - /// - system.errors - /// - system.events - settings.system_events_show_zero_values = true; - fetch(connection, timeouts, query.str(), settings); + fetch(connection, timeouts, query.str()); } -void Suggest::fetch(Connection & connection, const ConnectionTimeouts & timeouts, const std::string & query, Settings & settings) +void Suggest::fetch(Connection & connection, const ConnectionTimeouts & timeouts, const std::string & query) { - connection.sendQuery(timeouts, query, "" /* query_id */, QueryProcessingStage::Complete, &settings); + connection.sendQuery(timeouts, query, "" /* query_id */, QueryProcessingStage::Complete); while (true) { diff --git a/programs/client/Suggest.h b/programs/client/Suggest.h index 0049bc08ebf..03332088cbe 100644 --- a/programs/client/Suggest.h +++ b/programs/client/Suggest.h @@ -33,7 +33,7 @@ public: private: void loadImpl(Connection & connection, const ConnectionTimeouts & timeouts, size_t suggestion_limit); - void fetch(Connection & connection, const ConnectionTimeouts & timeouts, const std::string & query, Settings & settings); + void fetch(Connection & connection, const ConnectionTimeouts & timeouts, const std::string & query); void fillWordsFromBlock(const Block & block); /// Words are fetched asynchronously. 
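The `QueryFuzzer.cpp` hunk above consolidates the integer corpus into a single `bad_int64_values` table shared by the integer and Decimal branches, widening it with the off-by-one neighbours of `INT_MIN`/`INT_MAX`. A sketch of drawing from such a boundary-value corpus, with a hypothetical fixed seed:

#include <climits>
#include <cstdint>
#include <iostream>
#include <random>

int main()
{
    // Boundary-heavy corpus: values where parsing, overflow checks and
    // sign handling tend to break (subset of the table in the diff).
    static constexpr int64_t bad_int64_values[]
        = {-2, -1, 0, 1, 2, 255, 256, 257, 65535, 65536, 65537,
           INT_MIN - 1ll, INT_MIN, INT_MIN + 1,
           INT_MAX - 1, INT_MAX, INT_MAX + 1ll,
           INT64_MIN, INT64_MIN + 1, INT64_MAX - 1, INT64_MAX};

    std::mt19937_64 fuzz_rand(42);   // hypothetical seed, for reproducibility

    for (int i = 0; i < 5; ++i)
    {
        auto value = bad_int64_values[fuzz_rand() % (sizeof(bad_int64_values) / sizeof(*bad_int64_values))];
        std::cout << value << '\n';
    }
}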
diff --git a/programs/copier/CMakeLists.txt b/programs/copier/CMakeLists.txt index f69b30f3f43..dfb067b00f9 100644 --- a/programs/copier/CMakeLists.txt +++ b/programs/copier/CMakeLists.txt @@ -1,7 +1,7 @@ set(CLICKHOUSE_COPIER_SOURCES - ${CMAKE_CURRENT_SOURCE_DIR}/ClusterCopierApp.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/ClusterCopier.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/Internals.cpp) + "${CMAKE_CURRENT_SOURCE_DIR}/ClusterCopierApp.cpp" + "${CMAKE_CURRENT_SOURCE_DIR}/ClusterCopier.cpp" + "${CMAKE_CURRENT_SOURCE_DIR}/Internals.cpp") set (CLICKHOUSE_COPIER_LINK PRIVATE diff --git a/programs/copier/ClusterCopier.cpp b/programs/copier/ClusterCopier.cpp index bede40d65f5..a60896388a0 100644 --- a/programs/copier/ClusterCopier.cpp +++ b/programs/copier/ClusterCopier.cpp @@ -22,7 +22,7 @@ namespace ErrorCodes void ClusterCopier::init() { - auto zookeeper = context.getZooKeeper(); + auto zookeeper = getContext()->getZooKeeper(); task_description_watch_callback = [this] (const Coordination::WatchResponse & response) { @@ -39,14 +39,14 @@ void ClusterCopier::init() task_cluster_initial_config = task_cluster_current_config; task_cluster->loadTasks(*task_cluster_initial_config); - context.setClustersConfig(task_cluster_initial_config, task_cluster->clusters_prefix); + getContext()->setClustersConfig(task_cluster_initial_config, task_cluster->clusters_prefix); /// Set up shards and their priority task_cluster->random_engine.seed(task_cluster->random_device()); for (auto & task_table : task_cluster->table_tasks) { - task_table.cluster_pull = context.getCluster(task_table.cluster_pull_name); - task_table.cluster_push = context.getCluster(task_table.cluster_push_name); + task_table.cluster_pull = getContext()->getCluster(task_table.cluster_pull_name); + task_table.cluster_push = getContext()->getCluster(task_table.cluster_push_name); task_table.initShards(task_cluster->random_engine); } @@ -206,7 +206,7 @@ void ClusterCopier::uploadTaskDescription(const std::string & task_path, const s if (task_config_str.empty()) return; - auto zookeeper = context.getZooKeeper(); + auto zookeeper = getContext()->getZooKeeper(); zookeeper->createAncestors(local_task_description_path); auto code = zookeeper->tryCreate(local_task_description_path, task_config_str, zkutil::CreateMode::Persistent); @@ -219,7 +219,7 @@ void ClusterCopier::uploadTaskDescription(const std::string & task_path, const s void ClusterCopier::reloadTaskDescription() { - auto zookeeper = context.getZooKeeper(); + auto zookeeper = getContext()->getZooKeeper(); task_description_watch_zookeeper = zookeeper; String task_config_str; @@ -235,7 +235,7 @@ void ClusterCopier::reloadTaskDescription() /// Setup settings task_cluster->reloadSettings(*config); - context.setSettings(task_cluster->settings_common); + getContext()->setSettings(task_cluster->settings_common); task_cluster_current_config = config; task_description_current_stat = stat; @@ -440,7 +440,7 @@ bool ClusterCopier::checkPartitionPieceIsDone(const TaskTable & task_table, cons { LOG_DEBUG(log, "Check that all shards processed partition {} piece {} successfully", partition_name, piece_number); - auto zookeeper = context.getZooKeeper(); + auto zookeeper = getContext()->getZooKeeper(); /// Collect all shards that contain partition piece number piece_number. 
Strings piece_status_paths; @@ -532,7 +532,7 @@ TaskStatus ClusterCopier::tryMoveAllPiecesToDestinationTable(const TaskTable & t LOG_DEBUG(log, "Try to move {} to destination table", partition_name); - auto zookeeper = context.getZooKeeper(); + auto zookeeper = getContext()->getZooKeeper(); const auto current_partition_attach_is_active = task_table.getPartitionAttachIsActivePath(partition_name); const auto current_partition_attach_is_done = task_table.getPartitionAttachIsDonePath(partition_name); @@ -714,6 +714,8 @@ ASTPtr ClusterCopier::removeAliasColumnsFromCreateQuery(const ASTPtr & query_ast new_columns_list->set(new_columns_list->columns, new_columns); if (const auto * indices = query_ast->as()->columns_list->indices) new_columns_list->set(new_columns_list->indices, indices->clone()); + if (const auto * projections = query_ast->as()->columns_list->projections) + new_columns_list->set(new_columns_list->projections, projections->clone()); new_query.replace(new_query.columns_list, new_columns_list); @@ -1095,7 +1097,7 @@ TaskStatus ClusterCopier::tryCreateDestinationTable(const ConnectionTimeouts & t = rewriteCreateQueryStorage(task_shard->current_pull_table_create_query, task_table.table_push, task_table.engine_push_ast); auto & create = create_query_push_ast->as(); create.if_not_exists = true; - InterpreterCreateQuery::prepareOnClusterQuery(create, context, task_table.cluster_push_name); + InterpreterCreateQuery::prepareOnClusterQuery(create, getContext(), task_table.cluster_push_name); String query = queryToString(create_query_push_ast); LOG_DEBUG(log, "Create destination tables. Query: {}", query); @@ -1211,7 +1213,7 @@ TaskStatus ClusterCopier::processPartitionPieceTaskImpl( auto split_table_for_current_piece = task_shard.list_of_split_tables_on_shard[current_piece_number]; - auto zookeeper = context.getZooKeeper(); + auto zookeeper = getContext()->getZooKeeper(); const String piece_is_dirty_flag_path = partition_piece.getPartitionPieceIsDirtyPath(); const String piece_is_dirty_cleaned_path = partition_piece.getPartitionPieceIsCleanedPath(); @@ -1262,7 +1264,7 @@ TaskStatus ClusterCopier::processPartitionPieceTaskImpl( ParserQuery p_query(query.data() + query.size()); - const auto & settings = context.getSettingsRef(); + const auto & settings = getContext()->getSettingsRef(); return parseQuery(p_query, query, settings.max_query_size, settings.max_parser_depth); }; @@ -1366,10 +1368,10 @@ TaskStatus ClusterCopier::processPartitionPieceTaskImpl( ASTPtr query_select_ast = get_select_query(split_table_for_current_piece, "count()", /*enable_splitting*/ true); UInt64 count; { - Context local_context = context; + auto local_context = Context::createCopy(context); // Use pull (i.e. readonly) settings, but fetch data from destination servers - local_context.setSettings(task_cluster->settings_pull); - local_context.setSetting("skip_unavailable_shards", true); + local_context->setSettings(task_cluster->settings_pull); + local_context->setSetting("skip_unavailable_shards", true); Block block = getBlockWithAllStreamData(InterpreterFactory::get(query_select_ast, local_context)->execute().getInputStream()); count = (block) ? 
block.safeGetByPosition(0).column->getUInt(0) : 0; @@ -1468,7 +1470,7 @@ TaskStatus ClusterCopier::processPartitionPieceTaskImpl( query += "INSERT INTO " + getQuotedTable(split_table_for_current_piece) + " VALUES "; ParserQuery p_query(query.data() + query.size()); - const auto & settings = context.getSettingsRef(); + const auto & settings = getContext()->getSettingsRef(); query_insert_ast = parseQuery(p_query, query, settings.max_query_size, settings.max_parser_depth); LOG_DEBUG(log, "Executing INSERT query: {}", query); @@ -1476,18 +1478,18 @@ TaskStatus ClusterCopier::processPartitionPieceTaskImpl( try { - std::unique_ptr context_select = std::make_unique(context); + auto context_select = Context::createCopy(context); context_select->setSettings(task_cluster->settings_pull); - std::unique_ptr context_insert = std::make_unique(context); + auto context_insert = Context::createCopy(context); context_insert->setSettings(task_cluster->settings_push); /// Custom INSERT SELECT implementation BlockInputStreamPtr input; BlockOutputStreamPtr output; { - BlockIO io_select = InterpreterFactory::get(query_select_ast, *context_select)->execute(); - BlockIO io_insert = InterpreterFactory::get(query_insert_ast, *context_insert)->execute(); + BlockIO io_select = InterpreterFactory::get(query_select_ast, context_select)->execute(); + BlockIO io_insert = InterpreterFactory::get(query_insert_ast, context_insert)->execute(); input = io_select.getInputStream(); output = io_insert.out; @@ -1581,7 +1583,7 @@ void ClusterCopier::dropAndCreateLocalTable(const ASTPtr & create_ast) const auto & create = create_ast->as(); dropLocalTableIfExists({create.database, create.table}); - InterpreterCreateQuery interpreter(create_ast, context); + InterpreterCreateQuery interpreter(create_ast, getContext()); interpreter.execute(); } @@ -1592,7 +1594,7 @@ void ClusterCopier::dropLocalTableIfExists(const DatabaseAndTableName & table_na drop_ast->database = table_name.first; drop_ast->table = table_name.second; - InterpreterDropQuery interpreter(drop_ast, context); + InterpreterDropQuery interpreter(drop_ast, getContext()); interpreter.execute(); } @@ -1654,8 +1656,8 @@ void ClusterCopier::dropParticularPartitionPieceFromAllHelpingTables(const TaskT String ClusterCopier::getRemoteCreateTable(const DatabaseAndTableName & table, Connection & connection, const Settings & settings) { - Context remote_context(context); - remote_context.setSettings(settings); + auto remote_context = Context::createCopy(context); + remote_context->setSettings(settings); String query = "SHOW CREATE TABLE " + getQuotedTable(table); Block block = getBlockWithAllStreamData(std::make_shared( @@ -1674,7 +1676,7 @@ ASTPtr ClusterCopier::getCreateTableForPullShard(const ConnectionTimeouts & time task_cluster->settings_pull); ParserCreateQuery parser_create_query; - const auto & settings = context.getSettingsRef(); + const auto & settings = getContext()->getSettingsRef(); return parseQuery(parser_create_query, create_query_pull_str, settings.max_query_size, settings.max_parser_depth); } @@ -1703,7 +1705,7 @@ void ClusterCopier::createShardInternalTables(const ConnectionTimeouts & timeout /// Create special cluster with single shard String shard_read_cluster_name = read_shard_prefix + task_table.cluster_pull_name; ClusterPtr cluster_pull_current_shard = task_table.cluster_pull->getClusterWithSingleShard(task_shard.indexInCluster()); - context.setCluster(shard_read_cluster_name, cluster_pull_current_shard); + getContext()->setCluster(shard_read_cluster_name, 
cluster_pull_current_shard); auto storage_shard_ast = createASTStorageDistributed(shard_read_cluster_name, task_table.table_pull.first, task_table.table_pull.second); @@ -1763,13 +1765,13 @@ std::set ClusterCopier::getShardPartitions(const ConnectionTimeouts & ti } ParserQuery parser_query(query.data() + query.size()); - const auto & settings = context.getSettingsRef(); + const auto & settings = getContext()->getSettingsRef(); ASTPtr query_ast = parseQuery(parser_query, query, settings.max_query_size, settings.max_parser_depth); LOG_DEBUG(log, "Computing destination partition set, executing query: {}", query); - Context local_context = context; - local_context.setSettings(task_cluster->settings_pull); + auto local_context = Context::createCopy(context); + local_context->setSettings(task_cluster->settings_pull); Block block = getBlockWithAllStreamData(InterpreterFactory::get(query_ast, local_context)->execute().getInputStream()); if (block) @@ -1809,11 +1811,11 @@ bool ClusterCopier::checkShardHasPartition(const ConnectionTimeouts & timeouts, LOG_DEBUG(log, "Checking shard {} for partition {} existence, executing query: {}", task_shard.getDescription(), partition_quoted_name, query); ParserQuery parser_query(query.data() + query.size()); -const auto & settings = context.getSettingsRef(); + const auto & settings = getContext()->getSettingsRef(); ASTPtr query_ast = parseQuery(parser_query, query, settings.max_query_size, settings.max_parser_depth); - Context local_context = context; - local_context.setSettings(task_cluster->settings_pull); + auto local_context = Context::createCopy(context); + local_context->setSettings(task_cluster->settings_pull); return InterpreterFactory::get(query_ast, local_context)->execute().getInputStream()->read().rows() != 0; } @@ -1848,11 +1850,11 @@ bool ClusterCopier::checkPresentPartitionPiecesOnCurrentShard(const ConnectionTi LOG_DEBUG(log, "Checking shard {} for partition {} piece {} existence, executing query: {}", task_shard.getDescription(), partition_quoted_name, std::to_string(current_piece_number), query); ParserQuery parser_query(query.data() + query.size()); - const auto & settings = context.getSettingsRef(); + const auto & settings = getContext()->getSettingsRef(); ASTPtr query_ast = parseQuery(parser_query, query, settings.max_query_size, settings.max_parser_depth); - Context local_context = context; - local_context.setSettings(task_cluster->settings_pull); + auto local_context = Context::createCopy(context); + local_context->setSettings(task_cluster->settings_pull); auto result = InterpreterFactory::get(query_ast, local_context)->execute().getInputStream()->read().rows(); if (result != 0) LOG_DEBUG(log, "Partition {} piece number {} is PRESENT on shard {}", partition_quoted_name, std::to_string(current_piece_number), task_shard.getDescription()); @@ -1908,7 +1910,7 @@ UInt64 ClusterCopier::executeQueryOnCluster( /// In that case we don't have local replicas, but do it just in case for (UInt64 i = 0; i < num_local_replicas; ++i) { - auto interpreter = InterpreterFactory::get(query_ast, context); + auto interpreter = InterpreterFactory::get(query_ast, getContext()); interpreter->execute(); if (increment_and_check_exit()) @@ -1923,8 +1925,8 @@ UInt64 ClusterCopier::executeQueryOnCluster( auto timeouts = ConnectionTimeouts::getTCPTimeoutsWithFailover(shard_settings).getSaturated(shard_settings.max_execution_time); auto connections = shard.pool->getMany(timeouts, &shard_settings, pool_mode); - Context shard_context(context); - 
shard_context.setSettings(shard_settings); + auto shard_context = Context::createCopy(context); + shard_context->setSettings(shard_settings); for (auto & connection : connections) { diff --git a/programs/copier/ClusterCopier.h b/programs/copier/ClusterCopier.h index 95bb54cf4e1..e875ca7df2e 100644 --- a/programs/copier/ClusterCopier.h +++ b/programs/copier/ClusterCopier.h @@ -12,18 +12,17 @@ namespace DB { -class ClusterCopier +class ClusterCopier : WithContext { public: ClusterCopier(const String & task_path_, const String & host_id_, const String & proxy_database_name_, - Context & context_) - : + ContextPtr context_) + : WithContext(context_), task_zookeeper_path(task_path_), host_id(host_id_), working_database_name(proxy_database_name_), - context(context_), log(&Poco::Logger::get("ClusterCopier")) {} void init(); @@ -36,7 +35,7 @@ public: /// Compute set of partitions, assume set of partitions aren't changed during the processing void discoverTablePartitions(const ConnectionTimeouts & timeouts, TaskTable & task_table, UInt64 num_threads = 0); - void uploadTaskDescription(const std::string & task_path, const std::string & task_file, const bool force); + void uploadTaskDescription(const std::string & task_path, const std::string & task_file, bool force); void reloadTaskDescription(); @@ -120,7 +119,7 @@ protected: /// Removes MATERIALIZED and ALIAS columns from create table query static ASTPtr removeAliasColumnsFromCreateQuery(const ASTPtr & query_ast); - bool tryDropPartitionPiece(ShardPartition & task_partition, const size_t current_piece_number, + bool tryDropPartitionPiece(ShardPartition & task_partition, size_t current_piece_number, const zkutil::ZooKeeperPtr & zookeeper, const CleanStateClock & clean_state_clock); static constexpr UInt64 max_table_tries = 3; @@ -141,7 +140,7 @@ protected: TaskStatus processPartitionPieceTaskImpl(const ConnectionTimeouts & timeouts, ShardPartition & task_partition, - const size_t current_piece_number, + size_t current_piece_number, bool is_unprioritized_task); void dropAndCreateLocalTable(const ASTPtr & create_ast); @@ -219,7 +218,6 @@ private: bool experimental_use_sample_offset{false}; - Context & context; Poco::Logger * log; std::chrono::milliseconds default_sleep_time{1000}; diff --git a/programs/copier/ClusterCopierApp.cpp b/programs/copier/ClusterCopierApp.cpp index e3169a49ecf..d3fff616b65 100644 --- a/programs/copier/ClusterCopierApp.cpp +++ b/programs/copier/ClusterCopierApp.cpp @@ -3,6 +3,7 @@ #include #include #include +#include #include @@ -110,9 +111,9 @@ void ClusterCopierApp::mainImpl() LOG_INFO(log, "Starting clickhouse-copier (id {}, host_id {}, path {}, revision {})", process_id, host_id, process_path, ClickHouseRevision::getVersionRevision()); SharedContextHolder shared_context = Context::createShared(); - auto context = std::make_unique(Context::createGlobal(shared_context.get())); + auto context = Context::createGlobal(shared_context.get()); context->makeGlobalContext(); - SCOPE_EXIT(context->shutdown()); + SCOPE_EXIT_SAFE(context->shutdown()); context->setConfig(loaded_config.configuration); context->setApplicationType(Context::ApplicationType::LOCAL); @@ -127,13 +128,13 @@ void ClusterCopierApp::mainImpl() registerFormats(); static const std::string default_database = "_local"; - DatabaseCatalog::instance().attachDatabase(default_database, std::make_shared(default_database, *context)); + DatabaseCatalog::instance().attachDatabase(default_database, std::make_shared(default_database, context)); 
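The hunks above are one instance of the pattern running through this whole diff: classes stop storing a raw `Context &` member and instead inherit the `WithContext` mixin, receiving a `ContextPtr` (a shared pointer) in the constructor and reading it back through `getContext()`. A minimal sketch of the idea, with hypothetical stand-in types (the real `DB::Context`/`DB::WithContext` are declared in the Interpreters headers and differ in detail, e.g. in pointer ownership):

```cpp
#include <memory>
#include <utility>

/// Hypothetical stand-ins: the real DB::Context is far richer, and ContextPtr
/// is an alias declared alongside it.
struct Context
{
    int getSetting() const { return 42; }
};
using ContextPtr = std::shared_ptr<const Context>;

/// The mixin: stores the pointer once and exposes it through getContext(),
/// so derived classes no longer keep their own Context member.
class WithContext
{
public:
    explicit WithContext(ContextPtr context_) : context(std::move(context_)) {}
    ContextPtr getContext() const { return context; }

private:
    ContextPtr context;
};

/// Before: `class ClusterCopier { ...; Context & context; };` with call sites
/// like `context.getZooKeeper()`. After: the member is gone and call sites
/// become `getContext()->...`, as in the hunks above.
class ExampleCopier : WithContext
{
public:
    explicit ExampleCopier(ContextPtr context_) : WithContext(std::move(context_)) {}
    int run() const { return getContext()->getSetting(); }
};

int main()
{
    return ExampleCopier(std::make_shared<const Context>()).run() == 42 ? 0 : 1;
}
```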
     context->setCurrentDatabase(default_database);
 
     /// Initialize query scope just in case.
-    CurrentThread::QueryScope query_scope(*context);
+    CurrentThread::QueryScope query_scope(context);
 
-    auto copier = std::make_unique<ClusterCopier>(task_path, host_id, default_database, *context);
+    auto copier = std::make_unique<ClusterCopier>(task_path, host_id, default_database, context);
     copier->setSafeMode(is_safe_mode);
     copier->setCopyFaultProbability(copy_fault_probability);
     copier->setMoveFaultProbability(move_fault_probability);
diff --git a/programs/copier/Internals.cpp b/programs/copier/Internals.cpp
index ea2be469945..bec612a8226 100644
--- a/programs/copier/Internals.cpp
+++ b/programs/copier/Internals.cpp
@@ -222,8 +222,8 @@ Names extractPrimaryKeyColumnNames(const ASTPtr & storage_ast)
     {
         String pk_column = primary_key_expr_list->children[i]->getColumnName();
         if (pk_column != sorting_key_column)
-            throw Exception("Primary key must be a prefix of the sorting key, but in position "
-                + toString(i) + " its column is " + pk_column + ", not " + sorting_key_column,
+            throw Exception("Primary key must be a prefix of the sorting key, but the column in position "
+                + toString(i) + " is " + sorting_key_column + ", not " + pk_column,
                 ErrorCodes::BAD_ARGUMENTS);
 
         if (!primary_key_columns_set.emplace(pk_column).second)
diff --git a/programs/format/Format.cpp b/programs/format/Format.cpp
index ba3d6e8557b..5bf19191353 100644
--- a/programs/format/Format.cpp
+++ b/programs/format/Format.cpp
@@ -102,8 +102,8 @@ int mainEntryClickHouseFormat(int argc, char ** argv)
         }
 
         SharedContextHolder shared_context = Context::createShared();
-        Context context = Context::createGlobal(shared_context.get());
-        context.makeGlobalContext();
+        auto context = Context::createGlobal(shared_context.get());
+        context->makeGlobalContext();
 
         registerFunctions();
         registerAggregateFunctions();
diff --git a/programs/git-import/git-import.cpp b/programs/git-import/git-import.cpp
index b07435dcf78..7977cfba79d 100644
--- a/programs/git-import/git-import.cpp
+++ b/programs/git-import/git-import.cpp
@@ -774,7 +774,7 @@ UInt128 diffHash(const CommitDiff & file_changes)
     }
 
     UInt128 hash_of_diff;
-    hasher.get128(hash_of_diff.low, hash_of_diff.high);
+    hasher.get128(hash_of_diff.items[0], hash_of_diff.items[1]);
 
     return hash_of_diff;
 }
diff --git a/programs/install/Install.cpp b/programs/install/Install.cpp
index ef72624e7ab..96d336673d0 100644
--- a/programs/install/Install.cpp
+++ b/programs/install/Install.cpp
@@ -71,6 +71,9 @@ namespace ErrorCodes
 }
 
+/// ANSI escape sequences for intense color in the terminal.
+#define HILITE "\033[1m"
+#define END_HILITE "\033[0m"
 
 using namespace DB;
 namespace po = boost::program_options;
@@ -559,20 +562,32 @@ int mainEntryClickHouseInstall(int argc, char ** argv)
         bool stdin_is_a_tty = isatty(STDIN_FILENO);
         bool stdout_is_a_tty = isatty(STDOUT_FILENO);
-        bool is_interactive = stdin_is_a_tty && stdout_is_a_tty;
+
+        /// dpkg or apt installers can ask for non-interactive work explicitly.
+
+        const char * debian_frontend_var = getenv("DEBIAN_FRONTEND");
+        bool noninteractive = debian_frontend_var && debian_frontend_var == std::string_view("noninteractive");
+
+        bool is_interactive = !noninteractive && stdin_is_a_tty && stdout_is_a_tty;
+
+        /// We can ask for a password even if stdin is closed/redirected, as long as /dev/tty is available.
+        bool can_ask_password = !noninteractive && stdout_is_a_tty;
 
         if (has_password_for_default_user)
         {
-            fmt::print("Password for default user is already specified. To remind or reset, see {} and {}.\n",
+            fmt::print(HILITE "Password for default user is already specified. To remind or reset, see {} and {}." END_HILITE "\n",
                 users_config_file.string(), users_d.string());
         }
-        else if (!is_interactive)
+        else if (!can_ask_password)
         {
-            fmt::print("Password for default user is empty string. See {} and {} to change it.\n",
+            fmt::print(HILITE "Password for default user is an empty string. See {} and {} to change it." END_HILITE "\n",
                 users_config_file.string(), users_d.string());
         }
         else
         {
+            /// NOTE: When installing a debian package with dpkg -i, stdin is not a terminal, but we are still able to enter a password.
+            /// A more sophisticated method using /dev/tty is applied inside the `readpassphrase` function.
+
            char buf[1000] = {};
            std::string password;
            if (auto * result = readpassphrase("Enter password for default user: ", buf, sizeof(buf), 0))
@@ -600,7 +615,7 @@ int mainEntryClickHouseInstall(int argc, char ** argv)
                "\n";
            out.sync();
            out.finalize();
-           fmt::print("Password for default user is saved in file {}.\n", password_file);
+           fmt::print(HILITE "Password for default user is saved in file {}." END_HILITE "\n", password_file);
 #else
            out << "\n"
                " \n"
@@ -611,12 +626,12 @@ int mainEntryClickHouseInstall(int argc, char ** argv)
                "\n";
            out.sync();
            out.finalize();
-           fmt::print("Password for default user is saved in plaintext in file {}.\n", password_file);
+           fmt::print(HILITE "Password for default user is saved in plaintext in file {}." END_HILITE "\n", password_file);
 #endif
            has_password_for_default_user = true;
         }
         else
-            fmt::print("Password for default user is empty string. See {} and {} to change it.\n",
+            fmt::print(HILITE "Password for default user is an empty string. See {} and {} to change it." END_HILITE "\n",
                 users_config_file.string(), users_d.string());
     }
 
@@ -641,7 +656,6 @@ int mainEntryClickHouseInstall(int argc, char ** argv)
             " This is optional. Taskstats accounting will be disabled."
" To enable taskstats accounting you may add the required capability later manually.\"", "/tmp/test_setcap.sh", fs::canonical(main_bin_path).string()); - fmt::print(" {}\n", command); executeScript(command); #endif @@ -830,8 +844,8 @@ namespace fmt::print("The pidof command returned unusual output.\n"); } - WriteBufferFromFileDescriptor stderr(STDERR_FILENO); - copyData(sh->err, stderr); + WriteBufferFromFileDescriptor std_err(STDERR_FILENO); + copyData(sh->err, std_err); sh->tryWait(); } @@ -842,6 +856,13 @@ namespace { fmt::print("The process with pid = {} is running.\n", pid); } + else if (errno == ESRCH) + { + fmt::print("The process with pid = {} does not exist.\n", pid); + return 0; + } + else + throwFromErrno(fmt::format("Cannot obtain the status of pid {} with `kill`", pid), ErrorCodes::CANNOT_KILL); } if (!pid) diff --git a/programs/library-bridge/CMakeLists.txt b/programs/library-bridge/CMakeLists.txt index a9aa5b4f366..0913c6e4a9a 100644 --- a/programs/library-bridge/CMakeLists.txt +++ b/programs/library-bridge/CMakeLists.txt @@ -1,6 +1,6 @@ set (CLICKHOUSE_LIBRARY_BRIDGE_SOURCES library-bridge.cpp - library-log.cpp + LibraryInterface.cpp LibraryBridge.cpp Handlers.cpp HandlerFactory.cpp @@ -17,7 +17,6 @@ add_executable(clickhouse-library-bridge ${CLICKHOUSE_LIBRARY_BRIDGE_SOURCES}) target_link_libraries(clickhouse-library-bridge PRIVATE daemon dbms - clickhouse_parsers bridge ) diff --git a/programs/library-bridge/HandlerFactory.cpp b/programs/library-bridge/HandlerFactory.cpp index 4257877b512..9f53a24156f 100644 --- a/programs/library-bridge/HandlerFactory.cpp +++ b/programs/library-bridge/HandlerFactory.cpp @@ -16,7 +16,7 @@ namespace DB return std::make_unique(keep_alive_timeout); if (request.getMethod() == Poco::Net::HTTPRequest::HTTP_POST) - return std::make_unique(keep_alive_timeout, context); + return std::make_unique(keep_alive_timeout, getContext()); return nullptr; } diff --git a/programs/library-bridge/HandlerFactory.h b/programs/library-bridge/HandlerFactory.h index 5dee62740cf..93f0721bf01 100644 --- a/programs/library-bridge/HandlerFactory.h +++ b/programs/library-bridge/HandlerFactory.h @@ -12,17 +12,17 @@ class SharedLibraryHandler; using SharedLibraryHandlerPtr = std::shared_ptr; /// Factory for '/ping', '/' handlers. 
-class LibraryBridgeHandlerFactory : public HTTPRequestHandlerFactory +class LibraryBridgeHandlerFactory : public HTTPRequestHandlerFactory, WithContext { public: LibraryBridgeHandlerFactory( const std::string & name_, size_t keep_alive_timeout_, - Context & context_) - : log(&Poco::Logger::get(name_)) + ContextPtr context_) + : WithContext(context_) + , log(&Poco::Logger::get(name_)) , name(name_) , keep_alive_timeout(keep_alive_timeout_) - , context(context_) { } @@ -32,7 +32,6 @@ private: Poco::Logger * log; std::string name; size_t keep_alive_timeout; - Context & context; }; } diff --git a/programs/library-bridge/Handlers.cpp b/programs/library-bridge/Handlers.cpp index ad2cc5f9d9e..6a1bfbbccb7 100644 --- a/programs/library-bridge/Handlers.cpp +++ b/programs/library-bridge/Handlers.cpp @@ -131,7 +131,7 @@ void LibraryRequestHandler::handleRequest(HTTPServerRequest & request, HTTPServe } ReadBufferFromString read_block_buf(params.get("null_values")); - auto format = FormatFactory::instance().getInput(FORMAT, read_block_buf, *sample_block, context, DEFAULT_BLOCK_SIZE); + auto format = FormatFactory::instance().getInput(FORMAT, read_block_buf, *sample_block, getContext(), DEFAULT_BLOCK_SIZE); auto reader = std::make_shared(format); auto sample_block_with_nulls = reader->read(); @@ -176,7 +176,7 @@ void LibraryRequestHandler::handleRequest(HTTPServerRequest & request, HTTPServe const auto & sample_block = library_handler->getSampleBlock(); auto input = library_handler->loadAll(); - BlockOutputStreamPtr output = FormatFactory::instance().getOutputStream(FORMAT, out, sample_block, context); + BlockOutputStreamPtr output = FormatFactory::instance().getOutputStream(FORMAT, out, sample_block, getContext()); copyData(*input, *output); } else if (method == "loadIds") @@ -193,7 +193,7 @@ void LibraryRequestHandler::handleRequest(HTTPServerRequest & request, HTTPServe auto library_handler = SharedLibraryHandlerFactory::instance().get(dictionary_id); const auto & sample_block = library_handler->getSampleBlock(); auto input = library_handler->loadIds(ids); - BlockOutputStreamPtr output = FormatFactory::instance().getOutputStream(FORMAT, out, sample_block, context); + BlockOutputStreamPtr output = FormatFactory::instance().getOutputStream(FORMAT, out, sample_block, getContext()); copyData(*input, *output); } else if (method == "loadKeys") @@ -219,14 +219,14 @@ void LibraryRequestHandler::handleRequest(HTTPServerRequest & request, HTTPServe } auto & read_buf = request.getStream(); - auto format = FormatFactory::instance().getInput(FORMAT, read_buf, *requested_sample_block, context, DEFAULT_BLOCK_SIZE); + auto format = FormatFactory::instance().getInput(FORMAT, read_buf, *requested_sample_block, getContext(), DEFAULT_BLOCK_SIZE); auto reader = std::make_shared(format); auto block = reader->read(); auto library_handler = SharedLibraryHandlerFactory::instance().get(dictionary_id); const auto & sample_block = library_handler->getSampleBlock(); auto input = library_handler->loadKeys(block.getColumns()); - BlockOutputStreamPtr output = FormatFactory::instance().getOutputStream(FORMAT, out, sample_block, context); + BlockOutputStreamPtr output = FormatFactory::instance().getOutputStream(FORMAT, out, sample_block, getContext()); copyData(*input, *output); } } diff --git a/programs/library-bridge/Handlers.h b/programs/library-bridge/Handlers.h index 9b163095da2..dac61d3a735 100644 --- a/programs/library-bridge/Handlers.h +++ b/programs/library-bridge/Handlers.h @@ -17,16 +17,16 @@ namespace DB /// names of 
dictionary attributes, sample block to parse block of null values, block of null values. Everything is /// passed in binary format and is urlencoded. When dictionary is cloned, a new handler is created. /// Each handler is unique to dictionary. -class LibraryRequestHandler : public HTTPRequestHandler +class LibraryRequestHandler : public HTTPRequestHandler, WithContext { public: LibraryRequestHandler( size_t keep_alive_timeout_, - Context & context_) - : log(&Poco::Logger::get("LibraryRequestHandler")) + ContextPtr context_) + : WithContext(context_) + , log(&Poco::Logger::get("LibraryRequestHandler")) , keep_alive_timeout(keep_alive_timeout_) - , context(context_) { } @@ -39,7 +39,6 @@ private: Poco::Logger * log; size_t keep_alive_timeout; - Context & context; }; diff --git a/programs/library-bridge/LibraryBridge.h b/programs/library-bridge/LibraryBridge.h index 168df76b9e2..9f2dafb89ab 100644 --- a/programs/library-bridge/LibraryBridge.h +++ b/programs/library-bridge/LibraryBridge.h @@ -12,12 +12,12 @@ class LibraryBridge : public IBridge { protected: - const std::string bridgeName() const override + std::string bridgeName() const override { return "LibraryBridge"; } - HandlerFactoryPtr getHandlerFactoryPtr(Context & context) const override + HandlerFactoryPtr getHandlerFactoryPtr(ContextPtr context) const override { return std::make_shared("LibraryRequestHandlerFactory-factory", keep_alive_timeout, context); } diff --git a/src/Dictionaries/LibraryDictionarySourceExternal.cpp b/programs/library-bridge/LibraryInterface.cpp similarity index 97% rename from src/Dictionaries/LibraryDictionarySourceExternal.cpp rename to programs/library-bridge/LibraryInterface.cpp index 259d0a2846a..3975368c17f 100644 --- a/src/Dictionaries/LibraryDictionarySourceExternal.cpp +++ b/programs/library-bridge/LibraryInterface.cpp @@ -1,4 +1,5 @@ -#include "LibraryDictionarySourceExternal.h" +#include "LibraryInterface.h" + #include namespace diff --git a/src/Dictionaries/LibraryDictionarySourceExternal.h b/programs/library-bridge/LibraryInterface.h similarity index 100% rename from src/Dictionaries/LibraryDictionarySourceExternal.h rename to programs/library-bridge/LibraryInterface.h diff --git a/programs/library-bridge/LibraryUtils.h b/programs/library-bridge/LibraryUtils.h index 359d1de93e3..8ced8df1c48 100644 --- a/programs/library-bridge/LibraryUtils.h +++ b/programs/library-bridge/LibraryUtils.h @@ -1,11 +1,12 @@ #pragma once #include -#include #include #include #include +#include "LibraryInterface.h" + namespace DB { diff --git a/programs/library-bridge/SharedLibraryHandler.h b/programs/library-bridge/SharedLibraryHandler.h index 5c0334ac89f..fa476995e32 100644 --- a/programs/library-bridge/SharedLibraryHandler.h +++ b/programs/library-bridge/SharedLibraryHandler.h @@ -23,6 +23,8 @@ public: SharedLibraryHandler(const SharedLibraryHandler & other); + SharedLibraryHandler & operator=(const SharedLibraryHandler & other) = delete; + ~SharedLibraryHandler(); BlockInputStreamPtr loadAll(); diff --git a/programs/library-bridge/library-log.cpp b/programs/library-bridge/library-log.cpp deleted file mode 100644 index 89fb31623b3..00000000000 --- a/programs/library-bridge/library-log.cpp +++ /dev/null @@ -1,66 +0,0 @@ -#include -#include - -namespace -{ -const char DICT_LOGGER_NAME[] = "LibraryDictionarySourceExternal"; -} - -namespace ClickHouseLibrary -{ - -std::string_view LIBRARY_CREATE_NEW_FUNC_NAME = "ClickHouseDictionary_v3_libNew"; -std::string_view LIBRARY_CLONE_FUNC_NAME = 
"ClickHouseDictionary_v3_libClone"; -std::string_view LIBRARY_DELETE_FUNC_NAME = "ClickHouseDictionary_v3_libDelete"; - -std::string_view LIBRARY_DATA_NEW_FUNC_NAME = "ClickHouseDictionary_v3_dataNew"; -std::string_view LIBRARY_DATA_DELETE_FUNC_NAME = "ClickHouseDictionary_v3_dataDelete"; - -std::string_view LIBRARY_LOAD_ALL_FUNC_NAME = "ClickHouseDictionary_v3_loadAll"; -std::string_view LIBRARY_LOAD_IDS_FUNC_NAME = "ClickHouseDictionary_v3_loadIds"; -std::string_view LIBRARY_LOAD_KEYS_FUNC_NAME = "ClickHouseDictionary_v3_loadKeys"; - -std::string_view LIBRARY_IS_MODIFIED_FUNC_NAME = "ClickHouseDictionary_v3_isModified"; -std::string_view LIBRARY_SUPPORTS_SELECTIVE_LOAD_FUNC_NAME = "ClickHouseDictionary_v3_supportsSelectiveLoad"; - -void log(LogLevel level, CString msg) -{ - auto & logger = Poco::Logger::get(DICT_LOGGER_NAME); - switch (level) - { - case LogLevel::TRACE: - if (logger.trace()) - logger.trace(msg); - break; - case LogLevel::DEBUG: - if (logger.debug()) - logger.debug(msg); - break; - case LogLevel::INFORMATION: - if (logger.information()) - logger.information(msg); - break; - case LogLevel::NOTICE: - if (logger.notice()) - logger.notice(msg); - break; - case LogLevel::WARNING: - if (logger.warning()) - logger.warning(msg); - break; - case LogLevel::ERROR: - if (logger.error()) - logger.error(msg); - break; - case LogLevel::CRITICAL: - if (logger.critical()) - logger.critical(msg); - break; - case LogLevel::FATAL: - if (logger.fatal()) - logger.fatal(msg); - break; - } -} - -} diff --git a/programs/local/LocalServer.cpp b/programs/local/LocalServer.cpp index 2909b838c84..043cc596e2b 100644 --- a/programs/local/LocalServer.cpp +++ b/programs/local/LocalServer.cpp @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -99,9 +100,9 @@ void LocalServer::initialize(Poco::Util::Application & self) } } -void LocalServer::applyCmdSettings(Context & context) +void LocalServer::applyCmdSettings(ContextPtr context) { - context.applySettingsChanges(cmd_settings.changes()); + context->applySettingsChanges(cmd_settings.changes()); } /// If path is specified and not empty, will try to setup server environment and load existing metadata @@ -176,7 +177,7 @@ void LocalServer::tryInitPath() } -static void attachSystemTables(const Context & context) +static void attachSystemTables(ContextPtr context) { DatabasePtr system_database = DatabaseCatalog::instance().tryGetDatabase(DatabaseCatalog::SYSTEM_DATABASE); if (!system_database) @@ -211,7 +212,7 @@ try } shared_context = Context::createShared(); - global_context = std::make_unique(Context::createGlobal(shared_context.get())); + global_context = Context::createGlobal(shared_context.get()); global_context->makeGlobalContext(); global_context->setApplicationType(Context::ApplicationType::LOCAL); tryInitPath(); @@ -274,9 +275,9 @@ try * if such tables will not be dropped, clickhouse-server will not be able to load them due to security reasons. 
      */
     std::string default_database = config().getString("default_database", "_local");
-    DatabaseCatalog::instance().attachDatabase(default_database, std::make_shared(default_database, *global_context));
+    DatabaseCatalog::instance().attachDatabase(default_database, std::make_shared(default_database, global_context));
     global_context->setCurrentDatabase(default_database);
-    applyCmdOptions(*global_context);
+    applyCmdOptions(global_context);
 
     if (config().has("path"))
     {
@@ -288,15 +289,15 @@ try
         LOG_DEBUG(log, "Loading metadata from {}", path);
         Poco::File(path + "data/").createDirectories();
         Poco::File(path + "metadata/").createDirectories();
-        loadMetadataSystem(*global_context);
-        attachSystemTables(*global_context);
-        loadMetadata(*global_context);
+        loadMetadataSystem(global_context);
+        attachSystemTables(global_context);
+        loadMetadata(global_context);
         DatabaseCatalog::instance().loadDatabases();
         LOG_DEBUG(log, "Loaded metadata.");
     }
     else if (!config().has("no-system-tables"))
     {
-        attachSystemTables(*global_context);
+        attachSystemTables(global_context);
     }
 
     processQueries();
@@ -375,23 +376,46 @@ void LocalServer::processQueries()
 
     /// we can't mutate global global_context (can lead to races, as it was already passed to some background threads)
     /// so we can't reuse it safely as a query context and need a copy here
-    auto context = Context(*global_context);
+    auto context = Context::createCopy(global_context);
 
-    context.makeSessionContext();
-    context.makeQueryContext();
+    context->makeSessionContext();
+    context->makeQueryContext();
 
-    context.setUser("default", "", Poco::Net::SocketAddress{});
-    context.setCurrentQueryId("");
+    context->setUser("default", "", Poco::Net::SocketAddress{});
+    context->setCurrentQueryId("");
     applyCmdSettings(context);
 
     /// Use the same query_id (and thread group) for all queries
     CurrentThread::QueryScope query_scope_holder(context);
 
+    /// Set up progress display.
+    progress_bar.need_render_progress = config().getBool("progress", false);
+
+    if (progress_bar.need_render_progress)
+    {
+        context->setProgressCallback([&](const Progress & value)
+        {
+            if (!progress_bar.updateProgress(progress, value))
+            {
+                // Just a keep-alive update.
+                return;
+            }
+            progress_bar.writeProgress(progress, watch.elapsed());
+        });
+    }
+
     bool echo_queries = config().hasOption("echo") || config().hasOption("verbose");
     std::exception_ptr exception;
 
     for (const auto & query : queries)
     {
+        watch.restart();
+        progress.reset();
+        progress_bar.show_progress_bar = false;
+        progress_bar.written_progress_chars = 0;
+        progress_bar.written_first_block = false;
+
         ReadBufferFromString read_buf(query);
         WriteBufferFromFileDescriptor write_buf(STDOUT_FILENO);
@@ -548,6 +572,7 @@ void LocalServer::init(int argc, char ** argv)
         ("ignore-error", "do not stop processing if a query failed")
         ("no-system-tables", "do not attach system tables (better startup time)")
         ("version,V", "print version information and exit")
+        ("progress", "print progress of query execution")
         ;
 
     cmd_settings.addProgramOptions(description);
@@ -597,6 +622,8 @@ void LocalServer::init(int argc, char ** argv)
     if (options.count("stacktrace"))
         config().setBool("stacktrace", true);
+    if (options.count("progress"))
+        config().setBool("progress", true);
     if (options.count("echo"))
         config().setBool("echo", true);
     if (options.count("verbose"))
@@ -618,9 +645,9 @@ void LocalServer::init(int argc, char ** argv)
     argsToConfig(arguments, config(), 100);
 }
 
-void LocalServer::applyCmdOptions(Context & context)
+void LocalServer::applyCmdOptions(ContextPtr context)
 {
-    context.setDefaultFormat(config().getString("output-format", config().getString("format", "TSV")));
+    context->setDefaultFormat(config().getString("output-format", config().getString("format", "TSV")));
     applyCmdSettings(context);
 }
 
diff --git a/programs/local/LocalServer.h b/programs/local/LocalServer.h
index 02778bd86cb..c5e9d5716dd 100644
--- a/programs/local/LocalServer.h
+++ b/programs/local/LocalServer.h
@@ -1,13 +1,13 @@
 #pragma once
 
-#include
-#include
 #include
 #include
 #include
-#include
+#include
 #include
-
+#include
+#include
+#include
 
 namespace DB
 {
@@ -36,18 +36,22 @@ private:
     std::string getInitialCreateTableQuery();
 
     void tryInitPath();
-    void applyCmdOptions(Context & context);
-    void applyCmdSettings(Context & context);
+    void applyCmdOptions(ContextPtr context);
+    void applyCmdSettings(ContextPtr context);
     void processQueries();
     void setupUsers();
    void cleanup();
+
 protected:
     SharedContextHolder shared_context;
-    std::unique_ptr<Context> global_context;
+    ContextPtr global_context;
 
     /// Settings specified via command line args
     Settings cmd_settings;
 
+    ProgressBar progress_bar;
+    Progress progress;
+    Stopwatch watch;
     std::optional temporary_directory_to_delete;
 };
 
diff --git a/programs/obfuscator/Obfuscator.cpp b/programs/obfuscator/Obfuscator.cpp
index aea70ba0986..fb6817fbf80 100644
--- a/programs/obfuscator/Obfuscator.cpp
+++ b/programs/obfuscator/Obfuscator.cpp
@@ -365,16 +365,20 @@ static void transformFixedString(const UInt8 * src, UInt8 * dst, size_t size, UI
     }
 }
 
-static void transformUUID(const UInt128 & src, UInt128 & dst, UInt64 seed)
+static void transformUUID(const UUID & src_uuid, UUID & dst_uuid, UInt64 seed)
 {
+    const UInt128 & src = src_uuid.toUnderType();
+    UInt128 & dst = dst_uuid.toUnderType();
+
     SipHash hash;
     hash.update(seed);
-    hash.update(reinterpret_cast(&src), sizeof(UInt128));
+    hash.update(reinterpret_cast(&src), sizeof(UUID));
 
     /// Saving version and variant from an old UUID
     hash.get128(reinterpret_cast(&dst));
-    dst.high = (dst.high & 0x1fffffffffffffffull) | (src.high & 0xe000000000000000ull);
-    dst.low = (dst.low & 0xffffffffffff0fffull) | (src.low & 0x000000000000f000ull);
+
+    dst.items[1] = (dst.items[1] &
0x1fffffffffffffffull) | (src.items[1] & 0xe000000000000000ull); + dst.items[0] = (dst.items[0] & 0xffffffffffff0fffull) | (src.items[0] & 0x000000000000f000ull); } class FixedStringModel : public IModel @@ -426,10 +430,10 @@ public: ColumnPtr generate(const IColumn & column) override { - const ColumnUInt128 & src_column = assert_cast(column); + const ColumnUUID & src_column = assert_cast(column); const auto & src_data = src_column.getData(); - auto res_column = ColumnUInt128::create(); + auto res_column = ColumnUUID::create(); auto & res_data = res_column->getData(); res_data.resize(src_data.size()); @@ -1129,8 +1133,8 @@ try } SharedContextHolder shared_context = Context::createShared(); - Context context = Context::createGlobal(shared_context.get()); - context.makeGlobalContext(); + ContextPtr context = Context::createGlobal(shared_context.get()); + context->makeGlobalContext(); ReadBufferFromFileDescriptor file_in(STDIN_FILENO); WriteBufferFromFileDescriptor file_out(STDOUT_FILENO); @@ -1152,7 +1156,7 @@ try if (!silent) std::cerr << "Training models\n"; - BlockInputStreamPtr input = context.getInputFormat(input_format, file_in, header, max_block_size); + BlockInputStreamPtr input = context->getInputFormat(input_format, file_in, header, max_block_size); input->readPrefix(); while (Block block = input->read()) @@ -1179,8 +1183,8 @@ try file_in.seek(0, SEEK_SET); - BlockInputStreamPtr input = context.getInputFormat(input_format, file_in, header, max_block_size); - BlockOutputStreamPtr output = context.getOutputStreamParallelIfPossible(output_format, file_out, header); + BlockInputStreamPtr input = context->getInputFormat(input_format, file_in, header, max_block_size); + BlockOutputStreamPtr output = context->getOutputStreamParallelIfPossible(output_format, file_out, header); if (processed_rows + source_rows > limit) input = std::make_shared(input, limit - processed_rows, 0); diff --git a/programs/odbc-bridge/CMakeLists.txt b/programs/odbc-bridge/CMakeLists.txt index c383d09767c..7b232f2b5dc 100644 --- a/programs/odbc-bridge/CMakeLists.txt +++ b/programs/odbc-bridge/CMakeLists.txt @@ -26,11 +26,12 @@ target_link_libraries(clickhouse-odbc-bridge PRIVATE dbms bridge clickhouse_parsers - Poco::Data - Poco::Data::ODBC + nanodbc + unixodbc ) set_target_properties(clickhouse-odbc-bridge PROPERTIES RUNTIME_OUTPUT_DIRECTORY ..) 
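The `transformUUID` hunk above anonymizes a UUID with SipHash but keeps it a structurally valid UUID: the two masked assignments copy the RFC 4122 version nibble and variant bits from the source into the hash output. A self-contained illustration of those masks (assuming the same two-`UInt64` layout as `UInt128::items`, with the version nibble in the high half and the variant bits at the top of the low half):

```cpp
#include <cstdint>
#include <cstdio>

int main()
{
    /// Source UUID halves: the version nibble (here 4) sits in bits 12..15 of
    /// the high half; the variant bits sit in the top 3 bits of the low half.
    uint64_t src[2] = {0x1111111111114111ull, 0xa111111111111111ull};

    /// Pretend this is fresh SipHash output: arbitrary bits everywhere.
    uint64_t dst[2] = {0xffffffffffffffffull, 0xffffffffffffffffull};

    /// Same masks as in transformUUID: clear the version field of the hash
    /// output and copy it from the source...
    dst[0] = (dst[0] & 0xffffffffffff0fffull) | (src[0] & 0x000000000000f000ull);
    /// ...and do the same for the variant bits.
    dst[1] = (dst[1] & 0x1fffffffffffffffull) | (src[1] & 0xe000000000000000ull);

    /// Prints "ffffffffffff4fff bfffffffffffffff": still a version-4 UUID
    /// with the same variant as the source, but otherwise random-looking.
    std::printf("%016llx %016llx\n", (unsigned long long) dst[0], (unsigned long long) dst[1]);
}
```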
+target_compile_options (clickhouse-odbc-bridge PRIVATE -Wno-reserved-id-macro -Wno-keyword-macro) if (USE_GDB_ADD_INDEX) add_custom_command(TARGET clickhouse-odbc-bridge POST_BUILD COMMAND ${GDB_ADD_INDEX_EXE} ../clickhouse-odbc-bridge COMMENT "Adding .gdb-index to clickhouse-odbc-bridge" VERBATIM) diff --git a/programs/odbc-bridge/ColumnInfoHandler.cpp b/programs/odbc-bridge/ColumnInfoHandler.cpp index 14fa734f246..f4f575bb33d 100644 --- a/programs/odbc-bridge/ColumnInfoHandler.cpp +++ b/programs/odbc-bridge/ColumnInfoHandler.cpp @@ -2,29 +2,36 @@ #if USE_ODBC -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include "getIdentifierQuote.h" -# include "validateODBCConnectionString.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "getIdentifierQuote.h" +#include "validateODBCConnectionString.h" +#include "ODBCConnectionFactory.h" + +#include +#include -# define POCO_SQL_ODBC_CLASS Poco::Data::ODBC namespace DB { + +namespace ErrorCodes +{ + extern const int LOGICAL_ERROR; + extern const int BAD_ARGUMENTS; +} + namespace { DataTypePtr getDataType(SQLSMALLINT type) @@ -59,6 +66,7 @@ namespace } } + void ODBCColumnsInfoHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse & response) { HTMLForm params(request, request.getStream()); @@ -77,88 +85,79 @@ void ODBCColumnsInfoHandler::handleRequest(HTTPServerRequest & request, HTTPServ process_error("No 'table' param in request URL"); return; } + if (!params.has("connection_string")) { process_error("No 'connection_string' in request URL"); return; } + std::string schema_name; std::string table_name = params.get("table"); std::string connection_string = params.get("connection_string"); if (params.has("schema")) - { schema_name = params.get("schema"); - LOG_TRACE(log, "Will fetch info for table '{}'", schema_name + "." + table_name); - } - else - LOG_TRACE(log, "Will fetch info for table '{}'", table_name); + LOG_TRACE(log, "Got connection str '{}'", connection_string); try { const bool external_table_functions_use_nulls = Poco::NumberParser::parseBool(params.get("external_table_functions_use_nulls", "false")); - POCO_SQL_ODBC_CLASS::SessionImpl session(validateODBCConnectionString(connection_string), DBMS_DEFAULT_CONNECT_TIMEOUT_SEC); - SQLHDBC hdbc = session.dbc().handle(); + auto connection = ODBCConnectionFactory::instance().get( + validateODBCConnectionString(connection_string), + getContext()->getSettingsRef().odbc_bridge_connection_pool_size); - SQLHSTMT hstmt = nullptr; + nanodbc::catalog catalog(connection->get()); + std::string catalog_name; - if (POCO_SQL_ODBC_CLASS::Utility::isError(SQLAllocStmt(hdbc, &hstmt))) - throw POCO_SQL_ODBC_CLASS::ODBCException("Could not allocate connection handle."); - - SCOPE_EXIT(SQLFreeStmt(hstmt, SQL_DROP)); - - const auto & context_settings = context.getSettingsRef(); - - /// TODO Why not do SQLColumns instead? - std::string name = schema_name.empty() ? backQuoteIfNeed(table_name) : backQuoteIfNeed(schema_name) + "." 
+ backQuoteIfNeed(table_name);
-        WriteBufferFromOwnString buf;
-        std::string input = "SELECT * FROM " + name + " WHERE 1 = 0";
-        ParserQueryWithOutput parser(input.data() + input.size());
-        ASTPtr select = parseQuery(parser, input.data(), input.data() + input.size(), "", context_settings.max_query_size, context_settings.max_parser_depth);
-
-        IAST::FormatSettings settings(buf, true);
-        settings.always_quote_identifiers = true;
-        settings.identifier_quoting_style = getQuotingStyle(hdbc);
-        select->format(settings);
-        std::string query = buf.str();
-
-        LOG_TRACE(log, "Inferring structure with query '{}'", query);
-
-        if (POCO_SQL_ODBC_CLASS::Utility::isError(POCO_SQL_ODBC_CLASS::SQLPrepare(hstmt, reinterpret_cast(query.data()), query.size())))
-            throw POCO_SQL_ODBC_CLASS::DescriptorException(session.dbc());
-
-        if (POCO_SQL_ODBC_CLASS::Utility::isError(SQLExecute(hstmt)))
-            throw POCO_SQL_ODBC_CLASS::StatementException(hstmt);
-
-        SQLSMALLINT cols = 0;
-        if (POCO_SQL_ODBC_CLASS::Utility::isError(SQLNumResultCols(hstmt, &cols)))
-            throw POCO_SQL_ODBC_CLASS::StatementException(hstmt);
-
-        /// TODO cols not checked
-
-        NamesAndTypesList columns;
-        for (SQLSMALLINT ncol = 1; ncol <= cols; ++ncol)
+        /// In XDBC tables it is allowed to pass either database_name or schema_name in the table definition, but not both of them.
+        /// Both are passed as the 'schema' parameter in the request URL, so it is not clear which of the two was passed.
+        /// If it is schema_name, then we know that the database is added in odbc.ini. But if we have database_name as 'schema',
+        /// that is not guaranteed. For nanodbc, database_name must either be in odbc.ini or be passed as catalog_name.
+        auto get_columns = [&]()
         {
-            SQLSMALLINT type = 0;
-            /// TODO Why 301?
-            SQLCHAR column_name[301];
-
-            SQLSMALLINT is_nullable;
-            const auto result = POCO_SQL_ODBC_CLASS::SQLDescribeCol(hstmt, ncol, column_name, sizeof(column_name), nullptr, &type, nullptr, nullptr, &is_nullable);
-            if (POCO_SQL_ODBC_CLASS::Utility::isError(result))
-                throw POCO_SQL_ODBC_CLASS::StatementException(hstmt);
-
-            auto column_type = getDataType(type);
-            if (external_table_functions_use_nulls && is_nullable == SQL_NULLABLE)
+            nanodbc::catalog::tables tables = catalog.find_tables(table_name, /* type = */ "", /* schema = */ "", /* catalog = */ schema_name);
+            if (tables.next())
             {
-                column_type = std::make_shared(column_type);
+                catalog_name = tables.table_catalog();
+                LOG_TRACE(log, "Will fetch info for table '{}.{}'", catalog_name, table_name);
+                return catalog.find_columns(/* column = */ "", table_name, /* schema = */ "", catalog_name);
             }
-            columns.emplace_back(reinterpret_cast(column_name), std::move(column_type));
+            tables = catalog.find_tables(table_name, /* type = */ "", /* schema = */ schema_name);
+            if (tables.next())
+            {
+                catalog_name = tables.table_catalog();
+                LOG_TRACE(log, "Will fetch info for table '{}.{}.{}'", catalog_name, schema_name, table_name);
+                return catalog.find_columns(/* column = */ "", table_name, schema_name, catalog_name);
+            }
+
+            throw Exception(ErrorCodes::BAD_ARGUMENTS, "Table {} not found", schema_name.empty() ? table_name : schema_name + '.'
+ table_name); + }; + + nanodbc::catalog::columns columns_definition = get_columns(); + + NamesAndTypesList columns; + while (columns_definition.next()) + { + SQLSMALLINT type = columns_definition.sql_data_type(); + std::string column_name = columns_definition.column_name(); + + bool is_nullable = columns_definition.nullable() == SQL_NULLABLE; + + auto column_type = getDataType(type); + + if (external_table_functions_use_nulls && is_nullable == SQL_NULLABLE) + column_type = std::make_shared(column_type); + + columns.emplace_back(column_name, std::move(column_type)); } + if (columns.empty()) + throw Exception("Columns definition was not returned", ErrorCodes::LOGICAL_ERROR); + WriteBufferFromHTTPServerResponse out(response, request.getMethod() == Poco::Net::HTTPRequest::HTTP_HEAD, keep_alive_timeout); try { diff --git a/programs/odbc-bridge/ColumnInfoHandler.h b/programs/odbc-bridge/ColumnInfoHandler.h index 9b5b470b31d..bc976f54aee 100644 --- a/programs/odbc-bridge/ColumnInfoHandler.h +++ b/programs/odbc-bridge/ColumnInfoHandler.h @@ -2,24 +2,23 @@ #if USE_ODBC -# include -# include -# include +#include +#include +#include +#include +#include -# include -/** The structure of the table is taken from the query "SELECT * FROM table WHERE 1=0". - * TODO: It would be much better to utilize ODBC methods dedicated for columns description. - * If there is no such table, an exception is thrown. - */ namespace DB { -class ODBCColumnsInfoHandler : public HTTPRequestHandler +class ODBCColumnsInfoHandler : public HTTPRequestHandler, WithContext { public: - ODBCColumnsInfoHandler(size_t keep_alive_timeout_, Context & context_) - : log(&Poco::Logger::get("ODBCColumnsInfoHandler")), keep_alive_timeout(keep_alive_timeout_), context(context_) + ODBCColumnsInfoHandler(size_t keep_alive_timeout_, ContextPtr context_) + : WithContext(context_) + , log(&Poco::Logger::get("ODBCColumnsInfoHandler")) + , keep_alive_timeout(keep_alive_timeout_) { } @@ -28,7 +27,6 @@ public: private: Poco::Logger * log; size_t keep_alive_timeout; - Context & context; }; } diff --git a/programs/odbc-bridge/HandlerFactory.cpp b/programs/odbc-bridge/HandlerFactory.cpp index 3b99673e955..49984453d33 100644 --- a/programs/odbc-bridge/HandlerFactory.cpp +++ b/programs/odbc-bridge/HandlerFactory.cpp @@ -21,26 +21,26 @@ std::unique_ptr ODBCBridgeHandlerFactory::createRequestHandl if (uri.getPath() == "/columns_info") #if USE_ODBC - return std::make_unique(keep_alive_timeout, context); + return std::make_unique(keep_alive_timeout, getContext()); #else return nullptr; #endif else if (uri.getPath() == "/identifier_quote") #if USE_ODBC - return std::make_unique(keep_alive_timeout, context); + return std::make_unique(keep_alive_timeout, getContext()); #else return nullptr; #endif else if (uri.getPath() == "/schema_allowed") #if USE_ODBC - return std::make_unique(keep_alive_timeout, context); + return std::make_unique(keep_alive_timeout, getContext()); #else return nullptr; #endif else if (uri.getPath() == "/write") - return std::make_unique(pool_map, keep_alive_timeout, context, "write"); + return std::make_unique(keep_alive_timeout, getContext(), "write"); else - return std::make_unique(pool_map, keep_alive_timeout, context, "read"); + return std::make_unique(keep_alive_timeout, getContext(), "read"); } return nullptr; } diff --git a/programs/odbc-bridge/HandlerFactory.h b/programs/odbc-bridge/HandlerFactory.h index 9db183a2f9c..ffbbe3670af 100644 --- a/programs/odbc-bridge/HandlerFactory.h +++ b/programs/odbc-bridge/HandlerFactory.h @@ -1,32 
+1,28 @@ #pragma once -#include +#include #include #include "ColumnInfoHandler.h" #include "IdentifierQuoteHandler.h" #include "MainHandler.h" #include "SchemaAllowedHandler.h" - #include -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wunused-parameter" -#include -#pragma GCC diagnostic pop - namespace DB { /** Factory for '/ping', '/', '/columns_info', '/identifier_quote', '/schema_allowed' handlers. * Also stores Session pools for ODBC connections */ -class ODBCBridgeHandlerFactory : public HTTPRequestHandlerFactory +class ODBCBridgeHandlerFactory : public HTTPRequestHandlerFactory, WithContext { public: - ODBCBridgeHandlerFactory(const std::string & name_, size_t keep_alive_timeout_, Context & context_) - : log(&Poco::Logger::get(name_)), name(name_), keep_alive_timeout(keep_alive_timeout_), context(context_) + ODBCBridgeHandlerFactory(const std::string & name_, size_t keep_alive_timeout_, ContextPtr context_) + : WithContext(context_) + , log(&Poco::Logger::get(name_)) + , name(name_) + , keep_alive_timeout(keep_alive_timeout_) { - pool_map = std::make_shared(); } std::unique_ptr createRequestHandler(const HTTPServerRequest & request) override; @@ -35,8 +31,6 @@ private: Poco::Logger * log; std::string name; size_t keep_alive_timeout; - Context & context; - std::shared_ptr pool_map; }; } diff --git a/programs/odbc-bridge/IdentifierQuoteHandler.cpp b/programs/odbc-bridge/IdentifierQuoteHandler.cpp index 5060d37c479..124a5c420f8 100644 --- a/programs/odbc-bridge/IdentifierQuoteHandler.cpp +++ b/programs/odbc-bridge/IdentifierQuoteHandler.cpp @@ -2,23 +2,20 @@ #if USE_ODBC -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include "getIdentifierQuote.h" -# include "validateODBCConnectionString.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "getIdentifierQuote.h" +#include "validateODBCConnectionString.h" +#include "ODBCConnectionFactory.h" -# define POCO_SQL_ODBC_CLASS Poco::Data::ODBC namespace DB { @@ -44,10 +41,12 @@ void IdentifierQuoteHandler::handleRequest(HTTPServerRequest & request, HTTPServ try { std::string connection_string = params.get("connection_string"); - POCO_SQL_ODBC_CLASS::SessionImpl session(validateODBCConnectionString(connection_string), DBMS_DEFAULT_CONNECT_TIMEOUT_SEC); - SQLHDBC hdbc = session.dbc().handle(); - auto identifier = getIdentifierQuote(hdbc); + auto connection = ODBCConnectionFactory::instance().get( + validateODBCConnectionString(connection_string), + getContext()->getSettingsRef().odbc_bridge_connection_pool_size); + + auto identifier = getIdentifierQuote(connection->get()); WriteBufferFromHTTPServerResponse out(response, request.getMethod() == Poco::Net::HTTPRequest::HTTP_HEAD, keep_alive_timeout); try diff --git a/programs/odbc-bridge/IdentifierQuoteHandler.h b/programs/odbc-bridge/IdentifierQuoteHandler.h index dad88c72ad8..ef3806fd802 100644 --- a/programs/odbc-bridge/IdentifierQuoteHandler.h +++ b/programs/odbc-bridge/IdentifierQuoteHandler.h @@ -11,11 +11,13 @@ namespace DB { -class IdentifierQuoteHandler : public HTTPRequestHandler +class IdentifierQuoteHandler : public HTTPRequestHandler, WithContext { public: - IdentifierQuoteHandler(size_t keep_alive_timeout_, Context &) - : log(&Poco::Logger::get("IdentifierQuoteHandler")), keep_alive_timeout(keep_alive_timeout_) + IdentifierQuoteHandler(size_t keep_alive_timeout_, ContextPtr context_) + : WithContext(context_) + , 
log(&Poco::Logger::get("IdentifierQuoteHandler")) + , keep_alive_timeout(keep_alive_timeout_) { } diff --git a/programs/odbc-bridge/MainHandler.cpp b/programs/odbc-bridge/MainHandler.cpp index 079fc371ab4..ffa636e8b49 100644 --- a/programs/odbc-bridge/MainHandler.cpp +++ b/programs/odbc-bridge/MainHandler.cpp @@ -23,13 +23,9 @@ #include -#if USE_ODBC -#include -#define POCO_SQL_ODBC_CLASS Poco::Data::ODBC -#endif - namespace DB { + namespace { std::unique_ptr parseColumns(std::string && column_string) @@ -42,37 +38,6 @@ namespace } } -using PocoSessionPoolConstructor = std::function()>; -/** Is used to adjust max size of default Poco thread pool. See issue #750 - * Acquire the lock, resize pool and construct new Session. - */ -static std::shared_ptr createAndCheckResizePocoSessionPool(PocoSessionPoolConstructor pool_constr) -{ - static std::mutex mutex; - - Poco::ThreadPool & pool = Poco::ThreadPool::defaultPool(); - - /// NOTE: The lock don't guarantee that external users of the pool don't change its capacity - std::unique_lock lock(mutex); - - if (pool.available() == 0) - pool.addCapacity(2 * std::max(pool.capacity(), 1)); - - return pool_constr(); -} - -ODBCHandler::PoolPtr ODBCHandler::getPool(const std::string & connection_str) -{ - std::lock_guard lock(mutex); - if (!pool_map->count(connection_str)) - { - pool_map->emplace(connection_str, createAndCheckResizePocoSessionPool([connection_str] - { - return std::make_shared("ODBC", validateODBCConnectionString(connection_str)); - })); - } - return pool_map->at(connection_str); -} void ODBCHandler::processError(HTTPServerResponse & response, const std::string & message) { @@ -82,12 +47,14 @@ void ODBCHandler::processError(HTTPServerResponse & response, const std::string LOG_WARNING(log, message); } + void ODBCHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse & response) { HTMLForm params(request); + LOG_TRACE(log, "Request URI: {}", request.getURI()); + if (mode == "read") params.read(request.getStream()); - LOG_TRACE(log, "Request URI: {}", request.getURI()); if (mode == "read" && !params.has("query")) { @@ -95,11 +62,6 @@ void ODBCHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse return; } - if (!params.has("columns")) - { - processError(response, "No 'columns' in request URL"); - return; - } if (!params.has("connection_string")) { @@ -107,6 +69,16 @@ void ODBCHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse return; } + if (!params.has("sample_block")) + { + processError(response, "No 'sample_block' in request URL"); + return; + } + + std::string format = params.get("format", "RowBinary"); + std::string connection_string = params.get("connection_string"); + LOG_TRACE(log, "Connection string: '{}'", connection_string); + UInt64 max_block_size = DEFAULT_BLOCK_SIZE; if (params.has("max_block_size")) { @@ -119,28 +91,27 @@ void ODBCHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse max_block_size = parse(max_block_size_str); } - std::string columns = params.get("columns"); + std::string sample_block_string = params.get("sample_block"); std::unique_ptr sample_block; try { - sample_block = parseColumns(std::move(columns)); + sample_block = parseColumns(std::move(sample_block_string)); } catch (const Exception & ex) { - processError(response, "Invalid 'columns' parameter in request body '" + ex.message() + "'"); - LOG_WARNING(log, ex.getStackTraceString()); + processError(response, "Invalid 'sample_block' parameter in request body '" + ex.message() + "'"); + 
LOG_ERROR(log, ex.getStackTraceString()); return; } - std::string format = params.get("format", "RowBinary"); - - std::string connection_string = params.get("connection_string"); - LOG_TRACE(log, "Connection string: '{}'", connection_string); - WriteBufferFromHTTPServerResponse out(response, request.getMethod() == Poco::Net::HTTPRequest::HTTP_HEAD, keep_alive_timeout); try { + auto connection = ODBCConnectionFactory::instance().get( + validateODBCConnectionString(connection_string), + getContext()->getSettingsRef().odbc_bridge_connection_pool_size); + if (mode == "write") { if (!params.has("db_name")) @@ -159,15 +130,12 @@ void ODBCHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse auto quoting_style = IdentifierQuotingStyle::None; #if USE_ODBC - POCO_SQL_ODBC_CLASS::SessionImpl session(validateODBCConnectionString(connection_string), DBMS_DEFAULT_CONNECT_TIMEOUT_SEC); - quoting_style = getQuotingStyle(session.dbc().handle()); + quoting_style = getQuotingStyle(connection->get()); #endif - - auto pool = getPool(connection_string); auto & read_buf = request.getStream(); - auto input_format = FormatFactory::instance().getInput(format, read_buf, *sample_block, context, max_block_size); + auto input_format = FormatFactory::instance().getInput(format, read_buf, *sample_block, getContext(), max_block_size); auto input_stream = std::make_shared(input_format); - ODBCBlockOutputStream output_stream(pool->get(), db_name, table_name, *sample_block, quoting_style); + ODBCBlockOutputStream output_stream(std::move(connection), db_name, table_name, *sample_block, getContext(), quoting_style); copyData(*input_stream, output_stream); writeStringBinary("Ok.", out); } @@ -176,9 +144,8 @@ void ODBCHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse std::string query = params.get("query"); LOG_TRACE(log, "Query: {}", query); - BlockOutputStreamPtr writer = FormatFactory::instance().getOutputStreamParallelIfPossible(format, out, *sample_block, context); - auto pool = getPool(connection_string); - ODBCBlockInputStream inp(pool->get(), query, *sample_block, max_block_size); + BlockOutputStreamPtr writer = FormatFactory::instance().getOutputStreamParallelIfPossible(format, out, *sample_block, getContext()); + ODBCBlockInputStream inp(std::move(connection), query, *sample_block, max_block_size); copyData(inp, *writer); } } diff --git a/programs/odbc-bridge/MainHandler.h b/programs/odbc-bridge/MainHandler.h index e237ede5814..bc0fca8b9a5 100644 --- a/programs/odbc-bridge/MainHandler.h +++ b/programs/odbc-bridge/MainHandler.h @@ -1,14 +1,13 @@ #pragma once -#include +#include #include - #include -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wunused-parameter" -#include -#pragma GCC diagnostic pop + +#include +#include + namespace DB { @@ -17,20 +16,16 @@ namespace DB * and also query in request body * response in RowBinary format */ -class ODBCHandler : public HTTPRequestHandler +class ODBCHandler : public HTTPRequestHandler, WithContext { public: - using PoolPtr = std::shared_ptr; - using PoolMap = std::unordered_map; - - ODBCHandler(std::shared_ptr pool_map_, + ODBCHandler( size_t keep_alive_timeout_, - Context & context_, + ContextPtr context_, const String & mode_) - : log(&Poco::Logger::get("ODBCHandler")) - , pool_map(pool_map_) + : WithContext(context_) + , log(&Poco::Logger::get("ODBCHandler")) , keep_alive_timeout(keep_alive_timeout_) - , context(context_) , mode(mode_) { } @@ -40,14 +35,11 @@ public: private: Poco::Logger * log; - std::shared_ptr 
pool_map; size_t keep_alive_timeout; - Context & context; String mode; static inline std::mutex mutex; - PoolPtr getPool(const std::string & connection_str); void processError(HTTPServerResponse & response, const std::string & message); }; diff --git a/programs/odbc-bridge/ODBCBlockInputStream.cpp b/programs/odbc-bridge/ODBCBlockInputStream.cpp index b8a4209ac94..c695c8db9cf 100644 --- a/programs/odbc-bridge/ODBCBlockInputStream.cpp +++ b/programs/odbc-bridge/ODBCBlockInputStream.cpp @@ -1,5 +1,7 @@ #include "ODBCBlockInputStream.h" #include +#include +#include #include #include #include @@ -14,137 +16,142 @@ namespace DB { namespace ErrorCodes { - extern const int NUMBER_OF_COLUMNS_DOESNT_MATCH; extern const int UNKNOWN_TYPE; } ODBCBlockInputStream::ODBCBlockInputStream( - Poco::Data::Session && session_, const std::string & query_str, const Block & sample_block, const UInt64 max_block_size_) - : session{session_} - , statement{(this->session << query_str, Poco::Data::Keywords::now)} - , result{statement} - , iterator{result.begin()} + nanodbc::ConnectionHolderPtr connection, const std::string & query_str, const Block & sample_block, const UInt64 max_block_size_) + : log(&Poco::Logger::get("ODBCBlockInputStream")) , max_block_size{max_block_size_} - , log(&Poco::Logger::get("ODBCBlockInputStream")) + , query(query_str) { - if (sample_block.columns() != result.columnCount()) - throw Exception{"RecordSet contains " + toString(result.columnCount()) + " columns while " + toString(sample_block.columns()) - + " expected", - ErrorCodes::NUMBER_OF_COLUMNS_DOESNT_MATCH}; - description.init(sample_block); -} - - -namespace -{ - using ValueType = ExternalResultDescription::ValueType; - - void insertValue(IColumn & column, const ValueType type, const Poco::Dynamic::Var & value) - { - switch (type) - { - case ValueType::vtUInt8: - assert_cast(column).insertValue(value.convert()); - break; - case ValueType::vtUInt16: - assert_cast(column).insertValue(value.convert()); - break; - case ValueType::vtUInt32: - assert_cast(column).insertValue(value.convert()); - break; - case ValueType::vtUInt64: - assert_cast(column).insertValue(value.convert()); - break; - case ValueType::vtInt8: - assert_cast(column).insertValue(value.convert()); - break; - case ValueType::vtInt16: - assert_cast(column).insertValue(value.convert()); - break; - case ValueType::vtInt32: - assert_cast(column).insertValue(value.convert()); - break; - case ValueType::vtInt64: - assert_cast(column).insertValue(value.convert()); - break; - case ValueType::vtFloat32: - assert_cast(column).insertValue(value.convert()); - break; - case ValueType::vtFloat64: - assert_cast(column).insertValue(value.convert()); - break; - case ValueType::vtString: - assert_cast(column).insert(value.convert()); - break; - case ValueType::vtDate: - { - Poco::DateTime date = value.convert(); - assert_cast(column).insertValue(UInt16{LocalDate(date.year(), date.month(), date.day()).getDayNum()}); - break; - } - case ValueType::vtDateTime: - { - Poco::DateTime datetime = value.convert(); - assert_cast(column).insertValue(DateLUT::instance().makeDateTime( - datetime.year(), datetime.month(), datetime.day(), datetime.hour(), datetime.minute(), datetime.second())); - break; - } - case ValueType::vtUUID: - assert_cast(column).insert(parse(value.convert())); - break; - default: - throw Exception("Unsupported value type", ErrorCodes::UNKNOWN_TYPE); - } - } - - void insertDefaultValue(IColumn & column, const IColumn & sample_column) { column.insertFrom(sample_column, 0); } + 
result = execute(connection->get(), NANODBC_TEXT(query)); } Block ODBCBlockInputStream::readImpl() { - if (iterator == result.end()) - return {}; - - MutableColumns columns(description.sample_block.columns()); - for (const auto i : ext::range(0, columns.size())) - columns[i] = description.sample_block.getByPosition(i).column->cloneEmpty(); + if (finished) + return Block(); + MutableColumns columns(description.sample_block.cloneEmptyColumns()); size_t num_rows = 0; - while (iterator != result.end()) + + while (true) { - Poco::Data::Row & row = *iterator; - - for (const auto idx : ext::range(0, row.fieldCount())) + if (!result.next()) { - /// TODO This is extremely slow. - const Poco::Dynamic::Var & value = row[idx]; + finished = true; + break; + } - if (!value.isEmpty()) + for (int idx = 0; idx < result.columns(); ++idx) + { + const auto & sample = description.sample_block.getByPosition(idx); + + if (!result.is_null(idx)) { - if (description.types[idx].second) + bool is_nullable = description.types[idx].second; + + if (is_nullable) { ColumnNullable & column_nullable = assert_cast(*columns[idx]); - insertValue(column_nullable.getNestedColumn(), description.types[idx].first, value); + const auto & data_type = assert_cast(*sample.type); + insertValue(column_nullable.getNestedColumn(), data_type.getNestedType(), description.types[idx].first, result, idx); column_nullable.getNullMapData().emplace_back(0); } else - insertValue(*columns[idx], description.types[idx].first, value); + { + insertValue(*columns[idx], sample.type, description.types[idx].first, result, idx); + } } else - insertDefaultValue(*columns[idx], *description.sample_block.getByPosition(idx).column); + insertDefaultValue(*columns[idx], *sample.column); } - ++iterator; - - ++num_rows; - if (num_rows == max_block_size) + if (++num_rows == max_block_size) break; } return description.sample_block.cloneWithColumns(std::move(columns)); } + +void ODBCBlockInputStream::insertValue( + IColumn & column, const DataTypePtr data_type, const ValueType type, nanodbc::result & row, size_t idx) +{ + switch (type) + { + case ValueType::vtUInt8: + assert_cast(column).insertValue(row.get(idx)); + break; + case ValueType::vtUInt16: + assert_cast(column).insertValue(row.get(idx)); + break; + case ValueType::vtUInt32: + assert_cast(column).insertValue(row.get(idx)); + break; + case ValueType::vtUInt64: + assert_cast(column).insertValue(row.get(idx)); + break; + case ValueType::vtInt8: + assert_cast(column).insertValue(row.get(idx)); + break; + case ValueType::vtInt16: + assert_cast(column).insertValue(row.get(idx)); + break; + case ValueType::vtInt32: + assert_cast(column).insertValue(row.get(idx)); + break; + case ValueType::vtInt64: + assert_cast(column).insertValue(row.get(idx)); + break; + case ValueType::vtFloat32: + assert_cast(column).insertValue(row.get(idx)); + break; + case ValueType::vtFloat64: + assert_cast(column).insertValue(row.get(idx)); + break; + case ValueType::vtFixedString:[[fallthrough]]; + case ValueType::vtString: + assert_cast(column).insert(row.get(idx)); + break; + case ValueType::vtUUID: + { + auto value = row.get(idx); + assert_cast(column).insert(parse(value.data(), value.size())); + break; + } + case ValueType::vtDate: + assert_cast(column).insertValue(UInt16{LocalDate{row.get(idx)}.getDayNum()}); + break; + case ValueType::vtDateTime: + { + auto value = row.get(idx); + ReadBufferFromString in(value); + time_t time = 0; + readDateTimeText(time, in); + if (time < 0) + time = 0; + assert_cast(column).insertValue(time); + 
break; + } + case ValueType::vtDateTime64:[[fallthrough]]; + case ValueType::vtDecimal32: [[fallthrough]]; + case ValueType::vtDecimal64: [[fallthrough]]; + case ValueType::vtDecimal128: [[fallthrough]]; + case ValueType::vtDecimal256: + { + auto value = row.get(idx); + ReadBufferFromString istr(value); + data_type->getDefaultSerialization()->deserializeWholeText(column, istr, FormatSettings{}); + break; + } + default: + throw Exception("Unsupported value type", ErrorCodes::UNKNOWN_TYPE); + } +} + } diff --git a/programs/odbc-bridge/ODBCBlockInputStream.h b/programs/odbc-bridge/ODBCBlockInputStream.h index 13491e05822..26aa766dbcc 100644 --- a/programs/odbc-bridge/ODBCBlockInputStream.h +++ b/programs/odbc-bridge/ODBCBlockInputStream.h @@ -3,10 +3,8 @@ #include #include #include -#include -#include -#include #include +#include "ODBCConnectionFactory.h" namespace DB @@ -15,25 +13,32 @@ namespace DB class ODBCBlockInputStream final : public IBlockInputStream { public: - ODBCBlockInputStream( - Poco::Data::Session && session_, const std::string & query_str, const Block & sample_block, const UInt64 max_block_size_); + ODBCBlockInputStream(nanodbc::ConnectionHolderPtr connection, const std::string & query_str, const Block & sample_block, const UInt64 max_block_size_); String getName() const override { return "ODBC"; } Block getHeader() const override { return description.sample_block.cloneEmpty(); } private: + using QueryResult = std::shared_ptr; + using ValueType = ExternalResultDescription::ValueType; + Block readImpl() override; - Poco::Data::Session session; - Poco::Data::Statement statement; - Poco::Data::RecordSet result; - Poco::Data::RecordSet::Iterator iterator; + static void insertValue(IColumn & column, const DataTypePtr data_type, const ValueType type, nanodbc::result & row, size_t idx); + static void insertDefaultValue(IColumn & column, const IColumn & sample_column) + { + column.insertFrom(sample_column, 0); + } + + Poco::Logger * log; const UInt64 max_block_size; ExternalResultDescription description; - Poco::Logger * log; + nanodbc::result result; + String query; + bool finished = false; }; } diff --git a/programs/odbc-bridge/ODBCBlockOutputStream.cpp b/programs/odbc-bridge/ODBCBlockOutputStream.cpp index db3c9441419..dc965b3b2a7 100644 --- a/programs/odbc-bridge/ODBCBlockOutputStream.cpp +++ b/programs/odbc-bridge/ODBCBlockOutputStream.cpp @@ -1,5 +1,6 @@ #include "ODBCBlockOutputStream.h" +#include #include #include #include @@ -8,16 +9,14 @@ #include #include #include "getIdentifierQuote.h" +#include +#include +#include namespace DB { -namespace ErrorCodes -{ - extern const int UNKNOWN_TYPE; -} - namespace { using ValueType = ExternalResultDescription::ValueType; @@ -39,70 +38,21 @@ namespace query.IAST::format(settings); return buf.str(); } - - std::string getQuestionMarks(size_t n) - { - std::string result = "("; - for (size_t i = 0; i < n; ++i) - { - if (i > 0) - result += ","; - result += "?"; - } - return result + ")"; - } - - Poco::Dynamic::Var getVarFromField(const Field & field, const ValueType type) - { - switch (type) - { - case ValueType::vtUInt8: - return Poco::Dynamic::Var(static_cast(field.get())).convert(); - case ValueType::vtUInt16: - return Poco::Dynamic::Var(static_cast(field.get())).convert(); - case ValueType::vtUInt32: - return Poco::Dynamic::Var(static_cast(field.get())).convert(); - case ValueType::vtUInt64: - return Poco::Dynamic::Var(field.get()).convert(); - case ValueType::vtInt8: - return Poco::Dynamic::Var(static_cast(field.get())).convert(); - 
case ValueType::vtInt16: - return Poco::Dynamic::Var(static_cast(field.get())).convert(); - case ValueType::vtInt32: - return Poco::Dynamic::Var(static_cast(field.get())).convert(); - case ValueType::vtInt64: - return Poco::Dynamic::Var(field.get()).convert(); - case ValueType::vtFloat32: - return Poco::Dynamic::Var(field.get()).convert(); - case ValueType::vtFloat64: - return Poco::Dynamic::Var(field.get()).convert(); - case ValueType::vtString: - return Poco::Dynamic::Var(field.get()).convert(); - case ValueType::vtDate: - return Poco::Dynamic::Var(LocalDate(DayNum(field.get())).toString()).convert(); - case ValueType::vtDateTime: - return Poco::Dynamic::Var(DateLUT::instance().timeToString(time_t(field.get()))).convert(); - case ValueType::vtUUID: - return Poco::Dynamic::Var(UUID(field.get()).toUnderType().toHexString()).convert(); - default: - throw Exception("Unsupported value type", ErrorCodes::UNKNOWN_TYPE); - - } - __builtin_unreachable(); - } } -ODBCBlockOutputStream::ODBCBlockOutputStream(Poco::Data::Session && session_, +ODBCBlockOutputStream::ODBCBlockOutputStream(nanodbc::ConnectionHolderPtr connection_, const std::string & remote_database_name_, const std::string & remote_table_name_, const Block & sample_block_, + ContextPtr local_context_, IdentifierQuotingStyle quoting_) - : session(session_) + : log(&Poco::Logger::get("ODBCBlockOutputStream")) + , connection(std::move(connection_)) , db_name(remote_database_name_) , table_name(remote_table_name_) , sample_block(sample_block_) + , local_context(local_context_) , quoting(quoting_) - , log(&Poco::Logger::get("ODBCBlockOutputStream")) { description.init(sample_block); } @@ -114,28 +64,12 @@ Block ODBCBlockOutputStream::getHeader() const void ODBCBlockOutputStream::write(const Block & block) { - ColumnsWithTypeAndName columns; - for (size_t i = 0; i < block.columns(); ++i) - columns.push_back({block.getColumns()[i], sample_block.getDataTypes()[i], sample_block.getNames()[i]}); + WriteBufferFromOwnString values_buf; + auto writer = FormatFactory::instance().getOutputStream("Values", values_buf, sample_block, local_context); + writer->write(block); - std::vector row_to_insert(block.columns()); - Poco::Data::Statement statement(session << getInsertQuery(db_name, table_name, columns, quoting) + getQuestionMarks(block.columns())); - for (size_t i = 0; i < block.columns(); ++i) - statement.addBind(Poco::Data::Keywords::use(row_to_insert[i])); - - for (size_t i = 0; i < block.rows(); ++i) - { - for (size_t col_idx = 0; col_idx < block.columns(); ++col_idx) - { - Field val; - columns[col_idx].column->get(i, val); - if (val.isNull()) - row_to_insert[col_idx] = Poco::Dynamic::Var(); - else - row_to_insert[col_idx] = getVarFromField(val, description.types[col_idx].first); - } - statement.execute(); - } + std::string query = getInsertQuery(db_name, table_name, block.getColumnsWithTypeAndName(), quoting) + values_buf.str(); + execute(connection->get(), query); } } diff --git a/programs/odbc-bridge/ODBCBlockOutputStream.h b/programs/odbc-bridge/ODBCBlockOutputStream.h index 39e1d6f77ac..c370a0a9c7b 100644 --- a/programs/odbc-bridge/ODBCBlockOutputStream.h +++ b/programs/odbc-bridge/ODBCBlockOutputStream.h @@ -2,30 +2,41 @@ #include #include -#include #include #include +#include +#include "ODBCConnectionFactory.h" + namespace DB { + class ODBCBlockOutputStream : public IBlockOutputStream { + public: - ODBCBlockOutputStream(Poco::Data::Session && session_, const std::string & remote_database_name_, - const std::string & remote_table_name_, 
const Block & sample_block_, IdentifierQuotingStyle quoting); + ODBCBlockOutputStream( + nanodbc::ConnectionHolderPtr connection_, + const std::string & remote_database_name_, + const std::string & remote_table_name_, + const Block & sample_block_, + ContextPtr local_context_, + IdentifierQuotingStyle quoting); Block getHeader() const override; void write(const Block & block) override; private: - Poco::Data::Session session; + Poco::Logger * log; + + nanodbc::ConnectionHolderPtr connection; std::string db_name; std::string table_name; Block sample_block; + ContextPtr local_context; IdentifierQuotingStyle quoting; ExternalResultDescription description; - Poco::Logger * log; }; } diff --git a/programs/odbc-bridge/ODBCBridge.h b/programs/odbc-bridge/ODBCBridge.h index 598167835c6..b17051dce91 100644 --- a/programs/odbc-bridge/ODBCBridge.h +++ b/programs/odbc-bridge/ODBCBridge.h @@ -13,12 +13,12 @@ class ODBCBridge : public IBridge { protected: - const std::string bridgeName() const override + std::string bridgeName() const override { return "ODBCBridge"; } - HandlerFactoryPtr getHandlerFactoryPtr(Context & context) const override + HandlerFactoryPtr getHandlerFactoryPtr(ContextPtr context) const override { return std::make_shared("ODBCRequestHandlerFactory-factory", keep_alive_timeout, context); } diff --git a/programs/odbc-bridge/ODBCConnectionFactory.h b/programs/odbc-bridge/ODBCConnectionFactory.h new file mode 100644 index 00000000000..41ed5f1b31f --- /dev/null +++ b/programs/odbc-bridge/ODBCConnectionFactory.h @@ -0,0 +1,98 @@ +#pragma once + +#include +#include +#include +#include +#include + +namespace DB +{ +namespace ErrorCodes +{ + extern const int NO_FREE_CONNECTION; +} +} + +namespace nanodbc +{ + +using ConnectionPtr = std::unique_ptr; +using Pool = BorrowedObjectPool; +using PoolPtr = std::shared_ptr; + +class ConnectionHolder +{ +public: + ConnectionHolder(PoolPtr pool_, ConnectionPtr connection_) : pool(pool_), connection(std::move(connection_)) {} + + ConnectionHolder(const ConnectionHolder & other) = delete; + + ~ConnectionHolder() { pool->returnObject(std::move(connection)); } + + nanodbc::connection & get() const + { + assert(connection != nullptr); + return *connection; + } + +private: + PoolPtr pool; + ConnectionPtr connection; +}; + +using ConnectionHolderPtr = std::unique_ptr; +} + + +namespace DB +{ + +static constexpr inline auto ODBC_CONNECT_TIMEOUT = 100; +static constexpr inline auto ODBC_POOL_WAIT_TIMEOUT = 10000; + +class ODBCConnectionFactory final : private boost::noncopyable +{ +public: + static ODBCConnectionFactory & instance() + { + static ODBCConnectionFactory ret; + return ret; + } + + nanodbc::ConnectionHolderPtr get(const std::string & connection_string, size_t pool_size) + { + std::lock_guard lock(mutex); + + if (!factory.count(connection_string)) + factory.emplace(std::make_pair(connection_string, std::make_shared(pool_size))); + + auto & pool = factory[connection_string]; + + nanodbc::ConnectionPtr connection; + auto connection_available = pool->tryBorrowObject(connection, []() { return nullptr; }, ODBC_POOL_WAIT_TIMEOUT); + + if (!connection_available) + throw Exception("Unable to fetch connection within the timeout", ErrorCodes::NO_FREE_CONNECTION); + + try + { + if (!connection || !connection->connected()) + connection = std::make_unique(connection_string, ODBC_CONNECT_TIMEOUT); + } + catch (...) 
+ { + pool->returnObject(std::move(connection)); + throw; + } + + return std::make_unique<nanodbc::ConnectionHolder>(factory[connection_string], std::move(connection)); + } + +private: + /// [connection_settings_string] -> [connection_pool] + using PoolFactory = std::unordered_map<std::string, nanodbc::PoolPtr>; + PoolFactory factory; + std::mutex mutex; +}; + +} diff --git a/programs/odbc-bridge/SchemaAllowedHandler.cpp b/programs/odbc-bridge/SchemaAllowedHandler.cpp index d4a70db61f4..3a20148780d 100644 --- a/programs/odbc-bridge/SchemaAllowedHandler.cpp +++ b/programs/odbc-bridge/SchemaAllowedHandler.cpp @@ -2,33 +2,26 @@ #if USE_ODBC -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include "validateODBCConnectionString.h" +#include +#include +#include +#include +#include +#include +#include "validateODBCConnectionString.h" +#include "ODBCConnectionFactory.h" +#include +#include -# define POCO_SQL_ODBC_CLASS Poco::Data::ODBC namespace DB { namespace { - bool isSchemaAllowed(SQLHDBC hdbc) + bool isSchemaAllowed(nanodbc::connection & connection) { - SQLUINTEGER value; - SQLSMALLINT value_length = sizeof(value); - SQLRETURN r = POCO_SQL_ODBC_CLASS::SQLGetInfo(hdbc, SQL_SCHEMA_USAGE, &value, sizeof(value), &value_length); - - if (POCO_SQL_ODBC_CLASS::Utility::isError(r)) - throw POCO_SQL_ODBC_CLASS::ConnectionException(hdbc); - - return value != 0; + uint32_t result = connection.get_info<uint32_t>(SQL_SCHEMA_USAGE); + return result != 0; } } @@ -55,10 +48,12 @@ void SchemaAllowedHandler::handleRequest(HTTPServerRequest & request, HTTPServer try { std::string connection_string = params.get("connection_string"); - POCO_SQL_ODBC_CLASS::SessionImpl session(validateODBCConnectionString(connection_string), DBMS_DEFAULT_CONNECT_TIMEOUT_SEC); - SQLHDBC hdbc = session.dbc().handle(); - bool result = isSchemaAllowed(hdbc); + auto connection = ODBCConnectionFactory::instance().get( + validateODBCConnectionString(connection_string), + getContext()->getSettingsRef().odbc_bridge_connection_pool_size); + + bool result = isSchemaAllowed(connection->get()); WriteBufferFromHTTPServerResponse out(response, request.getMethod() == Poco::Net::HTTPRequest::HTTP_HEAD, keep_alive_timeout); try diff --git a/programs/odbc-bridge/SchemaAllowedHandler.h b/programs/odbc-bridge/SchemaAllowedHandler.h index 91eddf67803..d7b922ed05b 100644 --- a/programs/odbc-bridge/SchemaAllowedHandler.h +++ b/programs/odbc-bridge/SchemaAllowedHandler.h @@ -1,22 +1,25 @@ #pragma once +#include #include - #include #if USE_ODBC + namespace DB { class Context; /// This handler establishes connection to database, and retrieves whether schema is allowed.
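// A standalone sketch of the keyed-pool + RAII-holder pattern that the new
// ODBCConnectionFactory above relies on. All names below are simplified
// stand-ins (this is not the actual BorrowedObjectPool interface); the point
// is that the holder's destructor always returns the connection slot to the
// pool, even when the HTTP handler throws mid-request.
#include <memory>
#include <mutex>
#include <vector>

struct Connection { /* wraps one live ODBC session */ };

class Pool
{
public:
    std::unique_ptr<Connection> borrow()
    {
        std::lock_guard lock(mutex);
        if (free_list.empty())
            return std::make_unique<Connection>();   /// created lazily on first use
        auto conn = std::move(free_list.back());
        free_list.pop_back();
        return conn;
    }

    void returnObject(std::unique_ptr<Connection> conn)
    {
        std::lock_guard lock(mutex);
        free_list.push_back(std::move(conn));        /// slot becomes reusable
    }

private:
    std::mutex mutex;
    std::vector<std::unique_ptr<Connection>> free_list;
};

class Holder
{
public:
    Holder(std::shared_ptr<Pool> pool_, std::unique_ptr<Connection> conn_)
        : pool(std::move(pool_)), conn(std::move(conn_)) {}

    Holder(const Holder &) = delete;

    ~Holder() { pool->returnObject(std::move(conn)); }   /// RAII return

    Connection & get() const { return *conn; }

private:
    std::shared_ptr<Pool> pool;
    std::unique_ptr<Connection> conn;
};

// Usage: { Holder holder(pool, pool->borrow()); /* use holder.get() */ }
// The connection goes back to the pool at the closing brace, throw or no throw.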
-class SchemaAllowedHandler : public HTTPRequestHandler +class SchemaAllowedHandler : public HTTPRequestHandler, WithContext { public: - SchemaAllowedHandler(size_t keep_alive_timeout_, Context &) - : log(&Poco::Logger::get("SchemaAllowedHandler")), keep_alive_timeout(keep_alive_timeout_) + SchemaAllowedHandler(size_t keep_alive_timeout_, ContextPtr context_) + : WithContext(context_) + , log(&Poco::Logger::get("SchemaAllowedHandler")) + , keep_alive_timeout(keep_alive_timeout_) { } diff --git a/programs/odbc-bridge/getIdentifierQuote.cpp b/programs/odbc-bridge/getIdentifierQuote.cpp index 15b3749d37d..586e3c4e5dd 100644 --- a/programs/odbc-bridge/getIdentifierQuote.cpp +++ b/programs/odbc-bridge/getIdentifierQuote.cpp @@ -2,11 +2,9 @@ #if USE_ODBC -# include -# include -# include - -# define POCO_SQL_ODBC_CLASS Poco::Data::ODBC +#include +#include +#include namespace DB @@ -17,33 +15,27 @@ namespace ErrorCodes extern const int ILLEGAL_TYPE_OF_ARGUMENT; } -std::string getIdentifierQuote(SQLHDBC hdbc) + +std::string getIdentifierQuote(nanodbc::connection & connection) { - std::string identifier; - - SQLSMALLINT t; - SQLRETURN r = POCO_SQL_ODBC_CLASS::SQLGetInfo(hdbc, SQL_IDENTIFIER_QUOTE_CHAR, nullptr, 0, &t); - - if (POCO_SQL_ODBC_CLASS::Utility::isError(r)) - throw POCO_SQL_ODBC_CLASS::ConnectionException(hdbc); - - if (t > 0) + std::string quote; + try { - // I have no idea, why to add '2' here, got from: contrib/poco/Data/ODBC/src/ODBCStatementImpl.cpp:60 (SQL_DRIVER_NAME) - identifier.resize(static_cast(t) + 2); - - if (POCO_SQL_ODBC_CLASS::Utility::isError(POCO_SQL_ODBC_CLASS::SQLGetInfo( - hdbc, SQL_IDENTIFIER_QUOTE_CHAR, &identifier[0], SQLSMALLINT((identifier.length() - 1) * sizeof(identifier[0])), &t))) - throw POCO_SQL_ODBC_CLASS::ConnectionException(hdbc); - - identifier.resize(static_cast(t)); + quote = connection.get_info(SQL_IDENTIFIER_QUOTE_CHAR); } - return identifier; + catch (...) + { + LOG_WARNING(&Poco::Logger::get("ODBCGetIdentifierQuote"), "Cannot fetch identifier quote. Default double quote is used. 
Reason: {}", getCurrentExceptionMessage(false)); + return "\""; + } + + return quote; } -IdentifierQuotingStyle getQuotingStyle(SQLHDBC hdbc) + +IdentifierQuotingStyle getQuotingStyle(nanodbc::connection & connection) { - auto identifier_quote = getIdentifierQuote(hdbc); + auto identifier_quote = getIdentifierQuote(connection); if (identifier_quote.length() == 0) return IdentifierQuotingStyle::None; else if (identifier_quote[0] == '`') diff --git a/programs/odbc-bridge/getIdentifierQuote.h b/programs/odbc-bridge/getIdentifierQuote.h index 0fb4c3bddb1..7f7156eff82 100644 --- a/programs/odbc-bridge/getIdentifierQuote.h +++ b/programs/odbc-bridge/getIdentifierQuote.h @@ -2,20 +2,19 @@ #if USE_ODBC -# include -# include -# include - -# include - +#include +#include +#include #include +#include + namespace DB { -std::string getIdentifierQuote(SQLHDBC hdbc); +std::string getIdentifierQuote(nanodbc::connection & connection); -IdentifierQuotingStyle getQuotingStyle(SQLHDBC hdbc); +IdentifierQuotingStyle getQuotingStyle(nanodbc::connection & connection); } diff --git a/programs/server/.gitignore b/programs/server/.gitignore index b774776e4be..ddc480e4b29 100644 --- a/programs/server/.gitignore +++ b/programs/server/.gitignore @@ -1,8 +1,11 @@ -/access -/dictionaries_lib -/flags -/format_schemas +/metadata /metadata_dropped +/data +/store +/access +/flags +/dictionaries_lib +/format_schemas /preprocessed_configs /shadow /tmp diff --git a/programs/server/CMakeLists.txt b/programs/server/CMakeLists.txt index 697851b294b..0dcfbce1c30 100644 --- a/programs/server/CMakeLists.txt +++ b/programs/server/CMakeLists.txt @@ -19,6 +19,7 @@ set (CLICKHOUSE_SERVER_LINK clickhouse_storages_system clickhouse_table_functions string_utils + jemalloc ${LINK_RESOURCE_LIB} @@ -28,7 +29,7 @@ set (CLICKHOUSE_SERVER_LINK clickhouse_program_add(server) -install(FILES config.xml users.xml DESTINATION ${CLICKHOUSE_ETC_DIR}/clickhouse-server COMPONENT clickhouse) +install(FILES config.xml users.xml DESTINATION "${CLICKHOUSE_ETC_DIR}/clickhouse-server" COMPONENT clickhouse) # TODO We actually need this on Mac, FreeBSD. 
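Stepping back to the odbc-bridge changes above: getQuotingStyle() now derives identifier quoting from the driver's SQL_IDENTIFIER_QUOTE_CHAR, and ODBCBlockOutputStream builds a single INSERT statement whose rows are pre-rendered by the "Values" output format. A hedged, self-contained sketch of that flow (simplified; the real code must also escape quote characters inside identifiers):

#include <string>

enum class IdentifierQuotingStyle { None, Backticks, DoubleQuotes };

// Assumed mapping, mirroring getQuotingStyle() above: empty -> None,
// backtick -> Backticks, anything else (typically '"') -> DoubleQuotes.
IdentifierQuotingStyle styleFromQuote(const std::string & quote)
{
    if (quote.empty())
        return IdentifierQuotingStyle::None;
    return quote[0] == '`' ? IdentifierQuotingStyle::Backticks
                           : IdentifierQuotingStyle::DoubleQuotes;
}

std::string quoteIdentifier(const std::string & ident, IdentifierQuotingStyle style)
{
    switch (style)
    {
        case IdentifierQuotingStyle::Backticks:    return "`" + ident + "`";
        case IdentifierQuotingStyle::DoubleQuotes: return "\"" + ident + "\"";
        default:                                   return ident;
    }
}

// "values" stands for what the Values format wrote, e.g. "(1,'a'),(2,'b')".
std::string buildInsert(const std::string & table, IdentifierQuotingStyle style, const std::string & values)
{
    return "INSERT INTO " + quoteIdentifier(table, style) + " VALUES " + values;
}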
if (OS_LINUX) @@ -45,12 +46,12 @@ if (OS_LINUX) # PPC64LE fails to do this with objcopy, use ld or lld instead if (ARCH_PPC64LE) add_custom_command(OUTPUT ${RESOURCE_OBJ} - COMMAND cd ${CMAKE_CURRENT_SOURCE_DIR} && ${CMAKE_LINKER} -m elf64lppc -r -b binary -o ${CMAKE_CURRENT_BINARY_DIR}/${RESOURCE_OBJ} ${RESOURCE_FILE}) + COMMAND cd ${CMAKE_CURRENT_SOURCE_DIR} && ${CMAKE_LINKER} -m elf64lppc -r -b binary -o "${CMAKE_CURRENT_BINARY_DIR}/${RESOURCE_OBJ}" ${RESOURCE_FILE}) else() add_custom_command(OUTPUT ${RESOURCE_OBJ} - COMMAND cd ${CMAKE_CURRENT_SOURCE_DIR} && ${OBJCOPY_PATH} -I binary ${OBJCOPY_ARCH_OPTIONS} ${RESOURCE_FILE} ${CMAKE_CURRENT_BINARY_DIR}/${RESOURCE_OBJ} + COMMAND cd ${CMAKE_CURRENT_SOURCE_DIR} && ${OBJCOPY_PATH} -I binary ${OBJCOPY_ARCH_OPTIONS} ${RESOURCE_FILE} "${CMAKE_CURRENT_BINARY_DIR}/${RESOURCE_OBJ}" COMMAND ${OBJCOPY_PATH} --rename-section .data=.rodata,alloc,load,readonly,data,contents - ${CMAKE_CURRENT_BINARY_DIR}/${RESOURCE_OBJ} ${CMAKE_CURRENT_BINARY_DIR}/${RESOURCE_OBJ}) + "${CMAKE_CURRENT_BINARY_DIR}/${RESOURCE_OBJ}" "${CMAKE_CURRENT_BINARY_DIR}/${RESOURCE_OBJ}") endif() set_source_files_properties(${RESOURCE_OBJ} PROPERTIES EXTERNAL_OBJECT true GENERATED true) endforeach(RESOURCE_FILE) diff --git a/programs/server/Server.cpp b/programs/server/Server.cpp index f2f43aabc7d..6b3136dc200 100644 --- a/programs/server/Server.cpp +++ b/programs/server/Server.cpp @@ -13,7 +13,9 @@ #include #include #include +#include #include +#include #include #include #include @@ -47,6 +49,7 @@ #include #include #include +#include #include #include #include @@ -85,6 +88,8 @@ # include # include # include +# include +# include #endif #if USE_SSL @@ -100,6 +105,10 @@ # include #endif +#if USE_JEMALLOC +# include +#endif + namespace CurrentMetrics { extern const Metric Revision; @@ -108,11 +117,35 @@ namespace CurrentMetrics extern const Metric MaxDDLEntryID; } +#if USE_JEMALLOC +static bool jemallocOptionEnabled(const char *name) +{ + bool value; + size_t size = sizeof(value); + + if (mallctl(name, reinterpret_cast(&value), &size, /* newp= */ nullptr, /* newlen= */ 0)) + throw Poco::SystemException("mallctl() failed"); + + return value; +} +#else +static bool jemallocOptionEnabled(const char *) { return 0; } +#endif + int mainEntryClickHouseServer(int argc, char ** argv) { DB::Server app; + if (jemallocOptionEnabled("opt.background_thread")) + { + LOG_ERROR(&app.logger(), + "jemalloc.background_thread was requested, " + "however ClickHouse uses percpu_arena and background_thread most likely will not give any benefits, " + "and also background_thread is not compatible with ClickHouse watchdog " + "(that can be disabled with CLICKHOUSE_WATCHDOG_ENABLE=0)"); + } + /// Do not fork separate process from watchdog if we attached to terminal. /// Otherwise it breaks gdb usage. /// Can be overridden by environment variable (cannot use server config at this moment). 
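The watchdog default described in the comment above, and the environment override mentioned there and in the jemalloc warning (CLICKHOUSE_WATCHDOG_ENABLE=0 disables it), can be pictured with a small hypothetical sketch; the parsing shown is illustrative, not the actual daemon code:

#include <cstdlib>
#include <cstring>

bool watchdogEnabled(bool attached_to_terminal)
{
    /// Default: no watchdog when attached to a terminal (it breaks gdb usage),
    /// watchdog otherwise.
    bool enabled = !attached_to_terminal;
    if (const char * env = std::getenv("CLICKHOUSE_WATCHDOG_ENABLE"))
        enabled = (std::strcmp(env, "0") != 0);   /// "0" disables, anything else enables
    return enabled;
}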
@@ -172,18 +205,24 @@ int waitServersToFinish(std::vector<ProtocolServerAdapter> & servers, size_t seconds_to_wait) const int sleep_one_ms = 100; int sleep_current_ms = 0; int current_connections = 0; - while (sleep_current_ms < sleep_max_ms) + for (;;) { current_connections = 0; + for (auto & server : servers) { server.stop(); current_connections += server.currentConnections(); } + if (!current_connections) break; + sleep_current_ms += sleep_one_ms; - std::this_thread::sleep_for(std::chrono::milliseconds(sleep_one_ms)); + if (sleep_current_ms < sleep_max_ms) + std::this_thread::sleep_for(std::chrono::milliseconds(sleep_one_ms)); + else + break; } return current_connections; } @@ -347,6 +386,11 @@ void Server::initialize(Poco::Util::Application & self) { BaseDaemon::initialize(self); logger().information("starting up"); + + LOG_INFO(&logger(), "OS Name = {}, OS Version = {}, OS Architecture = {}", + Poco::Environment::osName(), + Poco::Environment::osVersion(), + Poco::Environment::osArchitecture()); } std::string Server::getDefaultCorePath() const @@ -391,6 +435,19 @@ void checkForUsersNotInMainConfig( } +[[noreturn]] void forceShutdown() +{ +#if defined(THREAD_SANITIZER) && defined(OS_LINUX) + /// Thread sanitizer tries to do something on exit that we don't need if we want to exit immediately, + /// while connection handling threads are still running. + (void)syscall(SYS_exit_group, 0); + __builtin_unreachable(); +#else + _exit(0); +#endif +} + + int Server::main(const std::vector<std::string> & /*args*/) { Poco::Logger * log = &logger(); @@ -425,8 +482,7 @@ int Server::main(const std::vector<std::string> & /*args*/) * settings, available functions, data types, aggregate functions, databases, ... */ auto shared_context = Context::createShared(); - auto global_context = std::make_unique<Context>(Context::createGlobal(shared_context.get())); - global_context_ptr = global_context.get(); + global_context = Context::createGlobal(shared_context.get()); global_context->makeGlobalContext(); global_context->setApplicationType(Context::ApplicationType::SERVER); @@ -688,16 +744,8 @@ int Server::main(const std::vector<std::string> & /*args*/) } } - if (config().has("interserver_http_credentials")) - { - String user = config().getString("interserver_http_credentials.user", ""); - String password = config().getString("interserver_http_credentials.password", ""); - - if (user.empty()) - throw Exception("Configuration parameter interserver_http_credentials user can't be empty", ErrorCodes::NO_ELEMENTS_IN_CONFIG); - - global_context->setInterserverCredentials(user, password); - } + LOG_DEBUG(log, "Initializing interserver credentials."); + global_context->updateInterserverCredentials(config()); if (config().has("macros")) global_context->setMacros(std::make_unique<Macros>(config(), "macros", log)); @@ -758,6 +806,7 @@ int Server::main(const std::vector<std::string> & /*args*/) global_context->setClustersConfig(config); global_context->setMacros(std::make_unique<Macros>(*config, "macros", log)); global_context->setExternalAuthenticatorsConfig(*config); + global_context->setExternalModelsConfig(config); /// Setup protection to avoid accidental DROP for big tables (that are greater than 50 GB by default) if (config->has("max_table_size_to_drop")) @@ -777,6 +826,7 @@ int Server::main(const std::vector<std::string> & /*args*/) } global_context->updateStorageConfiguration(*config); + global_context->updateInterserverCredentials(*config); }, /* already_loaded = */ false); /// Reload it right now (initial loading) @@ -835,7 +885,8 @@ int Server::main(const std::vector<std::string> & /*args*/) global_context->setMMappedFileCache(mmap_cache_size); #if
USE_EMBEDDED_COMPILER - size_t compiled_expression_cache_size = config().getUInt64("compiled_expression_cache_size", 500); + constexpr size_t compiled_expression_cache_size_default = 1024 * 1024 * 1024; + size_t compiled_expression_cache_size = config().getUInt64("compiled_expression_cache_size", compiled_expression_cache_size_default); CompiledExpressionCacheFactory::instance().init(compiled_expression_cache_size); #endif @@ -885,10 +936,30 @@ int Server::main(const std::vector & /*args*/) servers_to_start_before_tables->emplace_back( port_name, std::make_unique( - new KeeperTCPHandlerFactory(*this), server_pool, socket, new Poco::Net::TCPServerParams)); + new KeeperTCPHandlerFactory(*this, false), server_pool, socket, new Poco::Net::TCPServerParams)); LOG_INFO(log, "Listening for connections to Keeper (tcp): {}", address.toString()); }); + + const char * secure_port_name = "keeper_server.tcp_port_secure"; + createServer(listen_host, secure_port_name, listen_try, [&](UInt16 port) + { +#if USE_SSL + Poco::Net::SecureServerSocket socket; + auto address = socketBindListen(socket, listen_host, port, /* secure = */ true); + socket.setReceiveTimeout(settings.receive_timeout); + socket.setSendTimeout(settings.send_timeout); + servers_to_start_before_tables->emplace_back( + secure_port_name, + std::make_unique( + new KeeperTCPHandlerFactory(*this, true), server_pool, socket, new Poco::Net::TCPServerParams)); + LOG_INFO(log, "Listening for connections to Keeper with secure protocol (tcp_secure): {}", address.toString()); +#else + UNUSED(port); + throw Exception{"SSL support for TCP protocol is disabled because Poco library was built without NetSSL support.", + ErrorCodes::SUPPORT_IS_DISABLED}; +#endif + }); } #else throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "ClickHouse server built without NuRaft library. Cannot use internal coordination."); @@ -937,10 +1008,12 @@ int Server::main(const std::vector & /*args*/) global_context->shutdownKeeperStorageDispatcher(); } + /// Wait server pool to avoid use-after-free of destroyed context in the handlers + server_pool.joinAll(); + /** Explicitly destroy Context. It is more convenient than in destructor of Server, because logger is still available. * At this moment, no one could own shared part of Context. */ - global_context_ptr = nullptr; global_context.reset(); shared_context.reset(); LOG_DEBUG(log, "Destroyed global context."); @@ -954,14 +1027,16 @@ int Server::main(const std::vector & /*args*/) try { - loadMetadataSystem(*global_context); + loadMetadataSystem(global_context); /// After attaching system databases we can initialize system log. global_context->initializeSystemLogs(); auto & database_catalog = DatabaseCatalog::instance(); /// After the system database is created, attach virtual system tables (in addition to query_log and part_log) attachSystemTablesServer(*database_catalog.getSystemDatabase(), has_zookeeper); + /// We load temporary database first, because projections need it. 
+ database_catalog.loadTemporaryDatabase(); /// Then, load remaining databases - loadMetadata(*global_context, default_database); + loadMetadata(global_context, default_database); database_catalog.loadDatabases(); /// After loading validate that default database exists database_catalog.assertDatabaseExists(default_database); @@ -1041,7 +1116,7 @@ int Server::main(const std::vector & /*args*/) else { /// Initialize a watcher periodically updating DNS cache - dns_cache_updater = std::make_unique(*global_context, config().getInt("dns_cache_update_period", 15)); + dns_cache_updater = std::make_unique(global_context, config().getInt("dns_cache_update_period", 15)); } #if defined(OS_LINUX) @@ -1073,7 +1148,7 @@ int Server::main(const std::vector & /*args*/) { /// This object will periodically calculate some metrics. AsynchronousMetrics async_metrics( - *global_context, config().getUInt("asynchronous_metrics_update_period_s", 60), servers_to_start_before_tables, servers); + global_context, config().getUInt("asynchronous_metrics_update_period_s", 60), servers_to_start_before_tables, servers); attachSystemTablesAsync(*DatabaseCatalog::instance().getSystemDatabase(), async_metrics); for (const auto & listen_host : listen_hosts) @@ -1310,18 +1385,9 @@ int Server::main(const std::vector & /*args*/) } /// try to load dictionaries immediately, throw on error and die - ext::scope_guard dictionaries_xmls, models_xmls; try { - if (!config().getBool("dictionaries_lazy_load", true)) - { - global_context->tryCreateEmbeddedDictionaries(); - global_context->getExternalDictionariesLoader().enableAlwaysLoadEverything(true); - } - dictionaries_xmls = global_context->getExternalDictionariesLoader().addConfigRepository( - std::make_unique(config(), "dictionaries_config")); - models_xmls = global_context->getExternalModelsLoader().addConfigRepository( - std::make_unique(config(), "models_config")); + global_context->loadDictionaries(config()); } catch (...) { @@ -1336,7 +1402,7 @@ int Server::main(const std::vector & /*args*/) int pool_size = config().getInt("distributed_ddl.pool_size", 1); if (pool_size < 1) throw Exception("distributed_ddl.pool_size should be greater then 0", ErrorCodes::ARGUMENT_OUT_OF_BOUND); - global_context->setDDLWorker(std::make_unique(pool_size, ddl_zookeeper_path, *global_context, &config(), + global_context->setDDLWorker(std::make_unique(pool_size, ddl_zookeeper_path, global_context, &config(), "distributed_ddl", "DDLWorker", &CurrentMetrics::MaxDDLEntryID)); } @@ -1387,7 +1453,7 @@ int Server::main(const std::vector & /*args*/) /// Dump coverage here, because std::atexit callback would not be called. 
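/// (For contrast: std::exit(0) would run atexit handlers and static destructors
/// on the way out, while _exit(0) and SYS_exit_group terminate the process
/// immediately and skip both; that is why the coverage dump below has to be
/// invoked explicitly before forceShutdown().)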
dumpCoverageReportIfPossible(); LOG_INFO(log, "Will shutdown forcefully."); - _exit(Application::EXIT_OK); + forceShutdown(); } }); diff --git a/programs/server/Server.h b/programs/server/Server.h index fbfc26f6ee5..c698108767c 100644 --- a/programs/server/Server.h +++ b/programs/server/Server.h @@ -40,9 +40,9 @@ public: return BaseDaemon::logger(); } - Context & context() const override + ContextPtr context() const override { - return *global_context_ptr; + return global_context; } bool isCancelled() const override @@ -64,8 +64,7 @@ protected: std::string getDefaultCorePath() const override; private: - Context * global_context_ptr = nullptr; - + ContextPtr global_context; Poco::Net::SocketAddress socketBindListen(Poco::Net::ServerSocket & socket, const std::string & host, UInt16 port, [[maybe_unused]] bool secure = false) const; using CreateServerFunc = std::function; diff --git a/programs/server/config.xml b/programs/server/config.xml index 9c01b328290..df8a5266c39 100644 --- a/programs/server/config.xml +++ b/programs/server/config.xml @@ -7,7 +7,20 @@ --> - + trace /var/log/clickhouse-server/clickhouse-server.log /var/log/clickhouse-server/clickhouse-server.err.log @@ -76,7 +89,7 @@ - + 9005 1000 + + 1073741824 /var/lib/clickhouse/ @@ -505,6 +520,33 @@ false + ' | sed -e 's|.*>\(.*\)<.*|\1|') + wget https://github.com/ClickHouse/clickhouse-jdbc-bridge/releases/download/v$PKG_VER/clickhouse-jdbc-bridge_$PKG_VER-1_all.deb + apt install --no-install-recommends -f ./clickhouse-jdbc-bridge_$PKG_VER-1_all.deb + clickhouse-jdbc-bridge & + + * [CentOS/RHEL] + export MVN_URL=https://repo1.maven.org/maven2/ru/yandex/clickhouse/clickhouse-jdbc-bridge + export PKG_VER=$(curl -sL $MVN_URL/maven-metadata.xml | grep '' | sed -e 's|.*>\(.*\)<.*|\1|') + wget https://github.com/ClickHouse/clickhouse-jdbc-bridge/releases/download/v$PKG_VER/clickhouse-jdbc-bridge-$PKG_VER-1.noarch.rpm + yum localinstall -y clickhouse-jdbc-bridge-$PKG_VER-1.noarch.rpm + clickhouse-jdbc-bridge & + + Please refer to https://github.com/ClickHouse/clickhouse-jdbc-bridge#usage for more information. + ]]> + + diff --git a/programs/server/data/.gitignore b/programs/server/data/.gitignore deleted file mode 100644 index b9719d9d1d1..00000000000 --- a/programs/server/data/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -*.txt -*.dat -*.idx diff --git a/programs/server/metadata/.gitignore b/programs/server/metadata/.gitignore deleted file mode 100644 index d1b811b7de5..00000000000 --- a/programs/server/metadata/.gitignore +++ /dev/null @@ -1 +0,0 @@ -*.sql diff --git a/release b/release index f2052840cb0..9484d79630a 100755 --- a/release +++ b/release @@ -2,23 +2,6 @@ # If you have "no space left" error, you can change the location of temporary files with BUILDPLACE environment variable. 
-# Advanced usage: -# Test gcc-9: -# env DIST=disco EXTRAPACKAGES="gcc-9 g++-9" DEB_CC=gcc-9 DEB_CXX=g++-9 CMAKE_FLAGS=" -DNO_WERROR=1 " ./release -# Test gcc-8: -# env DIST=bionic EXTRAPACKAGES="gcc-8 g++-8" DEB_CC=gcc-8 DEB_CXX=g++-8 CMAKE_FLAGS=" -DNO_WERROR=1 " ./release -# Clang6 build: -# env DIST=bionic EXTRAPACKAGES="clang-6.0 libstdc++-8-dev lld-6.0 liblld-6.0-dev libclang-6.0-dev liblld-6.0" DEB_CC=clang-6.0 DEB_CXX=clang++-6.0 CMAKE_FLAGS=" -DNO_WERROR=1 " ./release -# Clang7 build: -# env DIST=unstable EXTRAPACKAGES="clang-7 libstdc++-8-dev lld-7 liblld-7-dev libclang-7-dev liblld-7" DEB_CC=clang-7 DEB_CXX=clang++-7 CMAKE_FLAGS=" -DNO_WERROR=1 " ./release -# Clang6 without internal compiler (for low memory arm64): -# env DIST=bionic DISABLE_PARALLEL=1 EXTRAPACKAGES="clang-6.0 libstdc++-8-dev" DEB_CC=clang-6.0 DEB_CXX=clang++-6.0 CMAKE_FLAGS=" -DNO_WERROR=1 " ./release -# Do not compile internal compiler but use from system: -# env CMAKE_FLAGS="-DUSE_INTERNAL_LLVM_LIBRARY=0 -DENABLE_EMBEDDED_COMPILER=0 -DINTERNAL_COMPILER_EXECUTABLE=clang-6.0 -DINTERNAL_LINKER_EXECUTABLE=ld.lld-6.0 -DINTERNAL_COMPILER_BIN_ROOT=/usr/bin/" EXTRAPACKAGES="clang-6.0 lld-6.0 libstdc++-8-dev" DEB_CXX=clang++-6.0 DEB_CC=clang-6.0 TEST_RUN=1 TEST_OPT="compile" ./release - -# Build with ASan: -# env SANITIZER=address ./release - # Version increment: # Default release: 18.1.2 -> 18.2.0: # ./release --version @@ -31,16 +14,16 @@ set -e +# Avoid dependency on locale +LC_ALL=C + CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) cd $CUR_DIR source "./utils/release/release_lib.sh" -PBUILDER_AUTOUPDATE=${PBUILDER_AUTOUPDATE=4320} - DEBUILD_NOSIGN_OPTIONS="-us -uc" DEBUILD_NODEPS_OPTIONS="-d" -USE_PBUILDER=${USE_PBUILDER=1} if [ -z "$VERSION_STRING" ] ; then get_revision_author @@ -58,16 +41,6 @@ do elif [[ $1 == '--version' ]]; then gen_revision_author $2 exit 0 - elif [[ $1 == '--pbuilder' ]]; then - # Default - shift - elif [[ $1 == '--no-pbuilder' ]]; then - USE_PBUILDER= - shift - elif [[ $1 == '--fast' ]]; then - # Wrong but fast pbuilder mode: create base package with all depends - EXTRAPACKAGES="$EXTRAPACKAGES debhelper cmake ninja-build gcc-8 g++-8 libc6-dev libicu-dev libreadline-dev psmisc bash expect python3 python3-lxml python3-termcolor python3-requests curl perl sudo openssl netcat-openbsd" - shift elif [[ $1 == '--rpm' ]]; then MAKE_RPM=1 shift @@ -117,43 +90,10 @@ echo -e "\nCurrent version is $VERSION_STRING" if [ -z "$NO_BUILD" ] ; then gen_changelog "$VERSION_STRING" "" "$AUTHOR" "" - if [ -z "$USE_PBUILDER" ] ; then - DEB_CC=${DEB_CC:=`which gcc-10 gcc-9 gcc | head -n1`} - DEB_CXX=${DEB_CXX:=`which gcc-10 g++-9 g++ | head -n1`} - # Build (only binary packages). - debuild --preserve-env -e PATH \ - -e DEB_CC=$DEB_CC -e DEB_CXX=$DEB_CXX -e CMAKE_FLAGS="$CMAKE_FLAGS" \ - -b ${DEBUILD_NOSIGN_OPTIONS} ${DEBUILD_NODEPS_OPTIONS} - else - export DIST=${DIST:=bionic} - export SET_BUILDRESULT=${SET_BUILDRESULT:=$CUR_DIR/..} - - if [[ -z `which pbuilder` ]] ; then - sudo apt install -y pbuilder devscripts ccache fakeroot debhelper debian-archive-keyring debian-keyring lsb-release - fi - - . $CUR_DIR/debian/.pbuilderrc - - if [[ ! -e "/usr/share/debootstrap/scripts/${DIST}" ]] ; then - sudo ln -s gutsy /usr/share/debootstrap/scripts/${DIST} - fi - - if [[ -n "$FORCE_PBUILDER_CREATE" || ! -e "$BASETGZ" ]] ; then - echo Creating base system $BASETGZ - [ ! 
-e "/usr/share/debootstrap/scripts/${DIST}" ] && sudo ln -s gutsy /usr/share/debootstrap/scripts/${DIST} - sudo --preserve-env bash -x pbuilder create --configfile $CUR_DIR/debian/.pbuilderrc $PBUILDER_OPT - fi - - if [ "$PBUILDER_AUTOUPDATE" -gt 0 ]; then - # Update every 3 days (60*24*3 minutes) - if [[ -n "$PBUILDER_UPDATE" ]] || test `find "$BASETGZ" -mmin +$PBUILDER_AUTOUPDATE` ; then - echo Updating base system $BASETGZ - sudo --preserve-env pbuilder update --configfile $CUR_DIR/debian/.pbuilderrc $PBUILDER_OPT - fi - fi - - pdebuild --configfile $CUR_DIR/debian/.pbuilderrc -- $PBUILDER_OPT - fi + # Build (only binary packages). + debuild --preserve-env -e PATH \ + -e DEB_CC=$DEB_CC -e DEB_CXX=$DEB_CXX -e CMAKE_FLAGS="$CMAKE_FLAGS" \ + -b ${DEBUILD_NOSIGN_OPTIONS} ${DEBUILD_NODEPS_OPTIONS} fi if [ -n "$MAKE_RPM" ]; then diff --git a/src/Access/AccessType.h b/src/Access/AccessType.h index 40740b3164e..22d99112cb7 100644 --- a/src/Access/AccessType.h +++ b/src/Access/AccessType.h @@ -53,6 +53,12 @@ enum class AccessType M(ALTER_CLEAR_INDEX, "CLEAR INDEX", TABLE, ALTER_INDEX) \ M(ALTER_INDEX, "INDEX", GROUP, ALTER_TABLE) /* allows to execute ALTER ORDER BY or ALTER {ADD|DROP...} INDEX */\ \ + M(ALTER_ADD_PROJECTION, "ADD PROJECTION", TABLE, ALTER_PROJECTION) \ + M(ALTER_DROP_PROJECTION, "DROP PROJECTION", TABLE, ALTER_PROJECTION) \ + M(ALTER_MATERIALIZE_PROJECTION, "MATERIALIZE PROJECTION", TABLE, ALTER_PROJECTION) \ + M(ALTER_CLEAR_PROJECTION, "CLEAR PROJECTION", TABLE, ALTER_PROJECTION) \ + M(ALTER_PROJECTION, "PROJECTION", GROUP, ALTER_TABLE) /* allows to execute ALTER ORDER BY or ALTER {ADD|DROP...} PROJECTION */\ + \ M(ALTER_ADD_CONSTRAINT, "ADD CONSTRAINT", TABLE, ALTER_CONSTRAINT) \ M(ALTER_DROP_CONSTRAINT, "DROP CONSTRAINT", TABLE, ALTER_CONSTRAINT) \ M(ALTER_CONSTRAINT, "CONSTRAINT", GROUP, ALTER_TABLE) /* allows to execute ALTER {ADD|DROP} CONSTRAINT */\ @@ -62,7 +68,7 @@ enum class AccessType enabled implicitly by the grant ALTER_TABLE */\ M(ALTER_SETTINGS, "ALTER SETTING, ALTER MODIFY SETTING, MODIFY SETTING", TABLE, ALTER_TABLE) /* allows to execute ALTER MODIFY SETTING */\ M(ALTER_MOVE_PARTITION, "ALTER MOVE PART, MOVE PARTITION, MOVE PART", TABLE, ALTER_TABLE) \ - M(ALTER_FETCH_PARTITION, "FETCH PARTITION", TABLE, ALTER_TABLE) \ + M(ALTER_FETCH_PARTITION, "ALTER FETCH PART, FETCH PARTITION", TABLE, ALTER_TABLE) \ M(ALTER_FREEZE_PARTITION, "FREEZE PARTITION, UNFREEZE", TABLE, ALTER_TABLE) \ \ M(ALTER_TABLE, "", GROUP, ALTER) \ @@ -130,8 +136,10 @@ enum class AccessType M(SYSTEM_RELOAD_CONFIG, "RELOAD CONFIG", GLOBAL, SYSTEM_RELOAD) \ M(SYSTEM_RELOAD_SYMBOLS, "RELOAD SYMBOLS", GLOBAL, SYSTEM_RELOAD) \ M(SYSTEM_RELOAD_DICTIONARY, "SYSTEM RELOAD DICTIONARIES, RELOAD DICTIONARY, RELOAD DICTIONARIES", GLOBAL, SYSTEM_RELOAD) \ + M(SYSTEM_RELOAD_MODEL, "SYSTEM RELOAD MODELS, RELOAD MODEL, RELOAD MODELS", GLOBAL, SYSTEM_RELOAD) \ M(SYSTEM_RELOAD_EMBEDDED_DICTIONARIES, "RELOAD EMBEDDED DICTIONARIES", GLOBAL, SYSTEM_RELOAD) /* implicitly enabled by the grant SYSTEM_RELOAD_DICTIONARY ON *.* */\ M(SYSTEM_RELOAD, "", GROUP, SYSTEM) \ + M(SYSTEM_RESTART_DISK, "SYSTEM RESTART DISK", GLOBAL, SYSTEM) \ M(SYSTEM_MERGES, "SYSTEM STOP MERGES, SYSTEM START MERGES, STOP_MERGES, START MERGES", TABLE, SYSTEM) \ M(SYSTEM_TTL_MERGES, "SYSTEM STOP TTL MERGES, SYSTEM START TTL MERGES, STOP TTL MERGES, START TTL MERGES", TABLE, SYSTEM) \ M(SYSTEM_FETCHES, "SYSTEM STOP FETCHES, SYSTEM START FETCHES, STOP FETCHES, START FETCHES", TABLE, SYSTEM) \ diff --git a/src/Access/DiskAccessStorage.cpp 
b/src/Access/DiskAccessStorage.cpp index 80594f66dfc..8c38cd02f9c 100644 --- a/src/Access/DiskAccessStorage.cpp +++ b/src/Access/DiskAccessStorage.cpp @@ -355,8 +355,9 @@ String DiskAccessStorage::getStorageParamsJSON() const std::lock_guard lock{mutex}; Poco::JSON::Object json; json.set("path", directory_path); - if (readonly) - json.set("readonly", readonly.load()); + bool readonly_loaded = readonly; + if (readonly_loaded) + json.set("readonly", Poco::Dynamic::Var{true}); std::ostringstream oss; // STYLE_CHECK_ALLOW_STD_STRING_STREAM oss.exceptions(std::ios::failbit); Poco::JSON::Stringifier::stringify(json, oss); diff --git a/src/Access/ExternalAuthenticators.cpp b/src/Access/ExternalAuthenticators.cpp index 1cade973724..0c4d2f417c9 100644 --- a/src/Access/ExternalAuthenticators.cpp +++ b/src/Access/ExternalAuthenticators.cpp @@ -77,7 +77,7 @@ auto parseLDAPServer(const Poco::Util::AbstractConfiguration & config, const Str if (enable_tls_lc_str == "starttls") params.enable_tls = LDAPClient::Params::TLSEnable::YES_STARTTLS; else if (config.getBool(ldap_server_config + ".enable_tls")) - params.enable_tls = LDAPClient::Params::TLSEnable::YES; + params.enable_tls = LDAPClient::Params::TLSEnable::YES; //-V1048 else params.enable_tls = LDAPClient::Params::TLSEnable::NO; } @@ -96,7 +96,7 @@ auto parseLDAPServer(const Poco::Util::AbstractConfiguration & config, const Str else if (tls_minimum_protocol_version_lc_str == "tls1.1") params.tls_minimum_protocol_version = LDAPClient::Params::TLSProtocolVersion::TLS1_1; else if (tls_minimum_protocol_version_lc_str == "tls1.2") - params.tls_minimum_protocol_version = LDAPClient::Params::TLSProtocolVersion::TLS1_2; + params.tls_minimum_protocol_version = LDAPClient::Params::TLSProtocolVersion::TLS1_2; //-V1048 else throw Exception("Bad value for 'tls_minimum_protocol_version' entry, allowed values are: 'ssl2', 'ssl3', 'tls1.0', 'tls1.1', 'tls1.2'", ErrorCodes::BAD_ARGUMENTS); } @@ -113,7 +113,7 @@ auto parseLDAPServer(const Poco::Util::AbstractConfiguration & config, const Str else if (tls_require_cert_lc_str == "try") params.tls_require_cert = LDAPClient::Params::TLSRequireCert::TRY; else if (tls_require_cert_lc_str == "demand") - params.tls_require_cert = LDAPClient::Params::TLSRequireCert::DEMAND; + params.tls_require_cert = LDAPClient::Params::TLSRequireCert::DEMAND; //-V1048 else throw Exception("Bad value for 'tls_require_cert' entry, allowed values are: 'never', 'allow', 'try', 'demand'", ErrorCodes::BAD_ARGUMENTS); } diff --git a/src/Access/GrantedRoles.cpp b/src/Access/GrantedRoles.cpp index 7930b56e44d..2659f8a3ec9 100644 --- a/src/Access/GrantedRoles.cpp +++ b/src/Access/GrantedRoles.cpp @@ -136,7 +136,7 @@ GrantedRoles::Elements GrantedRoles::getElements() const boost::range::set_difference(roles, roles_with_admin_option, std::back_inserter(element.ids)); if (!element.empty()) { - element.admin_option = false; + element.admin_option = false; //-V1048 elements.emplace_back(std::move(element)); } diff --git a/src/Access/IAccessStorage.h b/src/Access/IAccessStorage.h index 2cdd8eabf73..cc914664149 100644 --- a/src/Access/IAccessStorage.h +++ b/src/Access/IAccessStorage.h @@ -1,7 +1,7 @@ #pragma once #include -#include +#include #include #include #include diff --git a/src/Access/Quota.h b/src/Access/Quota.h index 430bdca29b0..b7970b2583b 100644 --- a/src/Access/Quota.h +++ b/src/Access/Quota.h @@ -45,7 +45,7 @@ struct Quota : public IAccessEntity struct ResourceTypeInfo { - const char * const raw_name; + const char * const raw_name = ""; const 
String name; /// Lowercased with underscores, e.g. "result_rows". const String keyword; /// Uppercased with spaces, e.g. "RESULT ROWS". const bool output_as_float = false; diff --git a/src/Access/RolesOrUsersSet.h b/src/Access/RolesOrUsersSet.h index 0d8983c2ec3..871bb0c0758 100644 --- a/src/Access/RolesOrUsersSet.h +++ b/src/Access/RolesOrUsersSet.h @@ -1,6 +1,7 @@ #pragma once #include +#include #include #include #include diff --git a/src/Access/RowPolicy.h b/src/Access/RowPolicy.h index c9b4d69152d..723db545dbe 100644 --- a/src/Access/RowPolicy.h +++ b/src/Access/RowPolicy.h @@ -2,6 +2,7 @@ #include #include +#include #include diff --git a/src/Access/ya.make.in b/src/Access/ya.make.in index 0c5692a9bfa..1f11c7d7d2a 100644 --- a/src/Access/ya.make.in +++ b/src/Access/ya.make.in @@ -8,7 +8,7 @@ PEERDIR( SRCS( - + ) END() diff --git a/src/AggregateFunctions/AggregateFunctionAggThrow.cpp b/src/AggregateFunctions/AggregateFunctionAggThrow.cpp index c699dd4f217..09e343b2dc5 100644 --- a/src/AggregateFunctions/AggregateFunctionAggThrow.cpp +++ b/src/AggregateFunctions/AggregateFunctionAggThrow.cpp @@ -60,6 +60,8 @@ public: return std::make_shared(); } + bool allocatesMemoryInArena() const override { return false; } + void create(AggregateDataPtr __restrict place) const override { if (std::uniform_real_distribution<>(0.0, 1.0)(thread_local_rng) <= throw_probability) diff --git a/src/AggregateFunctions/AggregateFunctionArgMinMax.h b/src/AggregateFunctions/AggregateFunctionArgMinMax.h index 9efc907aed3..77c710e0587 100644 --- a/src/AggregateFunctions/AggregateFunctionArgMinMax.h +++ b/src/AggregateFunctions/AggregateFunctionArgMinMax.h @@ -1,16 +1,14 @@ #pragma once -#include // SingleValueDataString used in embedded compiler -#include -#include -#include -#include #include -#include "Columns/IColumn.h" +#include +#include +#include // SingleValueDataString used in embedded compiler namespace DB { + namespace ErrorCodes { extern const int ILLEGAL_TYPE_OF_ARGUMENT; @@ -24,53 +22,47 @@ struct AggregateFunctionArgMinMaxData using ResultData_t = ResultData; using ValueData_t = ValueData; - ResultData result; // the argument at which the minimum/maximum value is reached. - ValueData value; // value for which the minimum/maximum is calculated. + ResultData result; // the argument at which the minimum/maximum value is reached. + ValueData value; // value for which the minimum/maximum is calculated. - static bool allocatesMemoryInArena() { return ResultData::allocatesMemoryInArena() || ValueData::allocatesMemoryInArena(); } - - static String name() { return StringRef(ValueData_t::name()) == StringRef("min") ? "argMin" : "argMax"; } + static bool allocatesMemoryInArena() + { + return ResultData::allocatesMemoryInArena() || ValueData::allocatesMemoryInArena(); + } }; /// Returns the first arg value found for the minimum/maximum value. Example: argMax(arg, value). 
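/// Worked example of the semantics, with illustrative rows:
///   (arg = 'a', value = 1), (arg = 'b', value = 3), (arg = 'c', value = 2)
///   argMax(arg, value) = 'b'  (the arg from the row where value is maximal)
///   argMin(arg, value) = 'a'  (the arg from the row where value is minimal)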
template -class AggregateFunctionArgMinMax final : public IAggregateFunctionTupleArgHelper, 2> +class AggregateFunctionArgMinMax final : public IAggregateFunctionDataHelper> { private: const DataTypePtr & type_res; const DataTypePtr & type_val; const SerializationPtr serialization_res; const SerializationPtr serialization_val; - bool tuple_argument; - using Base = IAggregateFunctionTupleArgHelper, 2>; + using Base = IAggregateFunctionDataHelper>; public: - AggregateFunctionArgMinMax(const DataTypePtr & type_res_, const DataTypePtr & type_val_, const bool tuple_argument_) - : Base({type_res_, type_val_}, {}, tuple_argument_) + AggregateFunctionArgMinMax(const DataTypePtr & type_res_, const DataTypePtr & type_val_) + : Base({type_res_, type_val_}, {}) , type_res(this->argument_types[0]) , type_val(this->argument_types[1]) , serialization_res(type_res->getDefaultSerialization()) , serialization_val(type_val->getDefaultSerialization()) { if (!type_val->isComparable()) - throw Exception( - "Illegal type " + type_val->getName() + " of second argument of aggregate function " + getName() - + " because the values of that data type are not comparable", - ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); - - this->tuple_argument = tuple_argument_; + throw Exception("Illegal type " + type_val->getName() + " of second argument of aggregate function " + getName() + + " because the values of that data type are not comparable", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); } - String getName() const override { return Data::name(); } + String getName() const override + { + return StringRef(Data::ValueData_t::name()) == StringRef("min") ? "argMin" : "argMax"; + } DataTypePtr getReturnType() const override { - if (tuple_argument) - { - return std::make_shared(DataTypes{this->type_res, this->type_val}); - } - return type_res; } @@ -98,21 +90,15 @@ public: this->data(place).value.read(buf, *serialization_val, arena); } - bool allocatesMemoryInArena() const override { return Data::allocatesMemoryInArena(); } + bool allocatesMemoryInArena() const override + { + return Data::allocatesMemoryInArena(); + } void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override { - if (tuple_argument) - { - auto & tup = assert_cast(to); - - this->data(place).result.insertResultInto(tup.getColumn(0)); - this->data(place).value.insertResultInto(tup.getColumn(1)); - } - else - this->data(place).result.insertResultInto(to); + this->data(place).result.insertResultInto(to); } }; - } diff --git a/src/AggregateFunctions/AggregateFunctionAvg.h b/src/AggregateFunctions/AggregateFunctionAvg.h index 7bf742294b4..f2ea51ac28d 100644 --- a/src/AggregateFunctions/AggregateFunctionAvg.h +++ b/src/AggregateFunctions/AggregateFunctionAvg.h @@ -7,7 +7,7 @@ #include #include #include -#include "Core/DecimalFunctions.h" +#include namespace DB @@ -96,7 +96,9 @@ public: UInt32 num_scale_ = 0, UInt32 denom_scale_ = 0) : Base(argument_types_, {}), num_scale(num_scale_), denom_scale(denom_scale_) {} - DataTypePtr getReturnType() const final { return std::make_shared>(); } + DataTypePtr getReturnType() const override { return std::make_shared>(); } + + bool allocatesMemoryInArena() const override { return false; } void NO_SANITIZE_UNDEFINED merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena *) const override { diff --git a/src/AggregateFunctions/AggregateFunctionAvgWeighted.h b/src/AggregateFunctions/AggregateFunctionAvgWeighted.h index f8b452fc444..8b932918aa5 100644 --- 
a/src/AggregateFunctions/AggregateFunctionAvgWeighted.h +++ b/src/AggregateFunctions/AggregateFunctionAvgWeighted.h @@ -5,18 +5,18 @@ namespace DB { -template +template using AvgWeightedFieldType = std::conditional_t, std::conditional_t, Decimal256, Decimal128>, std::conditional_t, Float64, // no way to do UInt128 * UInt128, better cast to Float64 NearestFieldType>>; -template +template using MaxFieldType = std::conditional_t<(sizeof(AvgWeightedFieldType) > sizeof(AvgWeightedFieldType)), AvgWeightedFieldType, AvgWeightedFieldType>; -template +template class AggregateFunctionAvgWeighted final : public AggregateFunctionAvgBase< MaxFieldType, AvgWeightedFieldType, AggregateFunctionAvgWeighted> diff --git a/src/AggregateFunctions/AggregateFunctionBitwise.h b/src/AggregateFunctions/AggregateFunctionBitwise.h index 3ba8e045069..b48b1960329 100644 --- a/src/AggregateFunctions/AggregateFunctionBitwise.h +++ b/src/AggregateFunctions/AggregateFunctionBitwise.h @@ -54,6 +54,8 @@ public: return std::make_shared>(); } + bool allocatesMemoryInArena() const override { return false; } + void add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena *) const override { this->data(place).update(assert_cast &>(*columns[0]).getData()[row_num]); diff --git a/src/AggregateFunctions/AggregateFunctionBoundingRatio.h b/src/AggregateFunctions/AggregateFunctionBoundingRatio.h index 32ae22fd573..bee58a4a932 100644 --- a/src/AggregateFunctions/AggregateFunctionBoundingRatio.h +++ b/src/AggregateFunctions/AggregateFunctionBoundingRatio.h @@ -2,7 +2,6 @@ #include #include -#include #include #include #include @@ -20,7 +19,7 @@ namespace ErrorCodes /** Tracks the leftmost and rightmost (x, y) data points. */ -struct AggregateFunctionBoundingRatioData +struct AggregateFunctionBoundingRatioData //-V730 { struct Point { @@ -127,6 +126,8 @@ public: return std::make_shared(); } + bool allocatesMemoryInArena() const override { return false; } + void add(AggregateDataPtr __restrict place, const IColumn ** columns, const size_t row_num, Arena *) const override { /// NOTE Slightly inefficient. 
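A recurring change in the aggregate-function files above and below is the explicit "bool allocatesMemoryInArena() const override { return false; }". The flag tells callers whether a function's state may allocate variable-length data from the per-query Arena; a self-contained illustration of how a caller can use it (simplified interfaces, assumed for the example):

#include <cstdint>
#include <vector>

struct IAggregateFunctionLike
{
    virtual ~IAggregateFunctionLike() = default;
    /// True if the state stores variable-length data in an Arena
    /// (e.g. strings); false for plain fixed-size POD states.
    virtual bool allocatesMemoryInArena() const = 0;
};

struct CountLike : IAggregateFunctionLike
{
    uint64_t count = 0;
    bool allocatesMemoryInArena() const override { return false; }
};

/// A caller can skip Arena setup entirely when no function needs it.
bool anyNeedsArena(const std::vector<const IAggregateFunctionLike *> & functions)
{
    for (const auto * function : functions)
        if (function->allocatesMemoryInArena())
            return true;
    return false;
}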
diff --git a/src/AggregateFunctions/AggregateFunctionCategoricalInformationValue.h b/src/AggregateFunctions/AggregateFunctionCategoricalInformationValue.h index ba8acb208ea..fb3e35fbcf1 100644 --- a/src/AggregateFunctions/AggregateFunctionCategoricalInformationValue.h +++ b/src/AggregateFunctions/AggregateFunctionCategoricalInformationValue.h @@ -33,6 +33,8 @@ public: return "categoricalInformationValue"; } + bool allocatesMemoryInArena() const override { return false; } + void create(AggregateDataPtr __restrict place) const override { memset(place, 0, sizeOfData()); diff --git a/src/AggregateFunctions/AggregateFunctionCombinatorFactory.cpp b/src/AggregateFunctions/AggregateFunctionCombinatorFactory.cpp index a20d355bb2f..e4ff8c134c5 100644 --- a/src/AggregateFunctions/AggregateFunctionCombinatorFactory.cpp +++ b/src/AggregateFunctions/AggregateFunctionCombinatorFactory.cpp @@ -13,17 +13,25 @@ namespace ErrorCodes void AggregateFunctionCombinatorFactory::registerCombinator(const AggregateFunctionCombinatorPtr & value) { - if (!dict.emplace(value->getName(), value).second) - throw Exception("AggregateFunctionCombinatorFactory: the name '" + value->getName() + "' is not unique", - ErrorCodes::LOGICAL_ERROR); + CombinatorPair pair{ + .name = value->getName(), + .combinator_ptr = value, + }; + + /// lower_bound() cannot be used since sort order of the dict is by length of the combinator + /// but there are just a few combiners, so not a problem. + if (std::find(dict.begin(), dict.end(), pair) != dict.end()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "AggregateFunctionCombinatorFactory: the name '{}' is not unique", + value->getName()); + dict.emplace(std::lower_bound(dict.begin(), dict.end(), pair), pair); } AggregateFunctionCombinatorPtr AggregateFunctionCombinatorFactory::tryFindSuffix(const std::string & name) const { /// O(N) is ok for just a few combinators. for (const auto & suffix_value : dict) - if (endsWith(name, suffix_value.first)) - return suffix_value.second; + if (endsWith(name, suffix_value.name)) + return suffix_value.combinator_ptr; return {}; } diff --git a/src/AggregateFunctions/AggregateFunctionCombinatorFactory.h b/src/AggregateFunctions/AggregateFunctionCombinatorFactory.h index b535475d111..5f7658c16af 100644 --- a/src/AggregateFunctions/AggregateFunctionCombinatorFactory.h +++ b/src/AggregateFunctions/AggregateFunctionCombinatorFactory.h @@ -15,7 +15,17 @@ namespace DB class AggregateFunctionCombinatorFactory final: private boost::noncopyable { private: - using Dict = std::unordered_map; + struct CombinatorPair + { + std::string name; + AggregateFunctionCombinatorPtr combinator_ptr; + + bool operator==(const CombinatorPair & rhs) const { return name == rhs.name; } + /// Sort by the length of the combinator name for proper tryFindSuffix() + /// for combiners with common prefix (i.e. "State" and "SimpleState"). 
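+        /// Example: for a function name ending in "SimpleState", this ordering
+        /// makes tryFindSuffix() test the longer suffix "SimpleState" before its
+        /// proper suffix "State", so the more specific combinator is chosen.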
+ bool operator<(const CombinatorPair & rhs) const { return name.length() > rhs.name.length(); } + }; + using Dict = std::vector; Dict dict; public: diff --git a/src/AggregateFunctions/AggregateFunctionCount.h b/src/AggregateFunctions/AggregateFunctionCount.h index 1b3a0acb528..71db28390af 100644 --- a/src/AggregateFunctions/AggregateFunctionCount.h +++ b/src/AggregateFunctions/AggregateFunctionCount.h @@ -38,6 +38,8 @@ public: return std::make_shared(); } + bool allocatesMemoryInArena() const override { return false; } + void add(AggregateDataPtr __restrict place, const IColumn **, size_t, Arena *) const override { ++data(place).count; @@ -126,6 +128,8 @@ public: return std::make_shared(); } + bool allocatesMemoryInArena() const override { return false; } + void add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena *) const override { data(place).count += !assert_cast(*columns[0]).isNullAt(row_num); diff --git a/src/AggregateFunctions/AggregateFunctionDeltaSum.h b/src/AggregateFunctions/AggregateFunctionDeltaSum.h index d5760de84ae..84cec222efb 100644 --- a/src/AggregateFunctions/AggregateFunctionDeltaSum.h +++ b/src/AggregateFunctions/AggregateFunctionDeltaSum.h @@ -22,8 +22,7 @@ struct AggregationFunctionDeltaSumData T sum = 0; T last = 0; T first = 0; - bool seen_last = false; - bool seen_first = false; + bool seen = false; }; template @@ -43,22 +42,23 @@ public: DataTypePtr getReturnType() const override { return std::make_shared>(); } + bool allocatesMemoryInArena() const override { return false; } + void NO_SANITIZE_UNDEFINED ALWAYS_INLINE add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena *) const override { auto value = assert_cast &>(*columns[0]).getData()[row_num]; - if ((this->data(place).last < value) && this->data(place).seen_last) + if ((this->data(place).last < value) && this->data(place).seen) { this->data(place).sum += (value - this->data(place).last); } this->data(place).last = value; - this->data(place).seen_last = true; - if (!this->data(place).seen_first) + if (!this->data(place).seen) { this->data(place).first = value; - this->data(place).seen_first = true; + this->data(place).seen = true; } } @@ -67,7 +67,7 @@ public: auto place_data = &this->data(place); auto rhs_data = &this->data(rhs); - if ((place_data->last < rhs_data->first) && place_data->seen_last && rhs_data->seen_first) + if ((place_data->last < rhs_data->first) && place_data->seen && rhs_data->seen) { // If the lhs last number seen is less than the first number the rhs saw, the lhs is before // the rhs, for example [0, 2] [4, 7]. So we want to add the deltasums, but also add the @@ -77,7 +77,7 @@ public: place_data->sum += rhs_data->sum + (rhs_data->first - place_data->last); place_data->last = rhs_data->last; } - else if ((rhs_data->last < place_data->first && rhs_data->seen_last && place_data->seen_first)) + else if ((rhs_data->first < place_data->last && rhs_data->seen && place_data->seen)) { // In the opposite scenario, the lhs comes after the rhs, e.g. [4, 6] [1, 2]. Since we // assume the input interval states are sorted by time, we assume this is a counter @@ -85,18 +85,17 @@ public: // rhs last value. place_data->sum += rhs_data->sum; - place_data->first = rhs_data->first; + place_data->last = rhs_data->last; } - else if (rhs_data->seen_first) + else if (rhs_data->seen && !place_data->seen) { // If we're here then the lhs is an empty state and the rhs does have some state, so // we'll just take that state. 
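The `deltaSum` merge arithmetic is easier to check outside the engine. Below is a hedged, self-contained model of the same bookkeeping (plain `int64_t` fields instead of ClickHouse columns, and only the time-ordered merge case; the real code also handles the reversed and empty-state branches shown in this hunk). It reproduces the `[0, 2]` `[4, 7]` example from the comments: merging adds both partial sums plus the bridging delta between the blocks.

```cpp
#include <cstdint>
#include <iostream>

// Standalone model of the deltaSum partial state; illustrative only.
struct DeltaSumState
{
    int64_t sum = 0, first = 0, last = 0;
    bool seen = false;
};

void add(DeltaSumState & s, int64_t value)
{
    if (s.seen && s.last < value)
        s.sum += value - s.last;       // counter went up: accumulate the delta
    s.last = value;
    if (!s.seen)
    {
        s.first = value;
        s.seen = true;
    }
}

// Merge rhs into lhs, assuming lhs covers the earlier time range.
void mergeOrdered(DeltaSumState & lhs, const DeltaSumState & rhs)
{
    if (!lhs.seen) { lhs = rhs; return; }
    if (!rhs.seen) return;
    if (lhs.last < rhs.first)
        lhs.sum += rhs.first - lhs.last; // bridge the gap between the two blocks
    lhs.sum += rhs.sum;
    lhs.last = rhs.last;
}

int main()
{
    DeltaSumState a, b;
    for (int64_t v : {0, 2}) add(a, v); // sum = 2
    for (int64_t v : {4, 7}) add(b, v); // sum = 3
    mergeOrdered(a, b);                 // + bridge (4 - 2) = 2
    std::cout << a.sum << '\n';         // 7, same as processing 0,2,4,7 in one stream
}
```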
place_data->first = rhs_data->first; - place_data->seen_first = rhs_data->seen_first; place_data->last = rhs_data->last; - place_data->seen_last = rhs_data->seen_last; place_data->sum = rhs_data->sum; + place_data->seen = rhs_data->seen; } // Otherwise lhs either has data or is uninitialized, so we don't need to modify its values. @@ -107,8 +106,7 @@ public: writeIntBinary(this->data(place).sum, buf); writeIntBinary(this->data(place).first, buf); writeIntBinary(this->data(place).last, buf); - writePODBinary(this->data(place).seen_first, buf); - writePODBinary(this->data(place).seen_last, buf); + writePODBinary(this->data(place).seen, buf); } void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, Arena *) const override @@ -116,8 +114,7 @@ public: readIntBinary(this->data(place).sum, buf); readIntBinary(this->data(place).first, buf); readIntBinary(this->data(place).last, buf); - readPODBinary(this->data(place).seen_first, buf); - readPODBinary(this->data(place).seen_last, buf); + readPODBinary(this->data(place).seen, buf); } void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override diff --git a/src/AggregateFunctions/AggregateFunctionDeltaSumTimestamp.cpp b/src/AggregateFunctions/AggregateFunctionDeltaSumTimestamp.cpp new file mode 100644 index 00000000000..70a1ac660b1 --- /dev/null +++ b/src/AggregateFunctions/AggregateFunctionDeltaSumTimestamp.cpp @@ -0,0 +1,51 @@ +#include + +#include +#include +#include + + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; + extern const int ILLEGAL_TYPE_OF_ARGUMENT; +} + +namespace +{ + +AggregateFunctionPtr createAggregateFunctionDeltaSumTimestamp( + const String & name, + const DataTypes & arguments, + const Array & params) +{ + assertNoParameters(name, params); + + if (arguments.size() != 2) + throw Exception("Incorrect number of arguments for aggregate function " + name, + ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH); + + if (!isInteger(arguments[0]) && !isFloat(arguments[0]) && !isDateOrDateTime(arguments[0])) + throw Exception("Illegal type " + arguments[0]->getName() + " of argument for aggregate function " + + name + ", must be Int, Float, Date, DateTime", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); + + if (!isInteger(arguments[1]) && !isFloat(arguments[1]) && !isDateOrDateTime(arguments[1])) + throw Exception("Illegal type " + arguments[1]->getName() + " of argument for aggregate function " + + name + ", must be Int, Float, Date, DateTime", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); + + return AggregateFunctionPtr(createWithTwoNumericOrDateTypes( + *arguments[0], *arguments[1], arguments, params)); +} +} + +void registerAggregateFunctionDeltaSumTimestamp(AggregateFunctionFactory & factory) +{ + AggregateFunctionProperties properties = { .returns_default_when_only_null = true, .is_order_dependent = true }; + + factory.registerFunction("deltaSumTimestamp", { createAggregateFunctionDeltaSumTimestamp, properties }); +} + +} diff --git a/src/AggregateFunctions/AggregateFunctionDeltaSumTimestamp.h b/src/AggregateFunctions/AggregateFunctionDeltaSumTimestamp.h new file mode 100644 index 00000000000..b7e91e9524a --- /dev/null +++ b/src/AggregateFunctions/AggregateFunctionDeltaSumTimestamp.h @@ -0,0 +1,172 @@ +#pragma once + +#include +#include + +#include +#include + +#include +#include +#include + +#include + + +namespace DB +{ + +template +struct AggregationFunctionDeltaSumTimestampData +{ + ValueType sum = 0; + ValueType first = 0; + ValueType last = 0; + 
TimestampType first_ts = 0;
+    TimestampType last_ts = 0;
+    bool seen = false;
+};
+
+template <typename ValueType, typename TimestampType>
+class AggregationFunctionDeltaSumTimestamp final
+    : public IAggregateFunctionDataHelper<
+        AggregationFunctionDeltaSumTimestampData<ValueType, TimestampType>,
+        AggregationFunctionDeltaSumTimestamp<ValueType, TimestampType>
+    >
+{
+public:
+    AggregationFunctionDeltaSumTimestamp(const DataTypes & arguments, const Array & params)
+        : IAggregateFunctionDataHelper<
+            AggregationFunctionDeltaSumTimestampData<ValueType, TimestampType>,
+            AggregationFunctionDeltaSumTimestamp<ValueType, TimestampType>
+        >{arguments, params}
+    {}
+
+    AggregationFunctionDeltaSumTimestamp()
+        : IAggregateFunctionDataHelper<
+            AggregationFunctionDeltaSumTimestampData<ValueType, TimestampType>,
+            AggregationFunctionDeltaSumTimestamp<ValueType, TimestampType>
+        >{}
+    {}
+
+    bool allocatesMemoryInArena() const override { return false; }
+
+    String getName() const override { return "deltaSumTimestamp"; }
+
+    DataTypePtr getReturnType() const override { return std::make_shared<DataTypeNumber<ValueType>>(); }
+
+    void NO_SANITIZE_UNDEFINED ALWAYS_INLINE add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena *) const override
+    {
+        auto value = assert_cast<const ColumnVector<ValueType> &>(*columns[0]).getData()[row_num];
+        auto ts = assert_cast<const ColumnVector<TimestampType> &>(*columns[1]).getData()[row_num];
+
+        if ((this->data(place).last < value) && this->data(place).seen)
+        {
+            this->data(place).sum += (value - this->data(place).last);
+        }
+
+        this->data(place).last = value;
+        this->data(place).last_ts = ts;
+
+        if (!this->data(place).seen)
+        {
+            this->data(place).first = value;
+            this->data(place).seen = true;
+            this->data(place).first_ts = ts;
+        }
+    }
+
+    // Returns true if `lhs` is before `rhs`, false if it is not or if the order can't be determined.
+    bool ALWAYS_INLINE before(
+        const AggregationFunctionDeltaSumTimestampData<ValueType, TimestampType> * lhs,
+        const AggregationFunctionDeltaSumTimestampData<ValueType, TimestampType> * rhs
+    ) const
+    {
+        if (lhs->last_ts < rhs->first_ts)
+        {
+            return true;
+        }
+        if (lhs->last_ts == rhs->first_ts && (lhs->last_ts < rhs->last_ts || lhs->first_ts < rhs->first_ts))
+        {
+            return true;
+        }
+        return false;
+    }
+
+    void NO_SANITIZE_UNDEFINED ALWAYS_INLINE merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena *) const override
+    {
+        auto place_data = &this->data(place);
+        auto rhs_data = &this->data(rhs);
+
+        if (!place_data->seen && rhs_data->seen)
+        {
+            place_data->sum = rhs_data->sum;
+            place_data->seen = true;
+            place_data->first = rhs_data->first;
+            place_data->first_ts = rhs_data->first_ts;
+            place_data->last = rhs_data->last;
+            place_data->last_ts = rhs_data->last_ts;
+        }
+        else if (place_data->seen && !rhs_data->seen)
+            return;
+        else if (before(place_data, rhs_data))
+        {
+            // This state came before the rhs state
+
+            if (rhs_data->first > place_data->last)
+                place_data->sum += (rhs_data->first - place_data->last);
+            place_data->sum += rhs_data->sum;
+            place_data->last = rhs_data->last;
+            place_data->last_ts = rhs_data->last_ts;
+        }
+        else if (before(rhs_data, place_data))
+        {
+            // This state came after the rhs state
+
+            if (place_data->first > rhs_data->last)
+                place_data->sum += (place_data->first - rhs_data->last);
+            place_data->sum += rhs_data->sum;
+            place_data->first = rhs_data->first;
+            place_data->first_ts = rhs_data->first_ts;
+        }
+        else
+        {
+            // If none of those conditions matched, it means both states we are merging have all the
+            // same timestamps. We have to pick either the smaller or larger value so that the
+            // result is deterministic.
+ + if (place_data->first < rhs_data->first) + { + place_data->first = rhs_data->first; + place_data->last = rhs_data->last; + } + } + } + + void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf) const override + { + writeIntBinary(this->data(place).sum, buf); + writeIntBinary(this->data(place).first, buf); + writeIntBinary(this->data(place).first_ts, buf); + writeIntBinary(this->data(place).last, buf); + writeIntBinary(this->data(place).last_ts, buf); + writePODBinary(this->data(place).seen, buf); + } + + void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, Arena *) const override + { + readIntBinary(this->data(place).sum, buf); + readIntBinary(this->data(place).first, buf); + readIntBinary(this->data(place).first_ts, buf); + readIntBinary(this->data(place).last, buf); + readIntBinary(this->data(place).last_ts, buf); + readPODBinary(this->data(place).seen, buf); + } + + void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override + { + assert_cast &>(to).getData().push_back(this->data(place).sum); + } +}; + +} diff --git a/src/AggregateFunctions/AggregateFunctionDistinct.h b/src/AggregateFunctions/AggregateFunctionDistinct.h index b587bbebf6e..b551f8a4ec5 100644 --- a/src/AggregateFunctions/AggregateFunctionDistinct.h +++ b/src/AggregateFunctions/AggregateFunctionDistinct.h @@ -10,7 +10,6 @@ #include #include -#include namespace DB { diff --git a/src/AggregateFunctions/AggregateFunctionEntropy.h b/src/AggregateFunctions/AggregateFunctionEntropy.h index 9bb1bc039c5..587f5aa3fc7 100644 --- a/src/AggregateFunctions/AggregateFunctionEntropy.h +++ b/src/AggregateFunctions/AggregateFunctionEntropy.h @@ -103,6 +103,8 @@ public: return std::make_shared>(); } + bool allocatesMemoryInArena() const override { return false; } + void add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena *) const override { if constexpr (!std::is_same_v) diff --git a/src/AggregateFunctions/AggregateFunctionFactory.cpp b/src/AggregateFunctions/AggregateFunctionFactory.cpp index c0011b6ebec..eec8b374424 100644 --- a/src/AggregateFunctions/AggregateFunctionFactory.cpp +++ b/src/AggregateFunctions/AggregateFunctionFactory.cpp @@ -121,7 +121,7 @@ AggregateFunctionPtr AggregateFunctionFactory::getImpl( is_case_insensitive = true; } - const Context * query_context = nullptr; + ContextPtr query_context; if (CurrentThread::isInitialized()) query_context = CurrentThread::get().getQueryContext(); diff --git a/src/AggregateFunctions/AggregateFunctionGroupArray.h b/src/AggregateFunctions/AggregateFunctionGroupArray.h index 921274f7d59..e59f19e3bae 100644 --- a/src/AggregateFunctions/AggregateFunctionGroupArray.h +++ b/src/AggregateFunctions/AggregateFunctionGroupArray.h @@ -296,12 +296,7 @@ public: if (size) { typename ColumnVector::Container & data_to = assert_cast &>(arr_to.getData()).getData(); - if constexpr (is_big_int_v) - // is data_to empty? 
we should probably use std::vector::insert then - for (auto it = this->data(place).value.begin(); it != this->data(place).value.end(); it++) - data_to.push_back(*it); - else - data_to.insert(this->data(place).value.begin(), this->data(place).value.end()); + data_to.insert(this->data(place).value.begin(), this->data(place).value.end()); } } diff --git a/src/AggregateFunctions/AggregateFunctionGroupArrayInsertAt.h b/src/AggregateFunctions/AggregateFunctionGroupArrayInsertAt.h index 35913f133b1..723ee7140bc 100644 --- a/src/AggregateFunctions/AggregateFunctionGroupArrayInsertAt.h +++ b/src/AggregateFunctions/AggregateFunctionGroupArrayInsertAt.h @@ -104,6 +104,8 @@ public: return std::make_shared(type); } + bool allocatesMemoryInArena() const override { return false; } + void add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena *) const override { /// TODO Do positions need to be 1-based for this function? diff --git a/src/AggregateFunctions/AggregateFunctionGroupArrayMoving.cpp b/src/AggregateFunctions/AggregateFunctionGroupArrayMoving.cpp index e2139ba882f..b6e9fda9559 100644 --- a/src/AggregateFunctions/AggregateFunctionGroupArrayMoving.cpp +++ b/src/AggregateFunctions/AggregateFunctionGroupArrayMoving.cpp @@ -21,6 +21,7 @@ namespace ErrorCodes namespace { +/// TODO Proper support for Decimal256. template struct MovingSum { diff --git a/src/AggregateFunctions/AggregateFunctionGroupArrayMoving.h b/src/AggregateFunctions/AggregateFunctionGroupArrayMoving.h index 3bab831d316..8b7109ae3ef 100644 --- a/src/AggregateFunctions/AggregateFunctionGroupArrayMoving.h +++ b/src/AggregateFunctions/AggregateFunctionGroupArrayMoving.h @@ -38,7 +38,7 @@ struct MovingData using Array = PODArray; Array value; /// Prefix sums. - T sum = 0; + T sum{}; void NO_SANITIZE_UNDEFINED add(T val, Arena * arena) { @@ -69,9 +69,9 @@ struct MovingAvgData : public MovingData T NO_SANITIZE_UNDEFINED get(size_t idx, UInt64 window_size) const { if (idx < window_size) - return this->value[idx] / window_size; + return this->value[idx] / T(window_size); else - return (this->value[idx] - this->value[idx - window_size]) / window_size; + return (this->value[idx] - this->value[idx - window_size]) / T(window_size); } }; diff --git a/src/AggregateFunctions/AggregateFunctionGroupBitmap.h b/src/AggregateFunctions/AggregateFunctionGroupBitmap.h index 4628410286d..83db274752b 100644 --- a/src/AggregateFunctions/AggregateFunctionGroupBitmap.h +++ b/src/AggregateFunctions/AggregateFunctionGroupBitmap.h @@ -22,6 +22,8 @@ public: DataTypePtr getReturnType() const override { return std::make_shared>(); } + bool allocatesMemoryInArena() const override { return false; } + void add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena *) const override { this->data(place).rbs.add(assert_cast &>(*columns[0]).getData()[row_num]); @@ -56,6 +58,8 @@ public: DataTypePtr getReturnType() const override { return std::make_shared>(); } + bool allocatesMemoryInArena() const override { return false; } + void add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena *) const override { Data & data_lhs = this->data(place); diff --git a/src/AggregateFunctions/AggregateFunctionGroupUniqArray.h b/src/AggregateFunctions/AggregateFunctionGroupUniqArray.h index 435efdd2373..16911184f31 100644 --- a/src/AggregateFunctions/AggregateFunctionGroupUniqArray.h +++ b/src/AggregateFunctions/AggregateFunctionGroupUniqArray.h @@ -59,6 +59,8 @@ public: return 
std::make_shared(this->argument_types[0]); } + bool allocatesMemoryInArena() const override { return false; } + void add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena *) const override { if (limit_num_elems && this->data(place).value.size() >= max_elems) diff --git a/src/AggregateFunctions/AggregateFunctionHistogram.h b/src/AggregateFunctions/AggregateFunctionHistogram.h index 76aa96ba663..a5a6920ce33 100644 --- a/src/AggregateFunctions/AggregateFunctionHistogram.h +++ b/src/AggregateFunctions/AggregateFunctionHistogram.h @@ -220,7 +220,7 @@ private: } public: - AggregateFunctionHistogramData() + AggregateFunctionHistogramData() //-V730 : size(0) , lower_bound(std::numeric_limits::max()) , upper_bound(std::numeric_limits::lowest()) @@ -332,6 +332,8 @@ public: return std::make_shared(tuple); } + bool allocatesMemoryInArena() const override { return false; } + void add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena *) const override { auto val = assert_cast &>(*columns[0]).getData()[row_num]; diff --git a/src/AggregateFunctions/AggregateFunctionIf.h b/src/AggregateFunctions/AggregateFunctionIf.h index 8144ae355ba..0670fa0e69a 100644 --- a/src/AggregateFunctions/AggregateFunctionIf.h +++ b/src/AggregateFunctions/AggregateFunctionIf.h @@ -113,6 +113,16 @@ public: nested_func->merge(place, rhs, arena); } + void mergeBatch( + size_t batch_size, + AggregateDataPtr * places, + size_t place_offset, + const AggregateDataPtr * rhs, + Arena * arena) const override + { + nested_func->mergeBatch(batch_size, places, place_offset, rhs, arena); + } + void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf) const override { nested_func->serialize(place, buf); diff --git a/src/AggregateFunctions/AggregateFunctionMLMethod.cpp b/src/AggregateFunctions/AggregateFunctionMLMethod.cpp index 6c5c5af2f1d..145660e881b 100644 --- a/src/AggregateFunctions/AggregateFunctionMLMethod.cpp +++ b/src/AggregateFunctions/AggregateFunctionMLMethod.cpp @@ -146,7 +146,7 @@ void LinearModelData::predict( const ColumnsWithTypeAndName & arguments, size_t offset, size_t limit, - const Context & context) const + ContextPtr context) const { gradient_computer->predict(container, arguments, offset, limit, weights, bias, context); } @@ -453,7 +453,7 @@ void LogisticRegression::predict( size_t limit, const std::vector & weights, Float64 bias, - const Context & /*context*/) const + ContextPtr /*context*/) const { size_t rows_num = arguments.front().column->size(); @@ -521,7 +521,7 @@ void LinearRegression::predict( size_t limit, const std::vector & weights, Float64 bias, - const Context & /*context*/) const + ContextPtr /*context*/) const { if (weights.size() + 1 != arguments.size()) { diff --git a/src/AggregateFunctions/AggregateFunctionMLMethod.h b/src/AggregateFunctions/AggregateFunctionMLMethod.h index 0c88f9d877d..6d97feb3193 100644 --- a/src/AggregateFunctions/AggregateFunctionMLMethod.h +++ b/src/AggregateFunctions/AggregateFunctionMLMethod.h @@ -3,10 +3,10 @@ #include #include #include -#include -#include -#include #include +#include +#include +#include #include "IAggregateFunction.h" namespace DB @@ -44,7 +44,7 @@ public: size_t limit, const std::vector & weights, Float64 bias, - const Context & context) const = 0; + ContextPtr context) const = 0; }; @@ -69,7 +69,7 @@ public: size_t limit, const std::vector & weights, Float64 bias, - const Context & context) const override; + ContextPtr context) const override; }; @@ -94,7 +94,7 @@ public: 
size_t limit, const std::vector & weights, Float64 bias, - const Context & context) const override; + ContextPtr context) const override; }; @@ -264,7 +264,7 @@ public: const ColumnsWithTypeAndName & arguments, size_t offset, size_t limit, - const Context & context) const; + ContextPtr context) const; void returnWeights(IColumn & to) const; private: @@ -323,6 +323,8 @@ public: return std::make_shared(std::make_shared()); } + bool allocatesMemoryInArena() const override { return false; } + /// This function is called from evalMLMethod function for correct predictValues call DataTypePtr getReturnTypeToPredict() const override { @@ -363,7 +365,7 @@ public: const ColumnsWithTypeAndName & arguments, size_t offset, size_t limit, - const Context & context) const override + ContextPtr context) const override { if (arguments.size() != param_num + 1) throw Exception( diff --git a/src/AggregateFunctions/AggregateFunctionMannWhitney.h b/src/AggregateFunctions/AggregateFunctionMannWhitney.h index 1451536d519..7573de65cc0 100644 --- a/src/AggregateFunctions/AggregateFunctionMannWhitney.h +++ b/src/AggregateFunctions/AggregateFunctionMannWhitney.h @@ -6,7 +6,6 @@ #include #include #include -#include #include #include #include @@ -21,7 +20,7 @@ #include -#include + namespace DB { @@ -174,6 +173,8 @@ public: return "mannWhitneyUTest"; } + bool allocatesMemoryInArena() const override { return true; } + DataTypePtr getReturnType() const override { DataTypes types @@ -208,7 +209,7 @@ public: void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena * arena) const override { auto & a = this->data(place); - auto & b = this->data(rhs); + const auto & b = this->data(rhs); a.merge(b, arena); } diff --git a/src/AggregateFunctions/AggregateFunctionMaxIntersections.h b/src/AggregateFunctions/AggregateFunctionMaxIntersections.h index d4946ad2c9d..94509a40ada 100644 --- a/src/AggregateFunctions/AggregateFunctionMaxIntersections.h +++ b/src/AggregateFunctions/AggregateFunctionMaxIntersections.h @@ -87,6 +87,8 @@ public: return std::make_shared>(); } + bool allocatesMemoryInArena() const override { return false; } + void add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena * arena) const override { PointType left = assert_cast &>(*columns[0]).getData()[row_num]; diff --git a/src/AggregateFunctions/AggregateFunctionMinMaxAny.h b/src/AggregateFunctions/AggregateFunctionMinMaxAny.h index 919026a78c1..f4561660ad5 100644 --- a/src/AggregateFunctions/AggregateFunctionMinMaxAny.h +++ b/src/AggregateFunctions/AggregateFunctionMinMaxAny.h @@ -181,7 +181,7 @@ public: /** For strings. Short strings are stored in the object itself, and long strings are allocated separately. * NOTE It could also be suitable for arrays of numbers. 
*/ -struct SingleValueDataString +struct SingleValueDataString //-V730 { private: using Self = SingleValueDataString; diff --git a/src/AggregateFunctions/AggregateFunctionNothing.h b/src/AggregateFunctions/AggregateFunctionNothing.h index f373b3b55b0..a094c1e0fac 100644 --- a/src/AggregateFunctions/AggregateFunctionNothing.h +++ b/src/AggregateFunctions/AggregateFunctionNothing.h @@ -28,6 +28,8 @@ public: return argument_types.front(); } + bool allocatesMemoryInArena() const override { return false; } + void create(AggregateDataPtr) const override { } diff --git a/src/AggregateFunctions/AggregateFunctionOrFill.h b/src/AggregateFunctions/AggregateFunctionOrFill.h index 4bb25e0d4de..732e83e5a0c 100644 --- a/src/AggregateFunctions/AggregateFunctionOrFill.h +++ b/src/AggregateFunctions/AggregateFunctionOrFill.h @@ -196,6 +196,18 @@ public: place[size_of_data] |= rhs[size_of_data]; } + void mergeBatch( + size_t batch_size, + AggregateDataPtr * places, + size_t place_offset, + const AggregateDataPtr * rhs, + Arena * arena) const override + { + nested_function->mergeBatch(batch_size, places, place_offset, rhs, arena); + for (size_t i = 0; i < batch_size; ++i) + (places[i] + place_offset)[size_of_data] |= rhs[i][size_of_data]; + } + void serialize( ConstAggregateDataPtr place, WriteBuffer & buf) const override diff --git a/src/AggregateFunctions/AggregateFunctionQuantile.cpp b/src/AggregateFunctions/AggregateFunctionQuantile.cpp index 1dcdb288339..e8d86a03ff1 100644 --- a/src/AggregateFunctions/AggregateFunctionQuantile.cpp +++ b/src/AggregateFunctions/AggregateFunctionQuantile.cpp @@ -100,13 +100,14 @@ AggregateFunctionPtr createAggregateFunctionQuantile(const std::string & name, c if (which.idx == TypeIndex::Decimal32) return std::make_shared>(argument_types, params); if (which.idx == TypeIndex::Decimal64) return std::make_shared>(argument_types, params); if (which.idx == TypeIndex::Decimal128) return std::make_shared>(argument_types, params); + if (which.idx == TypeIndex::Decimal256) return std::make_shared>(argument_types, params); if (which.idx == TypeIndex::DateTime64) return std::make_shared>(argument_types, params); - //if (which.idx == TypeIndex::Decimal256) return std::make_shared>(argument_types, params); } if constexpr (supportBigInt()) { if (which.idx == TypeIndex::Int128) return std::make_shared>(argument_types, params); + if (which.idx == TypeIndex::UInt128) return std::make_shared>(argument_types, params); if (which.idx == TypeIndex::Int256) return std::make_shared>(argument_types, params); if (which.idx == TypeIndex::UInt256) return std::make_shared>(argument_types, params); } diff --git a/src/AggregateFunctions/AggregateFunctionQuantile.h b/src/AggregateFunctions/AggregateFunctionQuantile.h index edd24add736..209784361dd 100644 --- a/src/AggregateFunctions/AggregateFunctionQuantile.h +++ b/src/AggregateFunctions/AggregateFunctionQuantile.h @@ -103,6 +103,8 @@ public: return res; } + bool allocatesMemoryInArena() const override { return false; } + void add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena *) const override { auto value = static_cast(*columns[0]).getData()[row_num]; diff --git a/src/AggregateFunctions/AggregateFunctionRankCorrelation.h b/src/AggregateFunctions/AggregateFunctionRankCorrelation.h index a7e0852378c..fbba1b99ab9 100644 --- a/src/AggregateFunctions/AggregateFunctionRankCorrelation.h +++ b/src/AggregateFunctions/AggregateFunctionRankCorrelation.h @@ -58,6 +58,8 @@ public: return "rankCorr"; } + bool 
allocatesMemoryInArena() const override { return true; } + DataTypePtr getReturnType() const override { return std::make_shared>(); diff --git a/src/AggregateFunctions/AggregateFunctionRetention.h b/src/AggregateFunctions/AggregateFunctionRetention.h index 5f0d9907280..aee74cb7324 100644 --- a/src/AggregateFunctions/AggregateFunctionRetention.h +++ b/src/AggregateFunctions/AggregateFunctionRetention.h @@ -94,6 +94,8 @@ public: return std::make_shared(std::make_shared()); } + bool allocatesMemoryInArena() const override { return false; } + void add(AggregateDataPtr __restrict place, const IColumn ** columns, const size_t row_num, Arena *) const override { for (const auto i : ext::range(0, events_size)) diff --git a/src/AggregateFunctions/AggregateFunctionSequenceMatch.h b/src/AggregateFunctions/AggregateFunctionSequenceMatch.h index 48015a6d282..2c2a4d4c75a 100644 --- a/src/AggregateFunctions/AggregateFunctionSequenceMatch.h +++ b/src/AggregateFunctions/AggregateFunctionSequenceMatch.h @@ -560,6 +560,8 @@ public: DataTypePtr getReturnType() const override { return std::make_shared(); } + bool allocatesMemoryInArena() const override { return false; } + void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override { this->data(place).sort(); @@ -588,6 +590,8 @@ public: DataTypePtr getReturnType() const override { return std::make_shared(); } + bool allocatesMemoryInArena() const override { return false; } + void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override { this->data(place).sort(); diff --git a/src/AggregateFunctions/AggregateFunctionSimpleLinearRegression.h b/src/AggregateFunctions/AggregateFunctionSimpleLinearRegression.h index 8c029855a26..61f10895de6 100644 --- a/src/AggregateFunctions/AggregateFunctionSimpleLinearRegression.h +++ b/src/AggregateFunctions/AggregateFunctionSimpleLinearRegression.h @@ -168,6 +168,8 @@ public: ); } + bool allocatesMemoryInArena() const override { return false; } + void insertResultInto( AggregateDataPtr place, IColumn & to, diff --git a/src/AggregateFunctions/AggregateFunctionStatistics.cpp b/src/AggregateFunctions/AggregateFunctionStatistics.cpp index 9e3aa0962db..d8c8c2ab210 100644 --- a/src/AggregateFunctions/AggregateFunctionStatistics.cpp +++ b/src/AggregateFunctions/AggregateFunctionStatistics.cpp @@ -36,7 +36,7 @@ AggregateFunctionPtr createAggregateFunctionStatisticsBinary(const std::string & assertNoParameters(name, parameters); assertBinary(name, argument_types); - AggregateFunctionPtr res(createWithTwoNumericTypes(*argument_types[0], *argument_types[1], argument_types)); + AggregateFunctionPtr res(createWithTwoBasicNumericTypes(*argument_types[0], *argument_types[1], argument_types)); if (!res) throw Exception("Illegal types " + argument_types[0]->getName() + " and " + argument_types[1]->getName() + " of arguments for aggregate function " + name, ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); diff --git a/src/AggregateFunctions/AggregateFunctionStatistics.h b/src/AggregateFunctions/AggregateFunctionStatistics.h index 76b6e843c15..2b778f85d99 100644 --- a/src/AggregateFunctions/AggregateFunctionStatistics.h +++ b/src/AggregateFunctions/AggregateFunctionStatistics.h @@ -13,7 +13,7 @@ namespace DB { -namespace +namespace detail { /// This function returns true if both values are large and comparable. 
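The `areComparable` guard (now reached through the named `detail` namespace) selects between two algebraically equal but numerically different ways of merging a mean: the pooled formula loses precision when one side dominates, while the incremental form stays stable. A standalone sketch under stated assumptions follows; the predicate here is an assumed shape, not the library's actual test, which treats counts as comparable when both are large and nearly equal.

```cpp
#include <algorithm>
#include <cstdint>
#include <iostream>

// Assumed shape of the predicate; the actual thresholds live in
// AggregateFunctionStatistics.h and may differ.
static bool areComparable(uint64_t a, uint64_t b)
{
    if (a == 0 || b == 0)
        return false;
    uint64_t lo = std::min(a, b);
    uint64_t hi = std::max(a, b);
    return lo > 10000 && (1.0 - static_cast<double>(lo) / hi) < 0.001;
}

// Mirrors the merge in the hunk below: pooled formula when both counts
// carry weight, incremental update when one side dominates.
static double mergeMean(uint64_t count, double mean, uint64_t source_count, double source_mean)
{
    uint64_t total_count = count + source_count;
    double delta = mean - source_mean;

    if (areComparable(count, source_count))
        return (source_count * source_mean + count * mean) / total_count;

    // Dominant-side path: shift source_mean by the weighted delta, which
    // avoids forming huge intermediate products.
    return source_mean + delta * (static_cast<double>(count) / total_count);
}

int main()
{
    // Three stray rows merged into forty million: the incremental path is taken.
    std::cout << mergeMean(3, 1e12 + 5.0, 40'000'000, 1e12) << '\n';
}
```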
@@ -72,7 +72,7 @@ public: Float64 factor = static_cast(count * source.count) / total_count; Float64 delta = mean - source.mean; - if (areComparable(count, source.count)) + if (detail::areComparable(count, source.count)) mean = (source.count * source.mean + count * mean) / total_count; else mean = source.mean + delta * (static_cast(count) / total_count); @@ -123,6 +123,8 @@ public: return std::make_shared(); } + bool allocatesMemoryInArena() const override { return false; } + void add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena *) const override { this->data(place).update(*columns[0], row_num); @@ -300,7 +302,7 @@ public: Float64 left_delta = left_mean - source.left_mean; Float64 right_delta = right_mean - source.right_mean; - if (areComparable(count, source.count)) + if (detail::areComparable(count, source.count)) { left_mean = (source.count * source.left_mean + count * left_mean) / total_count; right_mean = (source.count * source.right_mean + count * right_mean) / total_count; @@ -375,6 +377,8 @@ public: return std::make_shared(); } + bool allocatesMemoryInArena() const override { return false; } + void add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena *) const override { this->data(place).update(*columns[0], *columns[1], row_num); diff --git a/src/AggregateFunctions/AggregateFunctionStatisticsSimple.cpp b/src/AggregateFunctions/AggregateFunctionStatisticsSimple.cpp index c7c2f9025ed..5dde84c1b64 100644 --- a/src/AggregateFunctions/AggregateFunctionStatisticsSimple.cpp +++ b/src/AggregateFunctions/AggregateFunctionStatisticsSimple.cpp @@ -41,7 +41,7 @@ AggregateFunctionPtr createAggregateFunctionStatisticsBinary(const std::string & assertNoParameters(name, parameters); assertBinary(name, argument_types); - AggregateFunctionPtr res(createWithTwoNumericTypes(*argument_types[0], *argument_types[1], argument_types)); + AggregateFunctionPtr res(createWithTwoBasicNumericTypes(*argument_types[0], *argument_types[1], argument_types)); if (!res) throw Exception("Illegal types " + argument_types[0]->getName() + " and " + argument_types[1]->getName() + " of arguments for aggregate function " + name, ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); diff --git a/src/AggregateFunctions/AggregateFunctionStatisticsSimple.h b/src/AggregateFunctions/AggregateFunctionStatisticsSimple.h index 9903e2f6eaa..eaf119a6264 100644 --- a/src/AggregateFunctions/AggregateFunctionStatisticsSimple.h +++ b/src/AggregateFunctions/AggregateFunctionStatisticsSimple.h @@ -121,6 +121,8 @@ public: return std::make_shared>(); } + bool allocatesMemoryInArena() const override { return false; } + void add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena *) const override { if constexpr (StatFunc::num_args == 2) @@ -129,11 +131,10 @@ public: static_cast(static_cast(*columns[1]).getData()[row_num])); else { - if constexpr (std::is_same_v) + if constexpr (IsDecimalNumber) { this->data(place).add(static_cast( - static_cast(*columns[0]).getData()[row_num].value - )); + static_cast(*columns[0]).getData()[row_num].value)); } else this->data(place).add( diff --git a/src/AggregateFunctions/AggregateFunctionSum.h b/src/AggregateFunctions/AggregateFunctionSum.h index bd1f9fc302e..1748458f6d8 100644 --- a/src/AggregateFunctions/AggregateFunctionSum.h +++ b/src/AggregateFunctions/AggregateFunctionSum.h @@ -314,6 +314,8 @@ public: return std::make_shared(); } + bool allocatesMemoryInArena() const override { return false; } + void 
add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena *) const override { const auto & column = assert_cast(*columns[0]); diff --git a/src/AggregateFunctions/AggregateFunctionSumCount.cpp b/src/AggregateFunctions/AggregateFunctionSumCount.cpp new file mode 100644 index 00000000000..b979779d907 --- /dev/null +++ b/src/AggregateFunctions/AggregateFunctionSumCount.cpp @@ -0,0 +1,49 @@ +#include +#include +#include +#include +#include "registerAggregateFunctions.h" + +namespace DB +{ +namespace ErrorCodes +{ + extern const int ILLEGAL_TYPE_OF_ARGUMENT; +} + +namespace +{ +bool allowType(const DataTypePtr& type) noexcept +{ + const WhichDataType t(type); + return t.isInt() || t.isUInt() || t.isFloat() || t.isDecimal(); +} + +AggregateFunctionPtr createAggregateFunctionSumCount(const std::string & name, const DataTypes & argument_types, const Array & parameters) +{ + assertNoParameters(name, parameters); + assertUnary(name, argument_types); + + AggregateFunctionPtr res; + DataTypePtr data_type = argument_types[0]; + if (!allowType(data_type)) + throw Exception("Illegal type " + data_type->getName() + " of argument for aggregate function " + name, + ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); + + if (isDecimal(data_type)) + res.reset(createWithDecimalType( + *data_type, argument_types, getDecimalScale(*data_type))); + else + res.reset(createWithNumericType(*data_type, argument_types)); + + return res; +} + +} + +void registerAggregateFunctionSumCount(AggregateFunctionFactory & factory) +{ + factory.registerFunction("sumCount", createAggregateFunctionSumCount); +} + +} diff --git a/src/AggregateFunctions/AggregateFunctionSumCount.h b/src/AggregateFunctions/AggregateFunctionSumCount.h new file mode 100644 index 00000000000..1026b6272ba --- /dev/null +++ b/src/AggregateFunctions/AggregateFunctionSumCount.h @@ -0,0 +1,55 @@ +#pragma once + +#include +#include +#include + + +namespace DB +{ +template +using DecimalOrNumberDataType = std::conditional_t, DataTypeDecimal>, DataTypeNumber>>; +template +class AggregateFunctionSumCount final : public AggregateFunctionAvgBase, UInt64, AggregateFunctionSumCount> +{ +public: + using Base = AggregateFunctionAvgBase, UInt64, AggregateFunctionSumCount>; + + AggregateFunctionSumCount(const DataTypes & argument_types_, UInt32 num_scale_ = 0) + : Base(argument_types_, num_scale_), scale(num_scale_) {} + + DataTypePtr getReturnType() const override + { + DataTypes types; + if constexpr (IsDecimalNumber) + types.emplace_back(std::make_shared>(DecimalOrNumberDataType::maxPrecision(), scale)); + else + types.emplace_back(std::make_shared>()); + + types.emplace_back(std::make_shared()); + + return std::make_shared(types); + } + + void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const final + { + assert_cast> &>((assert_cast(to)).getColumn(0)).getData().push_back( + this->data(place).numerator); + + assert_cast((assert_cast(to)).getColumn(1)).getData().push_back( + this->data(place).denominator); + } + + void NO_SANITIZE_UNDEFINED add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena *) const final + { + this->data(place).numerator += static_cast &>(*columns[0]).getData()[row_num]; + ++this->data(place).denominator; + } + + String getName() const final { return "sumCount"; } + +private: + UInt32 scale; +}; + +} diff --git a/src/AggregateFunctions/AggregateFunctionSumMap.h b/src/AggregateFunctions/AggregateFunctionSumMap.h index d8b19bad62f..7819bb8752c 100644 --- 
a/src/AggregateFunctions/AggregateFunctionSumMap.h +++ b/src/AggregateFunctions/AggregateFunctionSumMap.h @@ -140,6 +140,8 @@ public: return std::make_shared(types); } + bool allocatesMemoryInArena() const override { return false; } + static const auto & getArgumentColumns(const IColumn**& columns) { if constexpr (tuple_argument) diff --git a/src/AggregateFunctions/AggregateFunctionTTest.h b/src/AggregateFunctions/AggregateFunctionTTest.h index 3c9873ebd1e..5617adf38dd 100644 --- a/src/AggregateFunctions/AggregateFunctionTTest.h +++ b/src/AggregateFunctions/AggregateFunctionTTest.h @@ -109,6 +109,8 @@ public: ); } + bool allocatesMemoryInArena() const override { return false; } + void add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena *) const override { Float64 value = columns[0]->getFloat64(row_num); diff --git a/src/AggregateFunctions/AggregateFunctionTopK.cpp b/src/AggregateFunctions/AggregateFunctionTopK.cpp index e32da02f442..79a39817ea5 100644 --- a/src/AggregateFunctions/AggregateFunctionTopK.cpp +++ b/src/AggregateFunctions/AggregateFunctionTopK.cpp @@ -2,9 +2,10 @@ #include #include #include +#include #include #include -#include "registerAggregateFunctions.h" + #define TOP_K_MAX_SIZE 0xFFFFFF diff --git a/src/AggregateFunctions/AggregateFunctionTopK.h b/src/AggregateFunctions/AggregateFunctionTopK.h index 43320a96b99..5e5e7d07cec 100644 --- a/src/AggregateFunctions/AggregateFunctionTopK.h +++ b/src/AggregateFunctions/AggregateFunctionTopK.h @@ -10,7 +10,6 @@ #include #include -#include #include #include @@ -50,6 +49,8 @@ public: return std::make_shared(this->argument_types[0]); } + bool allocatesMemoryInArena() const override { return false; } + void add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena *) const override { auto & set = this->data(place).value; diff --git a/src/AggregateFunctions/AggregateFunctionUniq.cpp b/src/AggregateFunctions/AggregateFunctionUniq.cpp index 32fdb188529..bc44eb8eece 100644 --- a/src/AggregateFunctions/AggregateFunctionUniq.cpp +++ b/src/AggregateFunctions/AggregateFunctionUniq.cpp @@ -132,6 +132,12 @@ void registerAggregateFunctionsUniq(AggregateFunctionFactory & factory) factory.registerFunction("uniqExact", {createAggregateFunctionUniq>, properties}); + +#if USE_DATASKETCHES + factory.registerFunction("uniqTheta", + {createAggregateFunctionUniq, properties}); +#endif + } } diff --git a/src/AggregateFunctions/AggregateFunctionUniq.h b/src/AggregateFunctions/AggregateFunctionUniq.h index 4e27922ba7c..cb894cc3eca 100644 --- a/src/AggregateFunctions/AggregateFunctionUniq.h +++ b/src/AggregateFunctions/AggregateFunctionUniq.h @@ -22,6 +22,7 @@ #include #include +#include #include @@ -69,7 +70,7 @@ struct AggregateFunctionUniqHLL12Data }; template <> -struct AggregateFunctionUniqHLL12Data +struct AggregateFunctionUniqHLL12Data { using Set = HyperLogLogWithSmallSetOptimization; Set set; @@ -124,6 +125,19 @@ struct AggregateFunctionUniqExactData }; +/// uniqTheta +#if USE_DATASKETCHES + +struct AggregateFunctionUniqThetaData +{ + using Set = ThetaSketchData; + Set set; + + static String getName() { return "uniqTheta"; } +}; + +#endif + namespace detail { @@ -133,16 +147,14 @@ template struct AggregateFunctionUniqTraits { static UInt64 hash(T x) { - if constexpr (std::is_same_v) - { - return sipHash64(x); - } - else if constexpr (std::is_same_v || std::is_same_v) + if constexpr (std::is_same_v || std::is_same_v) { return ext::bit_cast(x); } else if constexpr (sizeof(T) <= 
sizeof(UInt64)) + { return x; + } else return DefaultHash64(x); } @@ -184,11 +196,17 @@ struct OneAdder UInt128 key; SipHash hash; hash.update(value.data, value.size); - hash.get128(key.low, key.high); + hash.get128(key); data.set.insert(key); } } +#if USE_DATASKETCHES + else if constexpr (std::is_same_v) + { + data.set.insertOriginal(column.getDataAt(row_num)); + } +#endif } }; @@ -210,6 +228,8 @@ public: return std::make_shared(); } + bool allocatesMemoryInArena() const override { return false; } + /// ALWAYS_INLINE is required to have better code layout for uniqHLL12 function void ALWAYS_INLINE add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena *) const override { @@ -265,6 +285,8 @@ public: return std::make_shared(); } + bool allocatesMemoryInArena() const override { return false; } + void add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena *) const override { this->data(place).set.insert(typename Data::Set::value_type( diff --git a/src/AggregateFunctions/AggregateFunctionUniqCombined.cpp b/src/AggregateFunctions/AggregateFunctionUniqCombined.cpp index c0064044f95..84a9267ffe0 100644 --- a/src/AggregateFunctions/AggregateFunctionUniqCombined.cpp +++ b/src/AggregateFunctions/AggregateFunctionUniqCombined.cpp @@ -3,11 +3,13 @@ #include #include +#include + #include #include #include -#include "registerAggregateFunctions.h" + namespace DB { diff --git a/src/AggregateFunctions/AggregateFunctionUniqCombined.h b/src/AggregateFunctions/AggregateFunctionUniqCombined.h index c9327594670..f425a343caa 100644 --- a/src/AggregateFunctions/AggregateFunctionUniqCombined.h +++ b/src/AggregateFunctions/AggregateFunctionUniqCombined.h @@ -1,7 +1,6 @@ #pragma once #include -#include #include #include #include @@ -141,6 +140,8 @@ public: return std::make_shared(); } + bool allocatesMemoryInArena() const override { return false; } + void add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena *) const override { if constexpr (!std::is_same_v) @@ -211,6 +212,8 @@ public: return std::make_shared(); } + bool allocatesMemoryInArena() const override { return false; } + void add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena *) const override { this->data(place).set.insert(typename AggregateFunctionUniqCombinedData::Set::value_type( diff --git a/src/AggregateFunctions/AggregateFunctionUniqUpTo.cpp b/src/AggregateFunctions/AggregateFunctionUniqUpTo.cpp index 9befc515de6..8195bd8964c 100644 --- a/src/AggregateFunctions/AggregateFunctionUniqUpTo.cpp +++ b/src/AggregateFunctions/AggregateFunctionUniqUpTo.cpp @@ -1,11 +1,11 @@ #include #include #include +#include #include #include #include #include -#include "registerAggregateFunctions.h" namespace DB diff --git a/src/AggregateFunctions/AggregateFunctionUniqUpTo.h b/src/AggregateFunctions/AggregateFunctionUniqUpTo.h index d2c765137bc..5dd62c96a9e 100644 --- a/src/AggregateFunctions/AggregateFunctionUniqUpTo.h +++ b/src/AggregateFunctions/AggregateFunctionUniqUpTo.h @@ -1,6 +1,7 @@ #pragma once -#include +#include + #include #include @@ -17,7 +18,7 @@ #include -#if !__clang__ +#if !defined(__clang__) #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Warray-bounds" #endif @@ -35,7 +36,7 @@ namespace DB */ template -struct __attribute__((__packed__)) AggregateFunctionUniqUpToData +struct AggregateFunctionUniqUpToData { /** If count == threshold + 1 - this means that it is "overflowed" (values greater than threshold). 
* In this case (for example, after calling the merge function), the `data` array does not necessarily contain the initialized values @@ -43,7 +44,17 @@ struct __attribute__((__packed__)) AggregateFunctionUniqUpToData * then set count to `threshold + 1`, and values from another state are not copied. */ UInt8 count = 0; - T data[0]; + char data_ptr[0]; + + T load(size_t i) const + { + return unalignedLoad(data_ptr + i * sizeof(T)); + } + + void store(size_t i, const T & x) + { + unalignedStore(data_ptr + i * sizeof(T), x); + } size_t size() const { @@ -60,12 +71,12 @@ struct __attribute__((__packed__)) AggregateFunctionUniqUpToData /// Linear search for the matching element. for (size_t i = 0; i < count; ++i) - if (data[i] == x) + if (load(i) == x) return; /// Did not find the matching element. If there is room for one more element, insert it. if (count < threshold) - data[count] = x; + store(count, x); /// After increasing count, the state may be overflowed. ++count; @@ -84,7 +95,7 @@ struct __attribute__((__packed__)) AggregateFunctionUniqUpToData } for (size_t i = 0; i < rhs.count; ++i) - insert(rhs.data[i], threshold); + insert(rhs.load(i), threshold); } void write(WriteBuffer & wb, UInt8 threshold) const @@ -93,7 +104,7 @@ struct __attribute__((__packed__)) AggregateFunctionUniqUpToData /// Write values only if the state is not overflowed. Otherwise, they are not needed, and only the fact that the state is overflowed is important. if (count <= threshold) - wb.write(reinterpret_cast(data), count * sizeof(data[0])); + wb.write(data_ptr, count * sizeof(T)); } void read(ReadBuffer & rb, UInt8 threshold) @@ -101,7 +112,7 @@ struct __attribute__((__packed__)) AggregateFunctionUniqUpToData readBinary(count, rb); if (count <= threshold) - rb.read(reinterpret_cast(data), count * sizeof(data[0])); + rb.read(data_ptr, count * sizeof(T)); } /// ALWAYS_INLINE is required to have better code layout for uniqUpTo function @@ -184,6 +195,8 @@ public: return std::make_shared(); } + bool allocatesMemoryInArena() const override { return false; } + /// ALWAYS_INLINE is required to have better code layout for uniqUpTo function void ALWAYS_INLINE add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena *) const override { @@ -247,6 +260,8 @@ public: return std::make_shared(); } + bool allocatesMemoryInArena() const override { return false; } + void add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena *) const override { this->data(place).insert(UInt64(UniqVariadicHash::apply(num_args, columns, row_num)), threshold); @@ -276,7 +291,7 @@ public: } -#if !__clang__ +#if !defined(__clang__) #pragma GCC diagnostic pop #endif diff --git a/src/AggregateFunctions/AggregateFunctionWindowFunnel.h b/src/AggregateFunctions/AggregateFunctionWindowFunnel.h index 9644b8d9ea4..9f399a9f25b 100644 --- a/src/AggregateFunctions/AggregateFunctionWindowFunnel.h +++ b/src/AggregateFunctions/AggregateFunctionWindowFunnel.h @@ -247,6 +247,8 @@ public: return std::make_shared(); } + bool allocatesMemoryInArena() const override { return false; } + AggregateFunctionPtr getOwnNullAdapter( const AggregateFunctionPtr & nested_function, const DataTypes & arguments, const Array & params, const AggregateFunctionProperties & /*properties*/) const override diff --git a/src/AggregateFunctions/CMakeLists.txt b/src/AggregateFunctions/CMakeLists.txt index c23831be94e..b6dbf2b4eb0 100644 --- a/src/AggregateFunctions/CMakeLists.txt +++ b/src/AggregateFunctions/CMakeLists.txt @@ -1,4 +1,4 
@@ -include(${ClickHouse_SOURCE_DIR}/cmake/dbms_glob_sources.cmake) +include("${ClickHouse_SOURCE_DIR}/cmake/dbms_glob_sources.cmake") add_headers_and_sources(clickhouse_aggregate_functions .) list(REMOVE_ITEM clickhouse_aggregate_functions_sources @@ -24,6 +24,6 @@ list(REMOVE_ITEM clickhouse_aggregate_functions_headers add_library(clickhouse_aggregate_functions ${clickhouse_aggregate_functions_sources}) target_link_libraries(clickhouse_aggregate_functions PRIVATE dbms PUBLIC ${CITYHASH_LIBRARIES}) -if(ENABLE_TESTS) - add_subdirectory(tests) +if(ENABLE_EXAMPLES) + add_subdirectory(examples) endif() diff --git a/src/AggregateFunctions/Helpers.h b/src/AggregateFunctions/Helpers.h index 2b21b745a0e..d1f1830eb27 100644 --- a/src/AggregateFunctions/Helpers.h +++ b/src/AggregateFunctions/Helpers.h @@ -15,12 +15,12 @@ M(Float32) \ M(Float64) -// No UInt128 here because of the name conflict #define FOR_NUMERIC_TYPES(M) \ M(UInt8) \ M(UInt16) \ M(UInt32) \ M(UInt64) \ + M(UInt128) \ M(UInt256) \ M(Int8) \ M(Int16) \ @@ -31,12 +31,6 @@ M(Float32) \ M(Float64) -#define FOR_DECIMAL_TYPES(M) \ - M(Decimal32) \ - M(Decimal64) \ - M(Decimal128) - - namespace DB { @@ -115,6 +109,8 @@ static IAggregateFunction * createWithUnsignedIntegerType(const IDataType & argu if (which.idx == TypeIndex::UInt16) return new AggregateFunctionTemplate>(std::forward(args)...); if (which.idx == TypeIndex::UInt32) return new AggregateFunctionTemplate>(std::forward(args)...); if (which.idx == TypeIndex::UInt64) return new AggregateFunctionTemplate>(std::forward(args)...); + if (which.idx == TypeIndex::UInt128) return new AggregateFunctionTemplate>(std::forward(args)...); + if (which.idx == TypeIndex::UInt256) return new AggregateFunctionTemplate>(std::forward(args)...); return nullptr; } @@ -125,11 +121,11 @@ static IAggregateFunction * createWithNumericBasedType(const IDataType & argumen if (f) return f; - /// expects that DataTypeDate based on UInt16, DataTypeDateTime based on UInt32 and UUID based on UInt128 + /// expects that DataTypeDate based on UInt16, DataTypeDateTime based on UInt32 WhichDataType which(argument_type); if (which.idx == TypeIndex::Date) return new AggregateFunctionTemplate(std::forward(args)...); if (which.idx == TypeIndex::DateTime) return new AggregateFunctionTemplate(std::forward(args)...); - if (which.idx == TypeIndex::UUID) return new AggregateFunctionTemplate(std::forward(args)...); + if (which.idx == TypeIndex::UUID) return new AggregateFunctionTemplate(std::forward(args)...); return nullptr; } @@ -190,6 +186,69 @@ static IAggregateFunction * createWithTwoNumericTypes(const IDataType & first_ty return nullptr; } +template class AggregateFunctionTemplate, typename... TArgs> +static IAggregateFunction * createWithTwoBasicNumericTypesSecond(const IDataType & second_type, TArgs && ... args) +{ + WhichDataType which(second_type); +#define DISPATCH(TYPE) \ + if (which.idx == TypeIndex::TYPE) return new AggregateFunctionTemplate(std::forward(args)...); + FOR_BASIC_NUMERIC_TYPES(DISPATCH) +#undef DISPATCH + return nullptr; +} + +template
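The `Helpers.h` changes extend a single dispatch idiom: a `FOR_*_TYPES(M)` X-macro expands into a chain of `TypeIndex` checks, each instantiating the function template for one concrete type, so adding `UInt128` support amounts to one new macro line plus one new `if`. Below is a self-contained sketch of that idiom; the toy type enum, `IFunction`, and `SumFunction` are stand-ins for `WhichDataType` and the real aggregate-function types, not ClickHouse's API.

```cpp
#include <cstdint>
#include <iostream>
#include <typeinfo>

// Toy stand-ins for TypeIndex / IAggregateFunction; illustration only.
enum class TypeIndex { UInt8, UInt16, UInt32, UInt64 };

struct IFunction
{
    virtual ~IFunction() = default;
    virtual const char * typeName() const = 0;
};

template <typename T>
struct SumFunction final : IFunction
{
    const char * typeName() const override { return typeid(T).name(); }
};

// Same X-macro pattern as FOR_NUMERIC_TYPES(M) in Helpers.h:
// one line per supported type, expanded wherever dispatch is needed.
#define FOR_TOY_TYPES(M) \
    M(UInt8, uint8_t) \
    M(UInt16, uint16_t) \
    M(UInt32, uint32_t) \
    M(UInt64, uint64_t)

template <template <typename> class FunctionTemplate>
static IFunction * createWithNumericType(TypeIndex idx)
{
#define DISPATCH(INDEX, TYPE) \
    if (idx == TypeIndex::INDEX) return new FunctionTemplate<TYPE>();
    FOR_TOY_TYPES(DISPATCH)
#undef DISPATCH
    return nullptr; // caller turns this into an illegal-type error
}

int main()
{
    IFunction * f = createWithNumericType<SumFunction>(TypeIndex::UInt32);
    std::cout << (f ? f->typeName() : "unsupported") << '\n';
    delete f;
}
```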