mirror of
https://github.com/ClickHouse/ClickHouse.git
synced 2024-11-21 23:21:59 +00:00
Merge branch 'master' into normalize-bigint
This commit is contained in:
commit
8f01af62d9
2
.gitattributes
vendored
2
.gitattributes
vendored
@ -1,2 +1,4 @@
|
|||||||
contrib/* linguist-vendored
|
contrib/* linguist-vendored
|
||||||
*.h linguist-language=C++
|
*.h linguist-language=C++
|
||||||
|
# to avoid frequent conflicts
|
||||||
|
tests/queries/0_stateless/arcadia_skip_list.txt text merge=union
|
||||||
|
2
.github/codecov.yml
vendored
2
.github/codecov.yml
vendored
@ -1,5 +1,5 @@
|
|||||||
codecov:
|
codecov:
|
||||||
max_report_age: off
|
max_report_age: "off"
|
||||||
strict_yaml_branch: "master"
|
strict_yaml_branch: "master"
|
||||||
|
|
||||||
ignore:
|
ignore:
|
||||||
|
36
.github/workflows/anchore-analysis.yml
vendored
36
.github/workflows/anchore-analysis.yml
vendored
@ -8,7 +8,7 @@
|
|||||||
|
|
||||||
name: Docker Container Scan (clickhouse-server)
|
name: Docker Container Scan (clickhouse-server)
|
||||||
|
|
||||||
on:
|
"on":
|
||||||
pull_request:
|
pull_request:
|
||||||
paths:
|
paths:
|
||||||
- docker/server/Dockerfile
|
- docker/server/Dockerfile
|
||||||
@ -20,20 +20,20 @@ jobs:
|
|||||||
Anchore-Build-Scan:
|
Anchore-Build-Scan:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout the code
|
- name: Checkout the code
|
||||||
uses: actions/checkout@v2
|
uses: actions/checkout@v2
|
||||||
- name: Build the Docker image
|
- name: Build the Docker image
|
||||||
run: |
|
run: |
|
||||||
cd docker/server
|
cd docker/server
|
||||||
perl -pi -e 's|=\$version||g' Dockerfile
|
perl -pi -e 's|=\$version||g' Dockerfile
|
||||||
docker build . --file Dockerfile --tag localbuild/testimage:latest
|
docker build . --file Dockerfile --tag localbuild/testimage:latest
|
||||||
- name: Run the local Anchore scan action itself with GitHub Advanced Security code scanning integration enabled
|
- name: Run the local Anchore scan action itself with GitHub Advanced Security code scanning integration enabled
|
||||||
uses: anchore/scan-action@v2
|
uses: anchore/scan-action@v2
|
||||||
id: scan
|
id: scan
|
||||||
with:
|
with:
|
||||||
image: "localbuild/testimage:latest"
|
image: "localbuild/testimage:latest"
|
||||||
acs-report-enable: true
|
acs-report-enable: true
|
||||||
- name: Upload Anchore Scan Report
|
- name: Upload Anchore Scan Report
|
||||||
uses: github/codeql-action/upload-sarif@v1
|
uses: github/codeql-action/upload-sarif@v1
|
||||||
with:
|
with:
|
||||||
sarif_file: ${{ steps.scan.outputs.sarif }}
|
sarif_file: ${{ steps.scan.outputs.sarif }}
|
||||||
|
32
.github/workflows/codeql-analysis.yml
vendored
32
.github/workflows/codeql-analysis.yml
vendored
@ -1,32 +0,0 @@
|
|||||||
# See the example here: https://github.com/github/codeql-action
|
|
||||||
|
|
||||||
name: "CodeQL Scanning"
|
|
||||||
|
|
||||||
on:
|
|
||||||
schedule:
|
|
||||||
- cron: '0 19 * * *'
|
|
||||||
jobs:
|
|
||||||
CodeQL-Build:
|
|
||||||
|
|
||||||
runs-on: self-hosted
|
|
||||||
timeout-minutes: 1440
|
|
||||||
|
|
||||||
steps:
|
|
||||||
- name: Checkout repository
|
|
||||||
uses: actions/checkout@v2
|
|
||||||
with:
|
|
||||||
fetch-depth: 2
|
|
||||||
submodules: 'recursive'
|
|
||||||
|
|
||||||
- name: Initialize CodeQL
|
|
||||||
uses: github/codeql-action/init@v1
|
|
||||||
|
|
||||||
with:
|
|
||||||
languages: cpp
|
|
||||||
|
|
||||||
- run: sudo apt-get update && sudo apt-get install -y git cmake python ninja-build gcc-10 g++-10 && mkdir build
|
|
||||||
- run: cd build && CC=gcc-10 CXX=g++-10 cmake ..
|
|
||||||
- run: cd build && ninja
|
|
||||||
|
|
||||||
- name: Perform CodeQL Analysis
|
|
||||||
uses: github/codeql-action/analyze@v1
|
|
6
.gitignore
vendored
6
.gitignore
vendored
@ -137,3 +137,9 @@ website/package-lock.json
|
|||||||
/prof
|
/prof
|
||||||
|
|
||||||
*.iml
|
*.iml
|
||||||
|
|
||||||
|
# data store
|
||||||
|
/programs/server/data
|
||||||
|
/programs/server/metadata
|
||||||
|
/programs/server/store
|
||||||
|
|
||||||
|
14
.gitmodules
vendored
14
.gitmodules
vendored
@ -93,7 +93,7 @@
|
|||||||
url = https://github.com/ClickHouse-Extras/libunwind.git
|
url = https://github.com/ClickHouse-Extras/libunwind.git
|
||||||
[submodule "contrib/simdjson"]
|
[submodule "contrib/simdjson"]
|
||||||
path = contrib/simdjson
|
path = contrib/simdjson
|
||||||
url = https://github.com/ClickHouse-Extras/simdjson.git
|
url = https://github.com/simdjson/simdjson.git
|
||||||
[submodule "contrib/rapidjson"]
|
[submodule "contrib/rapidjson"]
|
||||||
path = contrib/rapidjson
|
path = contrib/rapidjson
|
||||||
url = https://github.com/ClickHouse-Extras/rapidjson
|
url = https://github.com/ClickHouse-Extras/rapidjson
|
||||||
@ -133,7 +133,7 @@
|
|||||||
url = https://github.com/unicode-org/icu.git
|
url = https://github.com/unicode-org/icu.git
|
||||||
[submodule "contrib/flatbuffers"]
|
[submodule "contrib/flatbuffers"]
|
||||||
path = contrib/flatbuffers
|
path = contrib/flatbuffers
|
||||||
url = https://github.com/google/flatbuffers.git
|
url = https://github.com/ClickHouse-Extras/flatbuffers.git
|
||||||
[submodule "contrib/libc-headers"]
|
[submodule "contrib/libc-headers"]
|
||||||
path = contrib/libc-headers
|
path = contrib/libc-headers
|
||||||
url = https://github.com/ClickHouse-Extras/libc-headers.git
|
url = https://github.com/ClickHouse-Extras/libc-headers.git
|
||||||
@ -184,7 +184,7 @@
|
|||||||
url = https://github.com/ClickHouse-Extras/krb5
|
url = https://github.com/ClickHouse-Extras/krb5
|
||||||
[submodule "contrib/cyrus-sasl"]
|
[submodule "contrib/cyrus-sasl"]
|
||||||
path = contrib/cyrus-sasl
|
path = contrib/cyrus-sasl
|
||||||
url = https://github.com/cyrusimap/cyrus-sasl
|
url = https://github.com/ClickHouse-Extras/cyrus-sasl
|
||||||
branch = cyrus-sasl-2.1
|
branch = cyrus-sasl-2.1
|
||||||
[submodule "contrib/croaring"]
|
[submodule "contrib/croaring"]
|
||||||
path = contrib/croaring
|
path = contrib/croaring
|
||||||
@ -220,4 +220,10 @@
|
|||||||
url = https://github.com/ClickHouse-Extras/boringssl.git
|
url = https://github.com/ClickHouse-Extras/boringssl.git
|
||||||
[submodule "contrib/NuRaft"]
|
[submodule "contrib/NuRaft"]
|
||||||
path = contrib/NuRaft
|
path = contrib/NuRaft
|
||||||
url = https://github.com/eBay/NuRaft.git
|
url = https://github.com/ClickHouse-Extras/NuRaft.git
|
||||||
|
[submodule "contrib/nanodbc"]
|
||||||
|
path = contrib/nanodbc
|
||||||
|
url = https://github.com/ClickHouse-Extras/nanodbc.git
|
||||||
|
[submodule "contrib/datasketches-cpp"]
|
||||||
|
path = contrib/datasketches-cpp
|
||||||
|
url = https://github.com/ClickHouse-Extras/datasketches-cpp.git
|
||||||
|
16
.potato.yml
16
.potato.yml
@ -14,14 +14,14 @@ handlers:
|
|||||||
# The trigger for creating the Yandex.Tracker issue. When the specified event occurs, it transfers PR data to Yandex.Tracker.
|
# The trigger for creating the Yandex.Tracker issue. When the specified event occurs, it transfers PR data to Yandex.Tracker.
|
||||||
github:pullRequest:labeled:
|
github:pullRequest:labeled:
|
||||||
data:
|
data:
|
||||||
# The Yandex.Tracker queue to create the issue in. Each issue in Tracker belongs to one of the project queues.
|
# The Yandex.Tracker queue to create the issue in. Each issue in Tracker belongs to one of the project queues.
|
||||||
queue: CLICKHOUSEDOCS
|
queue: CLICKHOUSEDOCS
|
||||||
# The issue title.
|
# The issue title.
|
||||||
summary: '[Potato] Pull Request #{{pullRequest.number}}'
|
summary: '[Potato] Pull Request #{{pullRequest.number}}'
|
||||||
# The issue description.
|
# The issue description.
|
||||||
description: >
|
description: >
|
||||||
{{pullRequest.description}}
|
{{pullRequest.description}}
|
||||||
|
|
||||||
Ссылка на Pull Request: {{pullRequest.webUrl}}
|
Ссылка на Pull Request: {{pullRequest.webUrl}}
|
||||||
# The condition for creating the Yandex.Tracker issue.
|
# The condition for creating the Yandex.Tracker issue.
|
||||||
condition: eventPayload.labels.filter(label => ['pr-feature'].includes(label.name)).length
|
condition: eventPayload.labels.filter(label => ['pr-feature'].includes(label.name)).length
|
||||||
|
45
.pylintrc
Normal file
45
.pylintrc
Normal file
@ -0,0 +1,45 @@
|
|||||||
|
# vim: ft=config
|
||||||
|
|
||||||
|
[BASIC]
|
||||||
|
max-module-lines=2000
|
||||||
|
# due to SQL
|
||||||
|
max-line-length=200
|
||||||
|
# Drop/decrease them one day:
|
||||||
|
max-branches=50
|
||||||
|
max-nested-blocks=10
|
||||||
|
max-statements=200
|
||||||
|
|
||||||
|
[FORMAT]
|
||||||
|
ignore-long-lines = (# )?<?https?://\S+>?$
|
||||||
|
|
||||||
|
[MESSAGES CONTROL]
|
||||||
|
disable = bad-continuation,
|
||||||
|
missing-docstring,
|
||||||
|
bad-whitespace,
|
||||||
|
too-few-public-methods,
|
||||||
|
invalid-name,
|
||||||
|
too-many-arguments,
|
||||||
|
keyword-arg-before-vararg,
|
||||||
|
too-many-locals,
|
||||||
|
too-many-instance-attributes,
|
||||||
|
cell-var-from-loop,
|
||||||
|
fixme,
|
||||||
|
too-many-public-methods,
|
||||||
|
wildcard-import,
|
||||||
|
unused-wildcard-import,
|
||||||
|
singleton-comparison,
|
||||||
|
# pytest.mark.parametrize is not callable (not-callable)
|
||||||
|
not-callable,
|
||||||
|
# https://github.com/PyCQA/pylint/issues/3882
|
||||||
|
# [Python 3.9] Value 'Optional' is unsubscriptable (unsubscriptable-object) (also Union)
|
||||||
|
unsubscriptable-object,
|
||||||
|
# Drop them one day:
|
||||||
|
redefined-outer-name,
|
||||||
|
broad-except,
|
||||||
|
bare-except,
|
||||||
|
no-else-return,
|
||||||
|
global-statement
|
||||||
|
|
||||||
|
[SIMILARITIES]
|
||||||
|
# due to SQL
|
||||||
|
min-similarity-lines=1000
|
15
.yamllint
Normal file
15
.yamllint
Normal file
@ -0,0 +1,15 @@
|
|||||||
|
# vi: ft=yaml
|
||||||
|
extends: default
|
||||||
|
|
||||||
|
rules:
|
||||||
|
indentation:
|
||||||
|
level: warning
|
||||||
|
indent-sequences: consistent
|
||||||
|
line-length:
|
||||||
|
# there are some bash -c "", so this is OK
|
||||||
|
max: 300
|
||||||
|
level: warning
|
||||||
|
comments:
|
||||||
|
min-spaces-from-content: 1
|
||||||
|
document-start:
|
||||||
|
present: false
|
484
CHANGELOG.md
484
CHANGELOG.md
@ -1,5 +1,489 @@
|
|||||||
|
## ClickHouse release 21.4
|
||||||
|
|
||||||
|
### ClickHouse release 21.4.1 2021-04-12
|
||||||
|
|
||||||
|
#### Backward Incompatible Change
|
||||||
|
|
||||||
|
* The `toStartOfIntervalFunction` will align hour intervals to the midnight (in previous versions they were aligned to the start of unix epoch). For example, `toStartOfInterval(x, INTERVAL 11 HOUR)` will split every day into three intervals: `00:00:00..10:59:59`, `11:00:00..21:59:59` and `22:00:00..23:59:59`. This behaviour is more suited for practical needs. This closes [#9510](https://github.com/ClickHouse/ClickHouse/issues/9510). [#22060](https://github.com/ClickHouse/ClickHouse/pull/22060) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* `Age` and `Precision` in graphite rollup configs should increase from retention to retention. Now it's checked and the wrong config raises an exception. [#21496](https://github.com/ClickHouse/ClickHouse/pull/21496) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||||
|
* Fix `cutToFirstSignificantSubdomainCustom()`/`firstSignificantSubdomainCustom()` returning wrong result for 3+ level domains present in custom top-level domain list. For input domains matching these custom top-level domains, the third-level domain was considered to be the first significant one. This is now fixed. This change may introduce incompatibility if the function is used in e.g. the sharding key. [#21946](https://github.com/ClickHouse/ClickHouse/pull/21946) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Column `keys` in table `system.dictionaries` was replaced to columns `key.names` and `key.types`. Columns `key.names`, `key.types`, `attribute.names`, `attribute.types` from `system.dictionaries` table does not require dictionary to be loaded. [#21884](https://github.com/ClickHouse/ClickHouse/pull/21884) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||||
|
* Now replicas that are processing the `ALTER TABLE ATTACH PART[ITION]` command search in their `detached/` folders before fetching the data from other replicas. As an implementation detail, a new command `ATTACH_PART` is introduced in the replicated log. Parts are searched and compared by their checksums. [#18978](https://github.com/ClickHouse/ClickHouse/pull/18978) ([Mike Kot](https://github.com/myrrc)). **Note**:
|
||||||
|
* `ATTACH PART[ITION]` queries may not work during cluster upgrade.
|
||||||
|
* It's not possible to rollback to older ClickHouse version after executing `ALTER ... ATTACH` query in new version as the old servers would fail to pass the `ATTACH_PART` entry in the replicated log.
|
||||||
|
* In this version, empty `<remote_url_allow_hosts></remote_url_allow_hosts>` will block all access to remote hosts while in previous versions it did nothing. If you want to keep old behaviour and you have empty `remote_url_allow_hosts` element in configuration file, remove it. [#20058](https://github.com/ClickHouse/ClickHouse/pull/20058) ([Vladimir Chebotarev](https://github.com/excitoon)).
|
||||||
|
|
||||||
|
|
||||||
|
#### New Feature
|
||||||
|
|
||||||
|
* Extended range of `DateTime64` to support dates from year 1925 to 2283. Improved support of `DateTime` around zero date (`1970-01-01`). [#9404](https://github.com/ClickHouse/ClickHouse/pull/9404) ([alexey-milovidov](https://github.com/alexey-milovidov), [Vasily Nemkov](https://github.com/Enmk)). Not all time and date functions work for the extended range of dates.
|
||||||
|
* Added support of Kerberos authentication for preconfigured users and HTTP requests (GSS-SPNEGO). [#14995](https://github.com/ClickHouse/ClickHouse/pull/14995) ([Denis Glazachev](https://github.com/traceon)).
|
||||||
|
* Add `prefer_column_name_to_alias` setting to use original column names instead of aliases. It is needed to be more compatible with common databases' aliasing rules. This is for [#9715](https://github.com/ClickHouse/ClickHouse/issues/9715) and [#9887](https://github.com/ClickHouse/ClickHouse/issues/9887). [#22044](https://github.com/ClickHouse/ClickHouse/pull/22044) ([Amos Bird](https://github.com/amosbird)).
|
||||||
|
* Added functions `dictGetChildren(dictionary, key)`, `dictGetDescendants(dictionary, key, level)`. Function `dictGetChildren` returns all children as an array of indexes. It is an inverse transformation for `dictGetHierarchy`. Function `dictGetDescendants` returns all descendants as if `dictGetChildren` was applied `level` times recursively. Zero `level` value is equivalent to infinity. Closes [#14656](https://github.com/ClickHouse/ClickHouse/issues/14656). [#22096](https://github.com/ClickHouse/ClickHouse/pull/22096) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||||
|
* Added `executable_pool` dictionary source. Close [#14528](https://github.com/ClickHouse/ClickHouse/issues/14528). [#21321](https://github.com/ClickHouse/ClickHouse/pull/21321) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||||
|
* Added table function `dictionary`. It works the same way as `Dictionary` engine. Closes [#21560](https://github.com/ClickHouse/ClickHouse/issues/21560). [#21910](https://github.com/ClickHouse/ClickHouse/pull/21910) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||||
|
* Support `Nullable` type for `PolygonDictionary` attribute. [#21890](https://github.com/ClickHouse/ClickHouse/pull/21890) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||||
|
* Functions `dictGet`, `dictHas` use current database name if it is not specified for dictionaries created with DDL. Closes [#21632](https://github.com/ClickHouse/ClickHouse/issues/21632). [#21859](https://github.com/ClickHouse/ClickHouse/pull/21859) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||||
|
* Added function `dictGetOrNull`. It works like `dictGet`, but return `Null` in case key was not found in dictionary. Closes [#22375](https://github.com/ClickHouse/ClickHouse/issues/22375). [#22413](https://github.com/ClickHouse/ClickHouse/pull/22413) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||||
|
* Added async update in `ComplexKeyCache`, `SSDCache`, `SSDComplexKeyCache` dictionaries. Added support for `Nullable` type in `Cache`, `ComplexKeyCache`, `SSDCache`, `SSDComplexKeyCache` dictionaries. Added support for multiple attributes fetch with `dictGet`, `dictGetOrDefault` functions. Fixes [#21517](https://github.com/ClickHouse/ClickHouse/issues/21517). [#20595](https://github.com/ClickHouse/ClickHouse/pull/20595) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||||
|
* Support `dictHas` function for `RangeHashedDictionary`. Fixes [#6680](https://github.com/ClickHouse/ClickHouse/issues/6680). [#19816](https://github.com/ClickHouse/ClickHouse/pull/19816) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||||
|
* Add function `timezoneOf` that returns the timezone name of `DateTime` or `DateTime64` data types. This does not close [#9959](https://github.com/ClickHouse/ClickHouse/issues/9959). Fix inconsistencies in function names: add aliases `timezone` and `timeZone` as well as `toTimezone` and `toTimeZone` and `timezoneOf` and `timeZoneOf`. [#22001](https://github.com/ClickHouse/ClickHouse/pull/22001) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Add new optional clause `GRANTEES` for `CREATE/ALTER USER` commands. It specifies users or roles which are allowed to receive grants from this user on condition this user has also all required access granted with grant option. By default `GRANTEES ANY` is used which means a user with grant option can grant to anyone. Syntax: `CREATE USER ... GRANTEES {user | role | ANY | NONE} [,...] [EXCEPT {user | role} [,...]]`. [#21641](https://github.com/ClickHouse/ClickHouse/pull/21641) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||||
|
* Add new column `slowdowns_count` to `system.clusters`. When using hedged requests, it shows how many times we switched to another replica because this replica was responding slowly. Also show actual value of `errors_count` in `system.clusters`. [#21480](https://github.com/ClickHouse/ClickHouse/pull/21480) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* Add `_partition_id` virtual column for `MergeTree*` engines. Allow to prune partitions by `_partition_id`. Add `partitionID()` function to calculate partition id string. [#21401](https://github.com/ClickHouse/ClickHouse/pull/21401) ([Amos Bird](https://github.com/amosbird)).
|
||||||
|
* Add function `isIPAddressInRange` to test if an IPv4 or IPv6 address is contained in a given CIDR network prefix. [#21329](https://github.com/ClickHouse/ClickHouse/pull/21329) ([PHO](https://github.com/depressed-pho)).
|
||||||
|
* Added new SQL command `ALTER TABLE 'table_name' UNFREEZE [PARTITION 'part_expr'] WITH NAME 'backup_name'`. This command is needed to properly remove 'freezed' partitions from all disks. [#21142](https://github.com/ClickHouse/ClickHouse/pull/21142) ([Pavel Kovalenko](https://github.com/Jokser)).
|
||||||
|
* Supports implicit key type conversion for JOIN. [#19885](https://github.com/ClickHouse/ClickHouse/pull/19885) ([Vladimir](https://github.com/vdimir)).
|
||||||
|
|
||||||
|
#### Experimental Feature
|
||||||
|
|
||||||
|
* Support `RANGE OFFSET` frame (for window functions) for floating point types. Implement `lagInFrame`/`leadInFrame` window functions, which are analogous to `lag`/`lead`, but respect the window frame. They are identical when the frame is `between unbounded preceding and unbounded following`. This closes [#5485](https://github.com/ClickHouse/ClickHouse/issues/5485). [#21895](https://github.com/ClickHouse/ClickHouse/pull/21895) ([Alexander Kuzmenkov](https://github.com/akuzm)).
|
||||||
|
* Zero-copy replication for `ReplicatedMergeTree` over S3 storage. [#16240](https://github.com/ClickHouse/ClickHouse/pull/16240) ([ianton-ru](https://github.com/ianton-ru)).
|
||||||
|
* Added possibility to migrate existing S3 disk to the schema with backup-restore capabilities. [#22070](https://github.com/ClickHouse/ClickHouse/pull/22070) ([Pavel Kovalenko](https://github.com/Jokser)).
|
||||||
|
|
||||||
|
#### Performance Improvement
|
||||||
|
|
||||||
|
* Supported parallel formatting in `clickhouse-local` and everywhere else. [#21630](https://github.com/ClickHouse/ClickHouse/pull/21630) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
|
||||||
|
* Support parallel parsing for `CSVWithNames` and `TSVWithNames` formats. This closes [#21085](https://github.com/ClickHouse/ClickHouse/issues/21085). [#21149](https://github.com/ClickHouse/ClickHouse/pull/21149) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
|
||||||
|
* Enable read with mmap IO for file ranges from 64 MiB (the settings `min_bytes_to_use_mmap_io`). It may lead to moderate performance improvement. [#22326](https://github.com/ClickHouse/ClickHouse/pull/22326) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Add cache for files read with `min_bytes_to_use_mmap_io` setting. It makes significant (2x and more) performance improvement when the value of the setting is small by avoiding frequent mmap/munmap calls and the consequent page faults. Note that mmap IO has major drawbacks that makes it less reliable in production (e.g. hung or SIGBUS on faulty disks; less controllable memory usage). Nevertheless it is good in benchmarks. [#22206](https://github.com/ClickHouse/ClickHouse/pull/22206) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Avoid unnecessary data copy when using codec `NONE`. Please note that codec `NONE` is mostly useless - it's recommended to always use compression (`LZ4` is by default). Despite the common belief, disabling compression may not improve performance (the opposite effect is possible). The `NONE` codec is useful in some cases: - when data is uncompressable; - for synthetic benchmarks. [#22145](https://github.com/ClickHouse/ClickHouse/pull/22145) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Faster `GROUP BY` with small `max_rows_to_group_by` and `group_by_overflow_mode='any'`. [#21856](https://github.com/ClickHouse/ClickHouse/pull/21856) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||||
|
* Optimize performance of queries like `SELECT ... FINAL ... WHERE`. Now in queries with `FINAL` it's allowed to move to `PREWHERE` columns, which are in sorting key. [#21830](https://github.com/ClickHouse/ClickHouse/pull/21830) ([foolchi](https://github.com/foolchi)).
|
||||||
|
* Improved performance by replacing `memcpy` to another implementation. This closes [#18583](https://github.com/ClickHouse/ClickHouse/issues/18583). [#21520](https://github.com/ClickHouse/ClickHouse/pull/21520) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Improve performance of aggregation in order of sorting key (with enabled setting `optimize_aggregation_in_order`). [#19401](https://github.com/ClickHouse/ClickHouse/pull/19401) ([Anton Popov](https://github.com/CurtizJ)).
|
||||||
|
|
||||||
|
#### Improvement
|
||||||
|
|
||||||
|
* Add connection pool for PostgreSQL table/database engine and dictionary source. Should fix [#21444](https://github.com/ClickHouse/ClickHouse/issues/21444). [#21839](https://github.com/ClickHouse/ClickHouse/pull/21839) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Support non-default table schema for postgres storage/table-function. Closes [#21701](https://github.com/ClickHouse/ClickHouse/issues/21701). [#21711](https://github.com/ClickHouse/ClickHouse/pull/21711) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Support replicas priority for postgres dictionary source. [#21710](https://github.com/ClickHouse/ClickHouse/pull/21710) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Introduce a new merge tree setting `min_bytes_to_rebalance_partition_over_jbod` which allows assigning new parts to different disks of a JBOD volume in a balanced way. [#16481](https://github.com/ClickHouse/ClickHouse/pull/16481) ([Amos Bird](https://github.com/amosbird)).
|
||||||
|
* Added `Grant`, `Revoke` and `System` values of `query_kind` column for corresponding queries in `system.query_log`. [#21102](https://github.com/ClickHouse/ClickHouse/pull/21102) ([Vasily Nemkov](https://github.com/Enmk)).
|
||||||
|
* Allow customizing timeouts for HTTP connections used for replication independently from other HTTP timeouts. [#20088](https://github.com/ClickHouse/ClickHouse/pull/20088) ([nvartolomei](https://github.com/nvartolomei)).
|
||||||
|
* Better exception message in client in case of exception while server is writing blocks. In previous versions client may get misleading message like `Data compressed with different methods`. [#22427](https://github.com/ClickHouse/ClickHouse/pull/22427) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix error `Directory tmp_fetch_XXX already exists` which could happen after failed fetch part. Delete temporary fetch directory if it already exists. Fixes [#14197](https://github.com/ClickHouse/ClickHouse/issues/14197). [#22411](https://github.com/ClickHouse/ClickHouse/pull/22411) ([nvartolomei](https://github.com/nvartolomei)).
|
||||||
|
* Fix MSan report for function `range` with `UInt256` argument (support for large integers is experimental). This closes [#22157](https://github.com/ClickHouse/ClickHouse/issues/22157). [#22387](https://github.com/ClickHouse/ClickHouse/pull/22387) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Add `current_database` column to `system.processes` table. It contains the current database of the query. [#22365](https://github.com/ClickHouse/ClickHouse/pull/22365) ([Alexander Kuzmenkov](https://github.com/akuzm)).
|
||||||
|
* Add case-insensitive history search/navigation and subword movement features to `clickhouse-client`. [#22105](https://github.com/ClickHouse/ClickHouse/pull/22105) ([Amos Bird](https://github.com/amosbird)).
|
||||||
|
* If tuple of NULLs, e.g. `(NULL, NULL)` is on the left hand side of `IN` operator with tuples of non-NULLs on the right hand side, e.g. `SELECT (NULL, NULL) IN ((0, 0), (3, 1))` return 0 instead of throwing an exception about incompatible types. The expression may also appear due to optimization of something like `SELECT (NULL, NULL) = (8, 0) OR (NULL, NULL) = (3, 2) OR (NULL, NULL) = (0, 0) OR (NULL, NULL) = (3, 1)`. This closes [#22017](https://github.com/ClickHouse/ClickHouse/issues/22017). [#22063](https://github.com/ClickHouse/ClickHouse/pull/22063) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Update used version of simdjson to 0.9.1. This fixes [#21984](https://github.com/ClickHouse/ClickHouse/issues/21984). [#22057](https://github.com/ClickHouse/ClickHouse/pull/22057) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||||
|
* Added case insensitive aliases for `CONNECTION_ID()` and `VERSION()` functions. This fixes [#22028](https://github.com/ClickHouse/ClickHouse/issues/22028). [#22042](https://github.com/ClickHouse/ClickHouse/pull/22042) ([Eugene Klimov](https://github.com/Slach)).
|
||||||
|
* Add option `strict_increase` to `windowFunnel` function to calculate each event once (resolve [#21835](https://github.com/ClickHouse/ClickHouse/issues/21835)). [#22025](https://github.com/ClickHouse/ClickHouse/pull/22025) ([Vladimir](https://github.com/vdimir)).
|
||||||
|
* If partition key of a `MergeTree` table does not include `Date` or `DateTime` columns but includes exactly one `DateTime64` column, expose its values in the `min_time` and `max_time` columns in `system.parts` and `system.parts_columns` tables. Add `min_time` and `max_time` columns to `system.parts_columns` table (this was an inconsistency with the `system.parts` table). This closes [#18244](https://github.com/ClickHouse/ClickHouse/issues/18244). [#22011](https://github.com/ClickHouse/ClickHouse/pull/22011) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Supported `replication_alter_partitions_sync=1` setting in `clickhouse-copier` for moving partitions from helping table to destination. Decreased default timeouts. Fixes [#21911](https://github.com/ClickHouse/ClickHouse/issues/21911). [#21912](https://github.com/ClickHouse/ClickHouse/pull/21912) ([turbo jason](https://github.com/songenjie)).
|
||||||
|
* Show path to data directory of `EmbeddedRocksDB` tables in system tables. [#21903](https://github.com/ClickHouse/ClickHouse/pull/21903) ([tavplubix](https://github.com/tavplubix)).
|
||||||
|
* Add profile event `HedgedRequestsChangeReplica`, change read data timeout from sec to ms. [#21886](https://github.com/ClickHouse/ClickHouse/pull/21886) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* DiskS3 (experimental feature under development). Fixed bug with the impossibility to move directory if the destination is not empty and cache disk is used. [#21837](https://github.com/ClickHouse/ClickHouse/pull/21837) ([Pavel Kovalenko](https://github.com/Jokser)).
|
||||||
|
* Better formatting for `Array` and `Map` data types in Web UI. [#21798](https://github.com/ClickHouse/ClickHouse/pull/21798) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Update clusters only if their configurations were updated. [#21685](https://github.com/ClickHouse/ClickHouse/pull/21685) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* Propagate query and session settings for distributed DDL queries. Set `distributed_ddl_entry_format_version` to 2 to enable this. Added `distributed_ddl_output_mode` setting. Supported modes: `none`, `throw` (default), `null_status_on_timeout` and `never_throw`. Miscellaneous fixes and improvements for `Replicated` database engine. [#21535](https://github.com/ClickHouse/ClickHouse/pull/21535) ([tavplubix](https://github.com/tavplubix)).
|
||||||
|
* If `PODArray` was instantiated with element size that is neither a fraction or a multiple of 16, buffer overflow was possible. No bugs in current releases exist. [#21533](https://github.com/ClickHouse/ClickHouse/pull/21533) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Add `last_error_time`/`last_error_message`/`last_error_stacktrace`/`remote` columns for `system.errors`. [#21529](https://github.com/ClickHouse/ClickHouse/pull/21529) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Add aliases `simpleJSONExtract/simpleJSONHas` to `visitParam/visitParamExtract{UInt, Int, Bool, Float, Raw, String}`. Fixes #21383. [#21519](https://github.com/ClickHouse/ClickHouse/pull/21519) ([fastio](https://github.com/fastio)).
|
||||||
|
* Add setting `optimize_skip_unused_shards_limit` to limit the number of sharding key values for `optimize_skip_unused_shards`. [#21512](https://github.com/ClickHouse/ClickHouse/pull/21512) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Improve `clickhouse-format` to not throw exception when there are extra spaces or comment after the last query, and throw exception early with readable message when format `ASTInsertQuery` with data. [#21311](https://github.com/ClickHouse/ClickHouse/pull/21311) ([flynn](https://github.com/ucasFL)).
|
||||||
|
* Improve support of integer keys in data type `Map`. [#21157](https://github.com/ClickHouse/ClickHouse/pull/21157) ([Anton Popov](https://github.com/CurtizJ)).
|
||||||
|
* MaterializeMySQL: attempt to reconnect to MySQL if the connection is lost. [#20961](https://github.com/ClickHouse/ClickHouse/pull/20961) ([Håvard Kvålen](https://github.com/havardk)).
|
||||||
|
* Support more cases to rewrite `CROSS JOIN` to `INNER JOIN`. [#20392](https://github.com/ClickHouse/ClickHouse/pull/20392) ([Vladimir](https://github.com/vdimir)).
|
||||||
|
* Do not create empty parts on INSERT when `optimize_on_insert` setting enabled. Fixes [#20304](https://github.com/ClickHouse/ClickHouse/issues/20304). [#20387](https://github.com/ClickHouse/ClickHouse/pull/20387) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* `MaterializeMySQL`: add minmax skipping index for `_version` column. [#20382](https://github.com/ClickHouse/ClickHouse/pull/20382) ([Stig Bakken](https://github.com/stigsb)).
|
||||||
|
* Add option `--backslash` for `clickhouse-format`, which can add a backslash at the end of each line of the formatted query. [#21494](https://github.com/ClickHouse/ClickHouse/pull/21494) ([flynn](https://github.com/ucasFL)).
|
||||||
|
* Now clickhouse will not throw `LOGICAL_ERROR` exception when we try to mutate the already covered part. Fixes [#22013](https://github.com/ClickHouse/ClickHouse/issues/22013). [#22291](https://github.com/ClickHouse/ClickHouse/pull/22291) ([alesapin](https://github.com/alesapin)).
|
||||||
|
|
||||||
|
#### Bug Fix
|
||||||
|
|
||||||
|
* Remove socket from epoll before cancelling packet receiver in `HedgedConnections` to prevent possible race. Fixes [#22161](https://github.com/ClickHouse/ClickHouse/issues/22161). [#22443](https://github.com/ClickHouse/ClickHouse/pull/22443) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* Add (missing) memory accounting in parallel parsing routines. In previous versions OOM was possible when the resultset contains very large blocks of data. This closes [#22008](https://github.com/ClickHouse/ClickHouse/issues/22008). [#22425](https://github.com/ClickHouse/ClickHouse/pull/22425) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix exception which may happen when `SELECT` has constant `WHERE` condition and source table has columns which names are digits. [#22270](https://github.com/ClickHouse/ClickHouse/pull/22270) ([LiuNeng](https://github.com/liuneng1994)).
|
||||||
|
* Fix query cancellation with `use_hedged_requests=0` and `async_socket_for_remote=1`. [#22183](https://github.com/ClickHouse/ClickHouse/pull/22183) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Fix uncaught exception in `InterserverIOHTTPHandler`. [#22146](https://github.com/ClickHouse/ClickHouse/pull/22146) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Fix docker entrypoint in case `http_port` is not in the config. [#22132](https://github.com/ClickHouse/ClickHouse/pull/22132) ([Ewout](https://github.com/devwout)).
|
||||||
|
* Fix error `Invalid number of rows in Chunk` in `JOIN` with `TOTALS` and `arrayJoin`. Closes [#19303](https://github.com/ClickHouse/ClickHouse/issues/19303). [#22129](https://github.com/ClickHouse/ClickHouse/pull/22129) ([Vladimir](https://github.com/vdimir)).
|
||||||
|
* Fix the background thread pool name which used to poll message from Kafka. The Kafka engine with the broken thread pool will not consume the message from message queue. [#22122](https://github.com/ClickHouse/ClickHouse/pull/22122) ([fastio](https://github.com/fastio)).
|
||||||
|
* Fix waiting for `OPTIMIZE` and `ALTER` queries for `ReplicatedMergeTree` table engines. Now the query will not hang when the table was detached or restarted. [#22118](https://github.com/ClickHouse/ClickHouse/pull/22118) ([alesapin](https://github.com/alesapin)).
|
||||||
|
* Disable `async_socket_for_remote`/`use_hedged_requests` for buggy Linux kernels. [#22109](https://github.com/ClickHouse/ClickHouse/pull/22109) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Docker entrypoint: avoid chown of `.` in case when `LOG_PATH` is empty. Closes [#22100](https://github.com/ClickHouse/ClickHouse/issues/22100). [#22102](https://github.com/ClickHouse/ClickHouse/pull/22102) ([filimonov](https://github.com/filimonov)).
|
||||||
|
* The function `decrypt` was lacking a check for the minimal size of data encrypted in `AEAD` mode. This closes [#21897](https://github.com/ClickHouse/ClickHouse/issues/21897). [#22064](https://github.com/ClickHouse/ClickHouse/pull/22064) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* In rare case, merge for `CollapsingMergeTree` may create granule with `index_granularity + 1` rows. Because of this, internal check, added in [#18928](https://github.com/ClickHouse/ClickHouse/issues/18928) (affects 21.2 and 21.3), may fail with error `Incomplete granules are not allowed while blocks are granules size`. This error did not allow parts to merge. [#21976](https://github.com/ClickHouse/ClickHouse/pull/21976) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||||
|
* Reverted [#15454](https://github.com/ClickHouse/ClickHouse/issues/15454) that may cause significant increase in memory usage while loading external dictionaries of hashed type. This closes [#21935](https://github.com/ClickHouse/ClickHouse/issues/21935). [#21948](https://github.com/ClickHouse/ClickHouse/pull/21948) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||||
|
* Prevent hedged connections overlaps (`Unknown packet 9 from server` error). [#21941](https://github.com/ClickHouse/ClickHouse/pull/21941) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Fix reading the HTTP POST request with "multipart/form-data" content type in some cases. [#21936](https://github.com/ClickHouse/ClickHouse/pull/21936) ([Ivan](https://github.com/abyss7)).
|
||||||
|
* Fix wrong `ORDER BY` results when a query contains window functions, and optimization for reading in primary key order is applied. Fixes [#21828](https://github.com/ClickHouse/ClickHouse/issues/21828). [#21915](https://github.com/ClickHouse/ClickHouse/pull/21915) ([Alexander Kuzmenkov](https://github.com/akuzm)).
|
||||||
|
* Fix deadlock in first catboost model execution. Closes [#13832](https://github.com/ClickHouse/ClickHouse/issues/13832). [#21844](https://github.com/ClickHouse/ClickHouse/pull/21844) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* Fix incorrect query result (and possible crash) which could happen when `WHERE` or `HAVING` condition is pushed before `GROUP BY`. Fixes [#21773](https://github.com/ClickHouse/ClickHouse/issues/21773). [#21841](https://github.com/ClickHouse/ClickHouse/pull/21841) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||||
|
* Better error handling and logging in `WriteBufferFromS3`. [#21836](https://github.com/ClickHouse/ClickHouse/pull/21836) ([Pavel Kovalenko](https://github.com/Jokser)).
|
||||||
|
* Fix possible crashes in aggregate functions with combinator `Distinct`, while using two-level aggregation. This is a follow-up fix of [#18365](https://github.com/ClickHouse/ClickHouse/pull/18365). Can only be reproduced in a production environment. [#21818](https://github.com/ClickHouse/ClickHouse/pull/21818) ([Amos Bird](https://github.com/amosbird)).
|
||||||
|
* Fix scalar subquery index analysis. This fixes [#21717](https://github.com/ClickHouse/ClickHouse/issues/21717) , which was introduced in [#18896](https://github.com/ClickHouse/ClickHouse/pull/18896). [#21766](https://github.com/ClickHouse/ClickHouse/pull/21766) ([Amos Bird](https://github.com/amosbird)).
|
||||||
|
* Fix bug for `ReplicatedMerge` table engines when `ALTER MODIFY COLUMN` query doesn't change the type of `Decimal` column if its size (32 bit or 64 bit) doesn't change. [#21728](https://github.com/ClickHouse/ClickHouse/pull/21728) ([alesapin](https://github.com/alesapin)).
|
||||||
|
* Fix possible infinite waiting when concurrent `OPTIMIZE` and `DROP` are run for `ReplicatedMergeTree`. [#21716](https://github.com/ClickHouse/ClickHouse/pull/21716) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Fix function `arrayElement` with type `Map` for constant integer arguments. [#21699](https://github.com/ClickHouse/ClickHouse/pull/21699) ([Anton Popov](https://github.com/CurtizJ)).
|
||||||
|
* Fix SIGSEGV on not existing attributes from `ip_trie` with `access_to_key_from_attributes`. [#21692](https://github.com/ClickHouse/ClickHouse/pull/21692) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Server now start accepting connections only after `DDLWorker` and dictionaries initialization. [#21676](https://github.com/ClickHouse/ClickHouse/pull/21676) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Add type conversion for keys of tables of type `Join` (previously led to SIGSEGV). [#21646](https://github.com/ClickHouse/ClickHouse/pull/21646) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Fix distributed requests cancellation (for example simple select from multiple shards with limit, i.e. `select * from remote('127.{2,3}', system.numbers) limit 100`) with `async_socket_for_remote=1`. [#21643](https://github.com/ClickHouse/ClickHouse/pull/21643) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Fix `fsync_part_directory` for horizontal merge. [#21642](https://github.com/ClickHouse/ClickHouse/pull/21642) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Remove unknown columns from joined table in `WHERE` for queries to external database engines (MySQL, PostgreSQL). close [#14614](https://github.com/ClickHouse/ClickHouse/issues/14614), close [#19288](https://github.com/ClickHouse/ClickHouse/issues/19288) (dup), close [#19645](https://github.com/ClickHouse/ClickHouse/issues/19645) (dup). [#21640](https://github.com/ClickHouse/ClickHouse/pull/21640) ([Vladimir](https://github.com/vdimir)).
|
||||||
|
* `std::terminate` was called if there is an error writing data into s3. [#21624](https://github.com/ClickHouse/ClickHouse/pull/21624) ([Vladimir](https://github.com/vdimir)).
|
||||||
|
* Fix possible error `Cannot find column` when `optimize_skip_unused_shards` is enabled and zero shards are used. [#21579](https://github.com/ClickHouse/ClickHouse/pull/21579) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* In case if query has constant `WHERE` condition, and setting `optimize_skip_unused_shards` enabled, all shards may be skipped and query could return incorrect empty result. [#21550](https://github.com/ClickHouse/ClickHouse/pull/21550) ([Amos Bird](https://github.com/amosbird)).
|
||||||
|
* Fix table function `clusterAllReplicas` returns wrong `_shard_num`. close [#21481](https://github.com/ClickHouse/ClickHouse/issues/21481). [#21498](https://github.com/ClickHouse/ClickHouse/pull/21498) ([flynn](https://github.com/ucasFL)).
|
||||||
|
* Fix that S3 table holds old credentials after config update. [#21457](https://github.com/ClickHouse/ClickHouse/pull/21457) ([Grigory Pervakov](https://github.com/GrigoryPervakov)).
|
||||||
|
* Fixed race on SSL object inside `SecureSocket` in Poco. [#21456](https://github.com/ClickHouse/ClickHouse/pull/21456) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
|
||||||
|
* Fix `Avro` format parsing for `Kafka`. Fixes [#21437](https://github.com/ClickHouse/ClickHouse/issues/21437). [#21438](https://github.com/ClickHouse/ClickHouse/pull/21438) ([Ilya Golshtein](https://github.com/ilejn)).
|
||||||
|
* Fix receive and send timeouts and non-blocking read in secure socket. [#21429](https://github.com/ClickHouse/ClickHouse/pull/21429) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* `force_drop_table` flag didn't work for `MATERIALIZED VIEW`, it's fixed. Fixes [#18943](https://github.com/ClickHouse/ClickHouse/issues/18943). [#20626](https://github.com/ClickHouse/ClickHouse/pull/20626) ([tavplubix](https://github.com/tavplubix)).
|
||||||
|
* Fix name clashes in `PredicateRewriteVisitor`. It caused incorrect `WHERE` filtration after full join. Close [#20497](https://github.com/ClickHouse/ClickHouse/issues/20497). [#20622](https://github.com/ClickHouse/ClickHouse/pull/20622) ([Vladimir](https://github.com/vdimir)).
|
||||||
|
|
||||||
|
#### Build/Testing/Packaging Improvement
|
||||||
|
|
||||||
|
* Add [Jepsen](https://github.com/jepsen-io/jepsen) tests for ClickHouse Keeper. [#21677](https://github.com/ClickHouse/ClickHouse/pull/21677) ([alesapin](https://github.com/alesapin)).
|
||||||
|
* Run stateless tests in parallel in CI. Depends on [#22181](https://github.com/ClickHouse/ClickHouse/issues/22181). [#22300](https://github.com/ClickHouse/ClickHouse/pull/22300) ([alesapin](https://github.com/alesapin)).
|
||||||
|
* Enable status check for [SQLancer](https://github.com/sqlancer/sqlancer) CI run. [#22015](https://github.com/ClickHouse/ClickHouse/pull/22015) ([Ilya Yatsishin](https://github.com/qoega)).
|
||||||
|
* Multiple preparations for PowerPC builds: Enable the bundled openldap on `ppc64le`. [#22487](https://github.com/ClickHouse/ClickHouse/pull/22487) ([Kfir Itzhak](https://github.com/mastertheknife)). Enable compiling on `ppc64le` with Clang. [#22476](https://github.com/ClickHouse/ClickHouse/pull/22476) ([Kfir Itzhak](https://github.com/mastertheknife)). Fix compiling boost on `ppc64le`. [#22474](https://github.com/ClickHouse/ClickHouse/pull/22474) ([Kfir Itzhak](https://github.com/mastertheknife)). Fix CMake error about internal CMake variable `CMAKE_ASM_COMPILE_OBJECT` not set on `ppc64le`. [#22469](https://github.com/ClickHouse/ClickHouse/pull/22469) ([Kfir Itzhak](https://github.com/mastertheknife)). Fix Fedora/RHEL/CentOS not finding `libclang_rt.builtins` on `ppc64le`. [#22458](https://github.com/ClickHouse/ClickHouse/pull/22458) ([Kfir Itzhak](https://github.com/mastertheknife)). Enable building with `jemalloc` on `ppc64le`. [#22447](https://github.com/ClickHouse/ClickHouse/pull/22447) ([Kfir Itzhak](https://github.com/mastertheknife)). Fix ClickHouse's config embedding and cctz's timezone embedding on `ppc64le`. [#22445](https://github.com/ClickHouse/ClickHouse/pull/22445) ([Kfir Itzhak](https://github.com/mastertheknife)). Fixed compiling on `ppc64le` and use the correct instruction pointer register on `ppc64le`. [#22430](https://github.com/ClickHouse/ClickHouse/pull/22430) ([Kfir Itzhak](https://github.com/mastertheknife)).
|
||||||
|
* Re-enable the S3 (AWS) library on `aarch64`. [#22484](https://github.com/ClickHouse/ClickHouse/pull/22484) ([Kfir Itzhak](https://github.com/mastertheknife)).
|
||||||
|
* Add `tzdata` to Docker containers because reading `ORC` formats requires it. This closes [#14156](https://github.com/ClickHouse/ClickHouse/issues/14156). [#22000](https://github.com/ClickHouse/ClickHouse/pull/22000) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Introduce 2 arguments for `clickhouse-server` image Dockerfile: `deb_location` & `single_binary_location`. [#21977](https://github.com/ClickHouse/ClickHouse/pull/21977) ([filimonov](https://github.com/filimonov)).
|
||||||
|
* Allow to use clang-tidy with release builds by enabling assertions if it is used. [#21914](https://github.com/ClickHouse/ClickHouse/pull/21914) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Add llvm-12 binaries name to search in cmake scripts. Implicit constants conversions to mute clang warnings. Updated submodules to build with CMake 3.19. Mute recursion in macro expansion in `readpassphrase` library. Deprecated `-fuse-ld` changed to `--ld-path` for clang. [#21597](https://github.com/ClickHouse/ClickHouse/pull/21597) ([Ilya Yatsishin](https://github.com/qoega)).
|
||||||
|
* Updating `docker/test/testflows/runner/dockerd-entrypoint.sh` to use Yandex dockerhub-proxy, because Docker Hub has enabled very restrictive rate limits [#21551](https://github.com/ClickHouse/ClickHouse/pull/21551) ([vzakaznikov](https://github.com/vzakaznikov)).
|
||||||
|
* Fix macOS shared lib build. [#20184](https://github.com/ClickHouse/ClickHouse/pull/20184) ([nvartolomei](https://github.com/nvartolomei)).
|
||||||
|
* Add `ctime` option to `zookeeper-dump-tree`. It allows to dump node creation time. [#21842](https://github.com/ClickHouse/ClickHouse/pull/21842) ([Ilya](https://github.com/HumanUser)).
|
||||||
|
|
||||||
|
|
||||||
|
## ClickHouse release 21.3 (LTS)
|
||||||
|
|
||||||
|
### ClickHouse release v21.3, 2021-03-12
|
||||||
|
|
||||||
|
#### Backward Incompatible Change
|
||||||
|
|
||||||
|
* Now it's not allowed to create MergeTree tables in old syntax with table TTL because it's just ignored. Attach of old tables is still possible. [#20282](https://github.com/ClickHouse/ClickHouse/pull/20282) ([alesapin](https://github.com/alesapin)).
|
||||||
|
* Now all case-insensitive function names will be rewritten to their canonical representations. This is needed for projection query routing (the upcoming feature). [#20174](https://github.com/ClickHouse/ClickHouse/pull/20174) ([Amos Bird](https://github.com/amosbird)).
|
||||||
|
* Fix creation of `TTL` in cases, when its expression is a function and it is the same as `ORDER BY` key. Now it's allowed to set custom aggregation to primary key columns in `TTL` with `GROUP BY`. Backward incompatible: For primary key columns, which are not in `GROUP BY` and aren't set explicitly now is applied function `any` instead of `max`, when TTL is expired. Also if you use TTL with `WHERE` or `GROUP BY` you can see exceptions at merges, while making rolling update. [#15450](https://github.com/ClickHouse/ClickHouse/pull/15450) ([Anton Popov](https://github.com/CurtizJ)).
|
||||||
|
|
||||||
|
#### New Feature
|
||||||
|
|
||||||
|
* Add file engine settings: `engine_file_empty_if_not_exists` and `engine_file_truncate_on_insert`. [#20620](https://github.com/ClickHouse/ClickHouse/pull/20620) ([M0r64n](https://github.com/M0r64n)).
|
||||||
|
* Add aggregate function `deltaSum` for summing the differences between consecutive rows. [#20057](https://github.com/ClickHouse/ClickHouse/pull/20057) ([Russ Frank](https://github.com/rf)).
|
||||||
|
* New `event_time_microseconds` column in `system.part_log` table. [#20027](https://github.com/ClickHouse/ClickHouse/pull/20027) ([Bharat Nallan](https://github.com/bharatnc)).
|
||||||
|
* Added `timezoneOffset(datetime)` function which will give the offset from UTC in seconds. This closes [#19850](https://github.com/ClickHouse/ClickHouse/issues/19850). [#19962](https://github.com/ClickHouse/ClickHouse/pull/19962) ([keenwolf](https://github.com/keen-wolf)).
|
||||||
|
* Add setting `insert_shard_id` to support insert data into specific shard from distributed table. [#19961](https://github.com/ClickHouse/ClickHouse/pull/19961) ([flynn](https://github.com/ucasFL)).
|
||||||
|
* Function `reinterpretAs` updated to support big integers. Fixes [#19691](https://github.com/ClickHouse/ClickHouse/issues/19691). [#19858](https://github.com/ClickHouse/ClickHouse/pull/19858) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||||
|
* Added Server Side Encryption Customer Keys (the `x-amz-server-side-encryption-customer-(key/md5)` header) support in S3 client. See [the link](https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). Closes [#19428](https://github.com/ClickHouse/ClickHouse/issues/19428). [#19748](https://github.com/ClickHouse/ClickHouse/pull/19748) ([Vladimir Chebotarev](https://github.com/excitoon)).
|
||||||
|
* Added `implicit_key` option for `executable` dictionary source. It allows to avoid printing key for every record if records come in the same order as the input keys. Implements [#14527](https://github.com/ClickHouse/ClickHouse/issues/14527). [#19677](https://github.com/ClickHouse/ClickHouse/pull/19677) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||||
|
* Add quota type `query_selects` and `query_inserts`. [#19603](https://github.com/ClickHouse/ClickHouse/pull/19603) ([JackyWoo](https://github.com/JackyWoo)).
|
||||||
|
* Add function `extractTextFromHTML` [#19600](https://github.com/ClickHouse/ClickHouse/pull/19600) ([zlx19950903](https://github.com/zlx19950903)), ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Tables with `MergeTree*` engine now have two new table-level settings for query concurrency control. Setting `max_concurrent_queries` limits the number of concurrently executed queries which are related to this table. Setting `min_marks_to_honor_max_concurrent_queries` tells to apply previous setting only if query reads at least this number of marks. [#19544](https://github.com/ClickHouse/ClickHouse/pull/19544) ([Amos Bird](https://github.com/amosbird)).
|
||||||
|
* Added `file` function to read file from user_files directory as a String. This is different from the `file` table function. This implements [#18851](https://github.com/ClickHouse/ClickHouse/issues/18851). [#19204](https://github.com/ClickHouse/ClickHouse/pull/19204) ([keenwolf](https://github.com/keen-wolf)).
|
||||||
|
|
||||||
|
#### Experimental feature
|
||||||
|
|
||||||
|
* Add experimental `Replicated` database engine. It replicates DDL queries across multiple hosts. [#16193](https://github.com/ClickHouse/ClickHouse/pull/16193) ([tavplubix](https://github.com/tavplubix)).
|
||||||
|
* Introduce experimental support for window functions, enabled with `allow_experimental_window_functions = 1`. This is a preliminary, alpha-quality implementation that is not suitable for production use and will change in backward-incompatible ways in future releases. Please see [the documentation](https://github.com/ClickHouse/ClickHouse/blob/master/docs/en/sql-reference/window-functions/index.md#experimental-window-functions) for the list of supported features. [#20337](https://github.com/ClickHouse/ClickHouse/pull/20337) ([Alexander Kuzmenkov](https://github.com/akuzm)).
|
||||||
|
* Add the ability to backup/restore metadata files for DiskS3. [#18377](https://github.com/ClickHouse/ClickHouse/pull/18377) ([Pavel Kovalenko](https://github.com/Jokser)).
|
||||||
|
|
||||||
|
#### Performance Improvement
|
||||||
|
|
||||||
|
* Hedged requests for remote queries. When setting `use_hedged_requests` enabled (off by default), allow to establish many connections with different replicas for query. New connection is enabled in case existent connection(s) with replica(s) were not established within `hedged_connection_timeout` or no data was received within `receive_data_timeout`. Query uses the first connection which sends a non-empty progress packet (or data packet, if `allow_changing_replica_until_first_data_packet`); other connections are cancelled. Queries with `max_parallel_replicas > 1` are supported. [#19291](https://github.com/ClickHouse/ClickHouse/pull/19291) ([Kruglov Pavel](https://github.com/Avogar)). This allows to significantly reduce tail latencies on very large clusters.
|
||||||
|
* Added support for `PREWHERE` (and enable the corresponding optimization) when tables have row-level security expressions specified. [#19576](https://github.com/ClickHouse/ClickHouse/pull/19576) ([Denis Glazachev](https://github.com/traceon)).
|
||||||
|
* The setting `distributed_aggregation_memory_efficient` is enabled by default. It will lower memory usage and improve performance of distributed queries. [#20599](https://github.com/ClickHouse/ClickHouse/pull/20599) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Improve performance of GROUP BY multiple fixed size keys. [#20472](https://github.com/ClickHouse/ClickHouse/pull/20472) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Improve performance of aggregate functions by more strict aliasing. [#19946](https://github.com/ClickHouse/ClickHouse/pull/19946) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Speed up reading from `Memory` tables in extreme cases (when reading speed is in order of 50 GB/sec) by simplification of pipeline and (consequently) less lock contention in pipeline scheduling. [#20468](https://github.com/ClickHouse/ClickHouse/pull/20468) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Partially reimplement HTTP server to make it create fewer copies of incoming and outgoing data. It gives up to 1.5x performance improvement on inserting long records over HTTP. [#19516](https://github.com/ClickHouse/ClickHouse/pull/19516) ([Ivan](https://github.com/abyss7)).
|
||||||
|
* Add `compress` setting for `Memory` tables. If it's enabled the table will use less RAM. On some machines and datasets it can also work faster on SELECT, but it is not always the case. This closes [#20093](https://github.com/ClickHouse/ClickHouse/issues/20093). Note: there are reasons why Memory tables can work slower than MergeTree: (1) lack of compression (2) static size of blocks (3) lack of indices and prewhere... [#20168](https://github.com/ClickHouse/ClickHouse/pull/20168) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Slightly better code in aggregation. [#20978](https://github.com/ClickHouse/ClickHouse/pull/20978) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Add back `intDiv`/`modulo` specializations for better performance. This fixes [#21293](https://github.com/ClickHouse/ClickHouse/issues/21293) . The regression was introduced in https://github.com/ClickHouse/ClickHouse/pull/18145 . [#21307](https://github.com/ClickHouse/ClickHouse/pull/21307) ([Amos Bird](https://github.com/amosbird)).
|
||||||
|
* Do not squash blocks too much on INSERT SELECT if inserting into Memory table. In previous versions inefficient data representation was created in Memory table after INSERT SELECT. This closes [#13052](https://github.com/ClickHouse/ClickHouse/issues/13052). [#20169](https://github.com/ClickHouse/ClickHouse/pull/20169) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix at least one case when DataType parser may have exponential complexity (found by fuzzer). This closes [#20096](https://github.com/ClickHouse/ClickHouse/issues/20096). [#20132](https://github.com/ClickHouse/ClickHouse/pull/20132) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Parallelize SELECT with FINAL for single part with level > 0 when `do_not_merge_across_partitions_select_final` setting is 1. [#19375](https://github.com/ClickHouse/ClickHouse/pull/19375) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* Fill only requested columns when querying `system.parts` and `system.parts_columns`. Closes [#19570](https://github.com/ClickHouse/ClickHouse/issues/19570). [#21035](https://github.com/ClickHouse/ClickHouse/pull/21035) ([Anmol Arora](https://github.com/anmolarora)).
|
||||||
|
* Perform algebraic optimizations of arithmetic expressions inside `avg` aggregate function. close [#20092](https://github.com/ClickHouse/ClickHouse/issues/20092). [#20183](https://github.com/ClickHouse/ClickHouse/pull/20183) ([flynn](https://github.com/ucasFL)).
|
||||||
|
|
||||||
|
#### Improvement
|
||||||
|
|
||||||
|
* Case-insensitive compression methods for table functions. Also fixed LZMA compression method which was checked in upper case. [#21416](https://github.com/ClickHouse/ClickHouse/pull/21416) ([Vladimir Chebotarev](https://github.com/excitoon)).
|
||||||
|
* Add two settings to delay or throw error during insertion when there are too many inactive parts. This is useful when server fails to clean up parts quickly enough. [#20178](https://github.com/ClickHouse/ClickHouse/pull/20178) ([Amos Bird](https://github.com/amosbird)).
|
||||||
|
* Provide better compatibility for MySQL clients: 1. MySQL JDBC, 2. mycli. [#21367](https://github.com/ClickHouse/ClickHouse/pull/21367) ([Amos Bird](https://github.com/amosbird)).
|
||||||
|
* Forbid to drop a column if it's referenced by materialized view. Closes [#21164](https://github.com/ClickHouse/ClickHouse/issues/21164). [#21303](https://github.com/ClickHouse/ClickHouse/pull/21303) ([flynn](https://github.com/ucasFL)).
|
||||||
|
* MySQL dictionary source will now retry unexpected connection failures (Lost connection to MySQL server during query) which sometimes happen on SSL/TLS connections. [#21237](https://github.com/ClickHouse/ClickHouse/pull/21237) ([Alexander Kazakov](https://github.com/Akazz)).
|
||||||
|
* Usability improvement: more consistent `DateTime64` parsing: recognize the case when unix timestamp with subsecond resolution is specified as scaled integer (like `1111111111222` instead of `1111111111.222`). This closes [#13194](https://github.com/ClickHouse/ClickHouse/issues/13194). [#21053](https://github.com/ClickHouse/ClickHouse/pull/21053) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Do only merging of sorted blocks on initiator with distributed_group_by_no_merge. [#20882](https://github.com/ClickHouse/ClickHouse/pull/20882) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* When loading config for mysql source ClickHouse will now randomize the list of replicas with the same priority to ensure the round-robin logics of picking mysql endpoint. This closes [#20629](https://github.com/ClickHouse/ClickHouse/issues/20629). [#20632](https://github.com/ClickHouse/ClickHouse/pull/20632) ([Alexander Kazakov](https://github.com/Akazz)).
|
||||||
|
* Function `reinterpretAs(x, Type)` renamed to `reinterpret(x, Type)`. [#20611](https://github.com/ClickHouse/ClickHouse/pull/20611) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||||
|
* Support vhost for RabbitMQ engine [#20576](https://github.com/ClickHouse/ClickHouse/issues/20576). [#20596](https://github.com/ClickHouse/ClickHouse/pull/20596) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Improved serialization for data types combined of Arrays and Tuples. Improved matching enum data types to protobuf enum type. Fixed serialization of the `Map` data type. Omitted values are now set by default. [#20506](https://github.com/ClickHouse/ClickHouse/pull/20506) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||||
|
* Fixed race between execution of distributed DDL tasks and cleanup of DDL queue. Now DDL task cannot be removed from ZooKeeper if there are active workers. Fixes [#20016](https://github.com/ClickHouse/ClickHouse/issues/20016). [#20448](https://github.com/ClickHouse/ClickHouse/pull/20448) ([tavplubix](https://github.com/tavplubix)).
|
||||||
|
* Make FQDN and other DNS related functions work correctly in alpine images. [#20336](https://github.com/ClickHouse/ClickHouse/pull/20336) ([filimonov](https://github.com/filimonov)).
|
||||||
|
* Do not allow early constant folding of explicitly forbidden functions. [#20303](https://github.com/ClickHouse/ClickHouse/pull/20303) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Implicit conversion from integer to Decimal type might succeed if the integer value does not fit into Decimal type. Now it throws `ARGUMENT_OUT_OF_BOUND`. [#20232](https://github.com/ClickHouse/ClickHouse/pull/20232) ([tavplubix](https://github.com/tavplubix)).
|
||||||
|
* Lockless `SYSTEM FLUSH DISTRIBUTED`. [#20215](https://github.com/ClickHouse/ClickHouse/pull/20215) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Normalize count(constant), sum(1) to count(). This is needed for projection query routing. [#20175](https://github.com/ClickHouse/ClickHouse/pull/20175) ([Amos Bird](https://github.com/amosbird)).
|
||||||
|
* Support all native integer types in bitmap functions. [#20171](https://github.com/ClickHouse/ClickHouse/pull/20171) ([Amos Bird](https://github.com/amosbird)).
|
||||||
|
* Updated `CacheDictionary`, `ComplexCacheDictionary`, `SSDCacheDictionary`, `SSDComplexKeyDictionary` to use LRUHashMap as underlying index. [#20164](https://github.com/ClickHouse/ClickHouse/pull/20164) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||||
|
* The setting `access_management` is now configurable on startup by providing `CLICKHOUSE_DEFAULT_ACCESS_MANAGEMENT`, defaults to disabled (`0`) which was the prior value. [#20139](https://github.com/ClickHouse/ClickHouse/pull/20139) ([Marquitos](https://github.com/sonirico)).
|
||||||
|
* Fix toDateTime64(toDate()/toDateTime()) for DateTime64 - Implement DateTime64 clamping to match DateTime behaviour. [#20131](https://github.com/ClickHouse/ClickHouse/pull/20131) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Quota improvements: SHOW TABLES is now considered as one query in the quota calculations, not two queries. SYSTEM queries now consume quota. Fix calculation of interval's end in quota consumption. [#20106](https://github.com/ClickHouse/ClickHouse/pull/20106) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||||
|
* Supports `path IN (set)` expressions for `system.zookeeper` table. [#20105](https://github.com/ClickHouse/ClickHouse/pull/20105) ([小路](https://github.com/nicelulu)).
|
||||||
|
* Show full details of `MaterializeMySQL` tables in `system.tables`. [#20051](https://github.com/ClickHouse/ClickHouse/pull/20051) ([Stig Bakken](https://github.com/stigsb)).
|
||||||
|
* Fix data race in executable dictionary that was possible only on misuse (when the script returns data ignoring its input). [#20045](https://github.com/ClickHouse/ClickHouse/pull/20045) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* The value of MYSQL_OPT_RECONNECT option can now be controlled by "opt_reconnect" parameter in the config section of mysql replica. [#19998](https://github.com/ClickHouse/ClickHouse/pull/19998) ([Alexander Kazakov](https://github.com/Akazz)).
|
||||||
|
* If user calls `JSONExtract` function with `Float32` type requested, allow inaccurate conversion to the result type. For example the number `0.1` in JSON is double precision and is not representable in Float32, but the user still wants to get it. Previous versions return 0 for non-Nullable type and NULL for Nullable type to indicate that conversion is imprecise. The logic was 100% correct but it was surprising to users and leading to questions. This closes [#13962](https://github.com/ClickHouse/ClickHouse/issues/13962). [#19960](https://github.com/ClickHouse/ClickHouse/pull/19960) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Add conversion of block structure for INSERT into Distributed tables if it does not match. [#19947](https://github.com/ClickHouse/ClickHouse/pull/19947) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Improvement for the `system.distributed_ddl_queue` table. Initialize MaxDDLEntryID to the last value after restarting. Before this PR, MaxDDLEntryID will remain zero until a new DDLTask is processed. [#19924](https://github.com/ClickHouse/ClickHouse/pull/19924) ([Amos Bird](https://github.com/amosbird)).
|
||||||
|
* Show `MaterializeMySQL` tables in `system.parts`. [#19770](https://github.com/ClickHouse/ClickHouse/pull/19770) ([Stig Bakken](https://github.com/stigsb)).
|
||||||
|
* Add separate config directive for `Buffer` profile. [#19721](https://github.com/ClickHouse/ClickHouse/pull/19721) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Move conditions that are not related to JOIN to WHERE clause. [#18720](https://github.com/ClickHouse/ClickHouse/issues/18720). [#19685](https://github.com/ClickHouse/ClickHouse/pull/19685) ([hexiaoting](https://github.com/hexiaoting)).
|
||||||
|
* Add ability to throttle INSERT into Distributed based on amount of pending bytes for async send (`bytes_to_delay_insert`/`max_delay_to_insert` and `bytes_to_throw_insert` settings for `Distributed` engine has been added). [#19673](https://github.com/ClickHouse/ClickHouse/pull/19673) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Fix some rare cases when write errors can be ignored in destructors. [#19451](https://github.com/ClickHouse/ClickHouse/pull/19451) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Print inline frames in stack traces for fatal errors. [#19317](https://github.com/ClickHouse/ClickHouse/pull/19317) ([Ivan](https://github.com/abyss7)).
|
||||||
|
|
||||||
|
#### Bug Fix
|
||||||
|
|
||||||
|
* Fix redundant reconnects to ZooKeeper and the possibility of two active sessions for a single clickhouse server. Both problems introduced in #14678. [#21264](https://github.com/ClickHouse/ClickHouse/pull/21264) ([alesapin](https://github.com/alesapin)).
|
||||||
|
* Fix error `Bad cast from type ... to DB::ColumnLowCardinality` while inserting into table with `LowCardinality` column from `Values` format. Fixes #21140 [#21357](https://github.com/ClickHouse/ClickHouse/pull/21357) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||||
|
* Fix a deadlock in `ALTER DELETE` mutations for non replicated MergeTree table engines when the predicate contains the table itself. Fixes [#20558](https://github.com/ClickHouse/ClickHouse/issues/20558). [#21477](https://github.com/ClickHouse/ClickHouse/pull/21477) ([alesapin](https://github.com/alesapin)).
|
||||||
|
* Fix SIGSEGV for distributed queries on failures. [#21434](https://github.com/ClickHouse/ClickHouse/pull/21434) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Now `ALTER MODIFY COLUMN` queries will correctly affect changes in partition key, skip indices, TTLs, and so on. Fixes [#13675](https://github.com/ClickHouse/ClickHouse/issues/13675). [#21334](https://github.com/ClickHouse/ClickHouse/pull/21334) ([alesapin](https://github.com/alesapin)).
|
||||||
|
* Fix bug with `join_use_nulls` and joining `TOTALS` from subqueries. This closes [#19362](https://github.com/ClickHouse/ClickHouse/issues/19362) and [#21137](https://github.com/ClickHouse/ClickHouse/issues/21137). [#21248](https://github.com/ClickHouse/ClickHouse/pull/21248) ([vdimir](https://github.com/vdimir)).
|
||||||
|
* Fix crash in `EXPLAIN` for query with `UNION`. Fixes [#20876](https://github.com/ClickHouse/ClickHouse/issues/20876), [#21170](https://github.com/ClickHouse/ClickHouse/issues/21170). [#21246](https://github.com/ClickHouse/ClickHouse/pull/21246) ([flynn](https://github.com/ucasFL)).
|
||||||
|
* Now mutations allowed only for table engines that support them (MergeTree family, Memory, MaterializedView). Other engines will report a more clear error. Fixes [#21168](https://github.com/ClickHouse/ClickHouse/issues/21168). [#21183](https://github.com/ClickHouse/ClickHouse/pull/21183) ([alesapin](https://github.com/alesapin)).
|
||||||
|
* Fixes [#21112](https://github.com/ClickHouse/ClickHouse/issues/21112). Fixed bug that could cause duplicates with insert query (if one of the callbacks came a little too late). [#21138](https://github.com/ClickHouse/ClickHouse/pull/21138) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Fix `input_format_null_as_default` take effective when types are nullable. This fixes [#21116](https://github.com/ClickHouse/ClickHouse/issues/21116) . [#21121](https://github.com/ClickHouse/ClickHouse/pull/21121) ([Amos Bird](https://github.com/amosbird)).
|
||||||
|
* Fix a bug related to casting `Tuple` to `Map`. Closes [#21029](https://github.com/ClickHouse/ClickHouse/issues/21029). [#21120](https://github.com/ClickHouse/ClickHouse/pull/21120) ([hexiaoting](https://github.com/hexiaoting)).
|
||||||
|
* Fix the metadata leak when the Replicated*MergeTree with custom (non default) ZooKeeper cluster is dropped. [#21119](https://github.com/ClickHouse/ClickHouse/pull/21119) ([fastio](https://github.com/fastio)).
|
||||||
|
* Fix type mismatch issue when using LowCardinality keys in joinGet. This fixes [#21114](https://github.com/ClickHouse/ClickHouse/issues/21114). [#21117](https://github.com/ClickHouse/ClickHouse/pull/21117) ([Amos Bird](https://github.com/amosbird)).
|
||||||
|
* Fix `default_replica_path` and `default_replica_name` values being ignored for the `Replicated(*)MergeTree` engine when the engine requires other parameters to be specified. [#21060](https://github.com/ClickHouse/ClickHouse/pull/21060) ([mxzlxy](https://github.com/mxzlxy)).
|
||||||
|
* Out of bound memory access was possible when formatting specifically crafted out of range value of type `DateTime64`. This closes [#20494](https://github.com/ClickHouse/ClickHouse/issues/20494). This closes [#20543](https://github.com/ClickHouse/ClickHouse/issues/20543). [#21023](https://github.com/ClickHouse/ClickHouse/pull/21023) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Block parallel insertions into storage join. [#21009](https://github.com/ClickHouse/ClickHouse/pull/21009) ([vdimir](https://github.com/vdimir)).
|
||||||
|
* Fixed behaviour, when `ALTER MODIFY COLUMN` created mutation, that will knowingly fail. [#21007](https://github.com/ClickHouse/ClickHouse/pull/21007) ([Anton Popov](https://github.com/CurtizJ)).
|
||||||
|
* Closes [#9969](https://github.com/ClickHouse/ClickHouse/issues/9969). Fixed Brotli http compression error, which reproduced for large data sizes, slightly complicated structure and with json output format. Update Brotli to the latest version to include the "fix rare access to uninitialized data in ring-buffer". [#20991](https://github.com/ClickHouse/ClickHouse/pull/20991) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Fix 'Empty task was returned from async task queue' on query cancellation. [#20881](https://github.com/ClickHouse/ClickHouse/pull/20881) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* `USE database;` query did not work when using MySQL 5.7 client to connect to ClickHouse server, it's fixed. Fixes [#18926](https://github.com/ClickHouse/ClickHouse/issues/18926). [#20878](https://github.com/ClickHouse/ClickHouse/pull/20878) ([tavplubix](https://github.com/tavplubix)).
|
||||||
|
* Fix usage of `-Distinct` combinator with `-State` combinator in aggregate functions. [#20866](https://github.com/ClickHouse/ClickHouse/pull/20866) ([Anton Popov](https://github.com/CurtizJ)).
|
||||||
|
* Fix subquery with union distinct and limit clause. close [#20597](https://github.com/ClickHouse/ClickHouse/issues/20597). [#20610](https://github.com/ClickHouse/ClickHouse/pull/20610) ([flynn](https://github.com/ucasFL)).
|
||||||
|
* Fixed inconsistent behavior of dictionary in case of queries where we look for absent keys in dictionary. [#20578](https://github.com/ClickHouse/ClickHouse/pull/20578) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
|
||||||
|
* Fix the number of threads for scalar subqueries and subqueries for index (after [#19007](https://github.com/ClickHouse/ClickHouse/issues/19007) single thread was always used). Fixes [#20457](https://github.com/ClickHouse/ClickHouse/issues/20457), [#20512](https://github.com/ClickHouse/ClickHouse/issues/20512). [#20550](https://github.com/ClickHouse/ClickHouse/pull/20550) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||||
|
* Fix crash which could happen if an unknown packet was received from a remote query (was introduced in [#17868](https://github.com/ClickHouse/ClickHouse/issues/17868)). [#20547](https://github.com/ClickHouse/ClickHouse/pull/20547) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Add proper checks while parsing directory names for async INSERT (fixes SIGSEGV). [#20498](https://github.com/ClickHouse/ClickHouse/pull/20498) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Fix function `transform` does not work properly for floating point keys. Closes [#20460](https://github.com/ClickHouse/ClickHouse/issues/20460). [#20479](https://github.com/ClickHouse/ClickHouse/pull/20479) ([flynn](https://github.com/ucasFL)).
|
||||||
|
* Fix infinite loop when propagating WITH aliases to subqueries. This fixes [#20388](https://github.com/ClickHouse/ClickHouse/issues/20388). [#20476](https://github.com/ClickHouse/ClickHouse/pull/20476) ([Amos Bird](https://github.com/amosbird)).
|
||||||
|
* Fix abnormal server termination when http client goes away. [#20464](https://github.com/ClickHouse/ClickHouse/pull/20464) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Fix `LOGICAL_ERROR` for `join_use_nulls=1` when JOIN contains const from SELECT. [#20461](https://github.com/ClickHouse/ClickHouse/pull/20461) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Check if table function `view` is used in expression list and throw an error. This fixes [#20342](https://github.com/ClickHouse/ClickHouse/issues/20342). [#20350](https://github.com/ClickHouse/ClickHouse/pull/20350) ([Amos Bird](https://github.com/amosbird)).
|
||||||
|
* Avoid invalid dereference in RANGE_HASHED() dictionary. [#20345](https://github.com/ClickHouse/ClickHouse/pull/20345) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Fix null dereference with `join_use_nulls=1`. [#20344](https://github.com/ClickHouse/ClickHouse/pull/20344) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Fix incorrect result of binary operations between two constant decimals of different scale. Fixes [#20283](https://github.com/ClickHouse/ClickHouse/issues/20283). [#20339](https://github.com/ClickHouse/ClickHouse/pull/20339) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||||
|
* Fix too often retries of failed background tasks for `ReplicatedMergeTree` table engines family. This could lead to too verbose logging and increased CPU load. Fixes [#20203](https://github.com/ClickHouse/ClickHouse/issues/20203). [#20335](https://github.com/ClickHouse/ClickHouse/pull/20335) ([alesapin](https://github.com/alesapin)).
|
||||||
|
* Restrict to `DROP` or `RENAME` version column of `*CollapsingMergeTree` and `ReplacingMergeTree` table engines. [#20300](https://github.com/ClickHouse/ClickHouse/pull/20300) ([alesapin](https://github.com/alesapin)).
|
||||||
|
* Fixed the behavior when in case of broken JSON we tried to read the whole file into memory which leads to exception from the allocator. Fixes [#19719](https://github.com/ClickHouse/ClickHouse/issues/19719). [#20286](https://github.com/ClickHouse/ClickHouse/pull/20286) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
|
||||||
|
* Fix exception during vertical merge for `MergeTree` table engines family which don't allow to perform vertical merges. Fixes [#20259](https://github.com/ClickHouse/ClickHouse/issues/20259). [#20279](https://github.com/ClickHouse/ClickHouse/pull/20279) ([alesapin](https://github.com/alesapin)).
|
||||||
|
* Fix rare server crash on config reload during the shutdown. Fixes [#19689](https://github.com/ClickHouse/ClickHouse/issues/19689). [#20224](https://github.com/ClickHouse/ClickHouse/pull/20224) ([alesapin](https://github.com/alesapin)).
|
||||||
|
* Fix CTE when using in INSERT SELECT. This fixes [#20187](https://github.com/ClickHouse/ClickHouse/issues/20187), fixes [#20195](https://github.com/ClickHouse/ClickHouse/issues/20195). [#20211](https://github.com/ClickHouse/ClickHouse/pull/20211) ([Amos Bird](https://github.com/amosbird)).
|
||||||
|
* Fixes [#19314](https://github.com/ClickHouse/ClickHouse/issues/19314). [#20156](https://github.com/ClickHouse/ClickHouse/pull/20156) ([Ivan](https://github.com/abyss7)).
|
||||||
|
* Fix `toMinute` function to handle special timezones correctly. [#20149](https://github.com/ClickHouse/ClickHouse/pull/20149) ([keenwolf](https://github.com/keen-wolf)).
|
||||||
|
* Fix server crash after query with `if` function with `Tuple` type of then/else branches result. `Tuple` type must contain `Array` or another complex type. Fixes [#18356](https://github.com/ClickHouse/ClickHouse/issues/18356). [#20133](https://github.com/ClickHouse/ClickHouse/pull/20133) ([alesapin](https://github.com/alesapin)).
|
||||||
|
* The `MongoDB` table engine now establishes connection only when it's going to read data. `ATTACH TABLE` won't try to connect anymore. [#20110](https://github.com/ClickHouse/ClickHouse/pull/20110) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||||
|
* Bugfix in StorageJoin. [#20079](https://github.com/ClickHouse/ClickHouse/pull/20079) ([vdimir](https://github.com/vdimir)).
|
||||||
|
* Fix the case when calculating modulo of division of negative number by small divisor, the resulting data type was not large enough to accommodate the negative result. This closes [#20052](https://github.com/ClickHouse/ClickHouse/issues/20052). [#20067](https://github.com/ClickHouse/ClickHouse/pull/20067) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* MaterializeMySQL: Fix replication for statements that update several tables. [#20066](https://github.com/ClickHouse/ClickHouse/pull/20066) ([Håvard Kvålen](https://github.com/havardk)).
|
||||||
|
* Prevent "Connection refused" in docker during initialization script execution. [#20012](https://github.com/ClickHouse/ClickHouse/pull/20012) ([filimonov](https://github.com/filimonov)).
|
||||||
|
* `EmbeddedRocksDB` is an experimental storage. Fix the issue with lack of proper type checking. Simplified code. This closes [#19967](https://github.com/ClickHouse/ClickHouse/issues/19967). [#19972](https://github.com/ClickHouse/ClickHouse/pull/19972) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix a segfault in function `fromModifiedJulianDay` when the argument type is `Nullable(T)` for any integral types other than Int32. [#19959](https://github.com/ClickHouse/ClickHouse/pull/19959) ([PHO](https://github.com/depressed-pho)).
|
||||||
|
* BloomFilter index crash fix. Fixes [#19757](https://github.com/ClickHouse/ClickHouse/issues/19757). [#19884](https://github.com/ClickHouse/ClickHouse/pull/19884) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||||
|
* Deadlock was possible if system.text_log is enabled. This fixes [#19874](https://github.com/ClickHouse/ClickHouse/issues/19874). [#19875](https://github.com/ClickHouse/ClickHouse/pull/19875) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix starting the server with tables having default expressions containing dictGet(). Allow getting return type of dictGet() without loading dictionary. [#19805](https://github.com/ClickHouse/ClickHouse/pull/19805) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||||
|
* Fix clickhouse-client abort exception while executing only `select`. [#19790](https://github.com/ClickHouse/ClickHouse/pull/19790) ([taiyang-li](https://github.com/taiyang-li)).
|
||||||
|
* Fix a bug that moving pieces to the destination table may fail in case of launching multiple clickhouse-copiers. [#19743](https://github.com/ClickHouse/ClickHouse/pull/19743) ([madianjun](https://github.com/mdianjun)).
|
||||||
|
* Background thread which executes `ON CLUSTER` queries might hang waiting for dropped replicated table to do something. It's fixed. [#19684](https://github.com/ClickHouse/ClickHouse/pull/19684) ([yiguolei](https://github.com/yiguolei)).
|
||||||
|
|
||||||
|
#### Build/Testing/Packaging Improvement
|
||||||
|
|
||||||
|
* Allow to build ClickHouse with AVX-2 enabled globally. It gives slight performance benefits on modern CPUs. Not recommended for production and will not be supported as official build for now. [#20180](https://github.com/ClickHouse/ClickHouse/pull/20180) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix some of the issues found by Coverity. See [#19964](https://github.com/ClickHouse/ClickHouse/issues/19964). [#20010](https://github.com/ClickHouse/ClickHouse/pull/20010) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Allow to start up with modified binary under gdb. In previous version if you set up breakpoint in gdb before start, server will refuse to start up due to failed integrity check. [#21258](https://github.com/ClickHouse/ClickHouse/pull/21258) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Add a test for different compression methods in Kafka. [#21111](https://github.com/ClickHouse/ClickHouse/pull/21111) ([filimonov](https://github.com/filimonov)).
|
||||||
|
* Fixed port clash from test_storage_kerberized_hdfs test. [#19974](https://github.com/ClickHouse/ClickHouse/pull/19974) ([Ilya Yatsishin](https://github.com/qoega)).
|
||||||
|
* Print `stdout` and `stderr` to log when failed to start docker in integration tests. Before this PR there was a very short error message in this case which didn't help to investigate the problems. [#20631](https://github.com/ClickHouse/ClickHouse/pull/20631) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||||
|
|
||||||
|
|
||||||
|
## ClickHouse release 21.2
|
||||||
|
|
||||||
|
### ClickHouse release v21.2.2.8-stable, 2021-02-07
|
||||||
|
|
||||||
|
#### Backward Incompatible Change
|
||||||
|
|
||||||
|
* Bitwise functions (`bitAnd`, `bitOr`, etc) are forbidden for floating point arguments. Now you have to do explicit cast to integer. [#19853](https://github.com/ClickHouse/ClickHouse/pull/19853) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Forbid `lcm`/`gcd` for floats. [#19532](https://github.com/ClickHouse/ClickHouse/pull/19532) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Fix memory tracking for `OPTIMIZE TABLE`/merges; account query memory limits and sampling for `OPTIMIZE TABLE`/merges. [#18772](https://github.com/ClickHouse/ClickHouse/pull/18772) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Disallow floating point column as partition key, see [#18421](https://github.com/ClickHouse/ClickHouse/issues/18421#event-4147046255). [#18464](https://github.com/ClickHouse/ClickHouse/pull/18464) ([hexiaoting](https://github.com/hexiaoting)).
|
||||||
|
* Excessive parenthesis in type definitions no longer supported, example: `Array((UInt8))`.
|
||||||
|
|
||||||
|
#### New Feature
|
||||||
|
|
||||||
|
* Added `PostgreSQL` table engine (both select/insert, with support for multidimensional arrays), also as table function. Added `PostgreSQL` dictionary source. Added `PostgreSQL` database engine. [#18554](https://github.com/ClickHouse/ClickHouse/pull/18554) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Data type `Nested` now supports arbitrary levels of nesting. Introduced subcolumns of complex types, such as `size0` in `Array`, `null` in `Nullable`, names of `Tuple` elements, which can be read without reading of whole column. [#17310](https://github.com/ClickHouse/ClickHouse/pull/17310) ([Anton Popov](https://github.com/CurtizJ)).
|
||||||
|
* Added `Nullable` support for `FlatDictionary`, `HashedDictionary`, `ComplexKeyHashedDictionary`, `DirectDictionary`, `ComplexKeyDirectDictionary`, `RangeHashedDictionary`. [#18236](https://github.com/ClickHouse/ClickHouse/pull/18236) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||||
|
* Adds a new table called `system.distributed_ddl_queue` that displays the queries in the DDL worker queue. [#17656](https://github.com/ClickHouse/ClickHouse/pull/17656) ([Bharat Nallan](https://github.com/bharatnc)).
|
||||||
|
* Added support of mapping LDAP group names, and attribute values in general, to local roles for users from ldap user directories. [#17211](https://github.com/ClickHouse/ClickHouse/pull/17211) ([Denis Glazachev](https://github.com/traceon)).
|
||||||
|
* Support insert into table function `cluster`, and for both table functions `remote` and `cluster`, support distributing data across nodes by specify sharding key. Close [#16752](https://github.com/ClickHouse/ClickHouse/issues/16752). [#18264](https://github.com/ClickHouse/ClickHouse/pull/18264) ([flynn](https://github.com/ucasFL)).
|
||||||
|
* Add function `decodeXMLComponent` to decode characters for XML. Example: `SELECT decodeXMLComponent('Hello,"world"!')` [#17659](https://github.com/ClickHouse/ClickHouse/issues/17659). [#18542](https://github.com/ClickHouse/ClickHouse/pull/18542) ([nauta](https://github.com/nautaa)).
|
||||||
|
* Added functions `parseDateTimeBestEffortUSOrZero`, `parseDateTimeBestEffortUSOrNull`. [#19712](https://github.com/ClickHouse/ClickHouse/pull/19712) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||||
|
* Add `sign` math function. [#19527](https://github.com/ClickHouse/ClickHouse/pull/19527) ([flynn](https://github.com/ucasFL)).
|
||||||
|
* Add information about used features (functions, table engines, etc) into system.query_log. [#18495](https://github.com/ClickHouse/ClickHouse/issues/18495). [#19371](https://github.com/ClickHouse/ClickHouse/pull/19371) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Function `formatDateTime` support the `%Q` modification to format date to quarter. [#19224](https://github.com/ClickHouse/ClickHouse/pull/19224) ([Jianmei Zhang](https://github.com/zhangjmruc)).
|
||||||
|
* Support MetaKey+Enter hotkey binding in play UI. [#19012](https://github.com/ClickHouse/ClickHouse/pull/19012) ([sundyli](https://github.com/sundy-li)).
|
||||||
|
* Add three functions for map data type: 1. `mapContains(map, key)` to check whether `map.keys` includes the second parameter key. 2. `mapKeys(map)` returns all the keys in Array format 3. `mapValues(map)` returns all the values in Array format. [#18788](https://github.com/ClickHouse/ClickHouse/pull/18788) ([hexiaoting](https://github.com/hexiaoting)).
|
||||||
|
* Add `log_comment` setting related to [#18494](https://github.com/ClickHouse/ClickHouse/issues/18494). [#18549](https://github.com/ClickHouse/ClickHouse/pull/18549) ([Zijie Lu](https://github.com/TszKitLo40)).
|
||||||
|
* Add support of tuple argument to `argMin` and `argMax` functions. [#17359](https://github.com/ClickHouse/ClickHouse/pull/17359) ([Ildus Kurbangaliev](https://github.com/ildus)).
|
||||||
|
* Support `EXISTS VIEW` syntax. [#18552](https://github.com/ClickHouse/ClickHouse/pull/18552) ([Du Chuan](https://github.com/spongedu)).
|
||||||
|
* Add `SELECT ALL` syntax. closes [#18706](https://github.com/ClickHouse/ClickHouse/issues/18706). [#18723](https://github.com/ClickHouse/ClickHouse/pull/18723) ([flynn](https://github.com/ucasFL)).
|
||||||
|
|
||||||
|
#### Performance Improvement
|
||||||
|
|
||||||
|
* Faster parts removal by lowering the number of `stat` syscalls. This returns the optimization that existed while ago. More safe interface of `IDisk`. This closes [#19065](https://github.com/ClickHouse/ClickHouse/issues/19065). [#19086](https://github.com/ClickHouse/ClickHouse/pull/19086) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Aliases declared in `WITH` statement are properly used in index analysis. Queries like `WITH column AS alias SELECT ... WHERE alias = ...` may use index now. [#18896](https://github.com/ClickHouse/ClickHouse/pull/18896) ([Amos Bird](https://github.com/amosbird)).
|
||||||
|
* Add `optimize_alias_column_prediction` (on by default), that will: - Respect aliased columns in WHERE during partition pruning and skipping data using secondary indexes; - Respect aliased columns in WHERE for trivial count queries for optimize_trivial_count; - Respect aliased columns in GROUP BY/ORDER BY for optimize_aggregation_in_order/optimize_read_in_order. [#16995](https://github.com/ClickHouse/ClickHouse/pull/16995) ([sundyli](https://github.com/sundy-li)).
|
||||||
|
* Speed up aggregate function `sum`. Improvement only visible on synthetic benchmarks and not very practical. [#19216](https://github.com/ClickHouse/ClickHouse/pull/19216) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Update libc++ and use another ABI to provide better performance. [#18914](https://github.com/ClickHouse/ClickHouse/pull/18914) ([Danila Kutenin](https://github.com/danlark1)).
|
||||||
|
* Rewrite `sumIf()` and `sum(if())` function to `countIf()` function when logically equivalent. [#17041](https://github.com/ClickHouse/ClickHouse/pull/17041) ([flynn](https://github.com/ucasFL)).
|
||||||
|
* Use a connection pool for S3 connections, controlled by the `s3_max_connections` settings. [#13405](https://github.com/ClickHouse/ClickHouse/pull/13405) ([Vladimir Chebotarev](https://github.com/excitoon)).
|
||||||
|
* Add support for zstd long option for better compression of string columns to save space. [#17184](https://github.com/ClickHouse/ClickHouse/pull/17184) ([ygrek](https://github.com/ygrek)).
|
||||||
|
* Slightly improve server latency by removing access to configuration on every connection. [#19863](https://github.com/ClickHouse/ClickHouse/pull/19863) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Reduce lock contention for multiple layers of the `Buffer` engine. [#19379](https://github.com/ClickHouse/ClickHouse/pull/19379) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Support splitting `Filter` step of query plan into `Expression + Filter` pair. Together with `Expression + Expression` merging optimization ([#17458](https://github.com/ClickHouse/ClickHouse/issues/17458)) it may delay execution for some expressions after `Filter` step. [#19253](https://github.com/ClickHouse/ClickHouse/pull/19253) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||||
|
|
||||||
|
#### Improvement
|
||||||
|
|
||||||
|
* `SELECT count() FROM table` now can be executed if only one any column can be selected from the `table`. This PR fixes [#10639](https://github.com/ClickHouse/ClickHouse/issues/10639). [#18233](https://github.com/ClickHouse/ClickHouse/pull/18233) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||||
|
* Set charset to `utf8mb4` when interacting with remote MySQL servers. Fixes [#19795](https://github.com/ClickHouse/ClickHouse/issues/19795). [#19800](https://github.com/ClickHouse/ClickHouse/pull/19800) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* `S3` table function now supports `auto` compression mode (autodetect). This closes [#18754](https://github.com/ClickHouse/ClickHouse/issues/18754). [#19793](https://github.com/ClickHouse/ClickHouse/pull/19793) ([Vladimir Chebotarev](https://github.com/excitoon)).
|
||||||
|
* Correctly output infinite arguments for `formatReadableTimeDelta` function. In previous versions, there was implicit conversion to implementation specific integer value. [#19791](https://github.com/ClickHouse/ClickHouse/pull/19791) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Table function `S3` will use global region if the region can't be determined exactly. This closes [#10998](https://github.com/ClickHouse/ClickHouse/issues/10998). [#19750](https://github.com/ClickHouse/ClickHouse/pull/19750) ([Vladimir Chebotarev](https://github.com/excitoon)).
|
||||||
|
* In distributed queries if the setting `async_socket_for_remote` is enabled, it was possible to get stack overflow at least in debug build configuration if very deeply nested data type is used in table (e.g. `Array(Array(Array(...more...)))`). This fixes [#19108](https://github.com/ClickHouse/ClickHouse/issues/19108). This change introduces minor backward incompatibility: excessive parenthesis in type definitions no longer supported, example: `Array((UInt8))`. [#19736](https://github.com/ClickHouse/ClickHouse/pull/19736) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Add separate pool for message brokers (RabbitMQ and Kafka). [#19722](https://github.com/ClickHouse/ClickHouse/pull/19722) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Fix rare `max_number_of_merges_with_ttl_in_pool` limit overrun (more merges with TTL can be assigned) for non-replicated MergeTree. [#19708](https://github.com/ClickHouse/ClickHouse/pull/19708) ([alesapin](https://github.com/alesapin)).
|
||||||
|
* Dictionary: better error message during attribute parsing. [#19678](https://github.com/ClickHouse/ClickHouse/pull/19678) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||||
|
* Add an option to disable validation of checksums on reading. Should never be used in production. Please do not expect any benefits in disabling it. It may only be used for experiments and benchmarks. The setting only applicable for tables of MergeTree family. Checksums are always validated for other table engines and when receiving data over network. In my observations there is no performance difference or it is less than 0.5%. [#19588](https://github.com/ClickHouse/ClickHouse/pull/19588) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Support constant result in function `multiIf`. [#19533](https://github.com/ClickHouse/ClickHouse/pull/19533) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||||
|
* Enable functions length/empty/notEmpty for datatype Map, which return the number of keys in the Map. [#19530](https://github.com/ClickHouse/ClickHouse/pull/19530) ([taiyang-li](https://github.com/taiyang-li)).
|
||||||
|
* Add `--reconnect` option to `clickhouse-benchmark`. When this option is specified, it will reconnect before every request. This is needed for testing. [#19872](https://github.com/ClickHouse/ClickHouse/pull/19872) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Support using the new location of `.debug` file. This fixes [#19348](https://github.com/ClickHouse/ClickHouse/issues/19348). [#19520](https://github.com/ClickHouse/ClickHouse/pull/19520) ([Amos Bird](https://github.com/amosbird)).
|
||||||
|
* `toIPv6` function parses `IPv4` addresses. [#19518](https://github.com/ClickHouse/ClickHouse/pull/19518) ([Bharat Nallan](https://github.com/bharatnc)).
|
||||||
|
* Add `http_referer` field to `system.query_log`, `system.processes`, etc. This closes [#19389](https://github.com/ClickHouse/ClickHouse/issues/19389). [#19390](https://github.com/ClickHouse/ClickHouse/pull/19390) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Improve MySQL compatibility by making more functions case insensitive and adding aliases. [#19387](https://github.com/ClickHouse/ClickHouse/pull/19387) ([Daniil Kondratyev](https://github.com/dankondr)).
|
||||||
|
* Add metrics for MergeTree parts (Wide/Compact/InMemory) types. [#19381](https://github.com/ClickHouse/ClickHouse/pull/19381) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Allow docker to be executed with arbitrary uid. [#19374](https://github.com/ClickHouse/ClickHouse/pull/19374) ([filimonov](https://github.com/filimonov)).
|
||||||
|
* Fix wrong alignment of values of `IPv4` data type in Pretty formats. They were aligned to the right, not to the left. This closes [#19184](https://github.com/ClickHouse/ClickHouse/issues/19184). [#19339](https://github.com/ClickHouse/ClickHouse/pull/19339) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Allow change `max_server_memory_usage` without restart. This closes [#18154](https://github.com/ClickHouse/ClickHouse/issues/18154). [#19186](https://github.com/ClickHouse/ClickHouse/pull/19186) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* The exception when function `bar` is called with certain NaN argument may be slightly misleading in previous versions. This fixes [#19088](https://github.com/ClickHouse/ClickHouse/issues/19088). [#19107](https://github.com/ClickHouse/ClickHouse/pull/19107) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Explicitly set uid / gid of clickhouse user & group to the fixed values (101) in clickhouse-server images. [#19096](https://github.com/ClickHouse/ClickHouse/pull/19096) ([filimonov](https://github.com/filimonov)).
|
||||||
|
* Fixed `PeekableReadBuffer: Memory limit exceed` error when inserting data with huge strings. Fixes [#18690](https://github.com/ClickHouse/ClickHouse/issues/18690). [#18979](https://github.com/ClickHouse/ClickHouse/pull/18979) ([tavplubix](https://github.com/tavplubix)).
|
||||||
|
* Docker image: several improvements for clickhouse-server entrypoint. [#18954](https://github.com/ClickHouse/ClickHouse/pull/18954) ([filimonov](https://github.com/filimonov)).
|
||||||
|
* Add `normalizeQueryKeepNames` and `normalizedQueryHashKeepNames` to normalize queries without masking long names with `?`. This helps better analyze complex query logs. [#18910](https://github.com/ClickHouse/ClickHouse/pull/18910) ([Amos Bird](https://github.com/amosbird)).
|
||||||
|
* Check per-block checksum of the distributed batch on the sender before sending (without reading the file twice, the checksums will be verified while reading), this will avoid stuck of the INSERT on the receiver (on truncated .bin file on the sender). Avoid reading .bin files twice for batched INSERT (it was required to calculate rows/bytes to take squashing into account, now this information included into the header, backward compatible is preserved). [#18853](https://github.com/ClickHouse/ClickHouse/pull/18853) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Fix issues with RIGHT and FULL JOIN of tables with aggregate function states. In previous versions exception about `cloneResized` method was thrown. [#18818](https://github.com/ClickHouse/ClickHouse/pull/18818) ([templarzq](https://github.com/templarzq)).
|
||||||
|
* Added prefix-based S3 endpoint settings. [#18812](https://github.com/ClickHouse/ClickHouse/pull/18812) ([Vladimir Chebotarev](https://github.com/excitoon)).
|
||||||
|
* Add [UInt8, UInt16, UInt32, UInt64] arguments types support for bitmapTransform, bitmapSubsetInRange, bitmapSubsetLimit, bitmapContains functions. This closes [#18713](https://github.com/ClickHouse/ClickHouse/issues/18713). [#18791](https://github.com/ClickHouse/ClickHouse/pull/18791) ([sundyli](https://github.com/sundy-li)).
|
||||||
|
* Allow CTE (Common Table Expressions) to be further aliased. Propagate CSE (Common Subexpressions Elimination) to subqueries in the same level when `enable_global_with_statement = 1`. This fixes [#17378](https://github.com/ClickHouse/ClickHouse/issues/17378) . This fixes https://github.com/ClickHouse/ClickHouse/pull/16575#issuecomment-753416235 . [#18684](https://github.com/ClickHouse/ClickHouse/pull/18684) ([Amos Bird](https://github.com/amosbird)).
|
||||||
|
* Update librdkafka to v1.6.0-RC2. Fixes [#18668](https://github.com/ClickHouse/ClickHouse/issues/18668). [#18671](https://github.com/ClickHouse/ClickHouse/pull/18671) ([filimonov](https://github.com/filimonov)).
|
||||||
|
* In case of unexpected exceptions automatically restart background thread which is responsible for execution of distributed DDL queries. Fixes [#17991](https://github.com/ClickHouse/ClickHouse/issues/17991). [#18285](https://github.com/ClickHouse/ClickHouse/pull/18285) ([徐炘](https://github.com/weeds085490)).
|
||||||
|
* Updated AWS C++ SDK in order to utilize global regions in S3. [#17870](https://github.com/ClickHouse/ClickHouse/pull/17870) ([Vladimir Chebotarev](https://github.com/excitoon)).
|
||||||
|
* Added support for `WITH ... [AND] [PERIODIC] REFRESH [interval_in_sec]` clause when creating `LIVE VIEW` tables. [#14822](https://github.com/ClickHouse/ClickHouse/pull/14822) ([vzakaznikov](https://github.com/vzakaznikov)).
|
||||||
|
* Restrict `MODIFY TTL` queries for `MergeTree` tables created in old syntax. Previously the query succeeded, but actually it had no effect. [#19064](https://github.com/ClickHouse/ClickHouse/pull/19064) ([Anton Popov](https://github.com/CurtizJ)).
|
||||||
|
|
||||||
|
#### Bug Fix
|
||||||
|
|
||||||
|
* Fix index analysis of binary functions with constant argument which leads to wrong query results. This fixes [#18364](https://github.com/ClickHouse/ClickHouse/issues/18364). [#18373](https://github.com/ClickHouse/ClickHouse/pull/18373) ([Amos Bird](https://github.com/amosbird)).
|
||||||
|
* Fix starting the server with tables having default expressions containing dictGet(). Allow getting return type of dictGet() without loading dictionary. [#19805](https://github.com/ClickHouse/ClickHouse/pull/19805) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||||
|
* Fix server crash after query with `if` function with `Tuple` type of then/else branches result. `Tuple` type must contain `Array` or another complex type. Fixes [#18356](https://github.com/ClickHouse/ClickHouse/issues/18356). [#20133](https://github.com/ClickHouse/ClickHouse/pull/20133) ([alesapin](https://github.com/alesapin)).
|
||||||
|
* `MaterializeMySQL` (experimental feature): Fix replication for statements that update several tables. [#20066](https://github.com/ClickHouse/ClickHouse/pull/20066) ([Håvard Kvålen](https://github.com/havardk)).
|
||||||
|
* Prevent "Connection refused" in docker during initialization script execution. [#20012](https://github.com/ClickHouse/ClickHouse/pull/20012) ([filimonov](https://github.com/filimonov)).
|
||||||
|
* `EmbeddedRocksDB` is an experimental storage. Fix the issue with lack of proper type checking. Simplified code. This closes [#19967](https://github.com/ClickHouse/ClickHouse/issues/19967). [#19972](https://github.com/ClickHouse/ClickHouse/pull/19972) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix a segfault in function `fromModifiedJulianDay` when the argument type is `Nullable(T)` for any integral types other than Int32. [#19959](https://github.com/ClickHouse/ClickHouse/pull/19959) ([PHO](https://github.com/depressed-pho)).
|
||||||
|
* The function `greatCircleAngle` returned inaccurate results in previous versions. This closes [#19769](https://github.com/ClickHouse/ClickHouse/issues/19769). [#19789](https://github.com/ClickHouse/ClickHouse/pull/19789) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix rare bug when some replicated operations (like mutation) cannot process some parts after data corruption. Fixes [#19593](https://github.com/ClickHouse/ClickHouse/issues/19593). [#19702](https://github.com/ClickHouse/ClickHouse/pull/19702) ([alesapin](https://github.com/alesapin)).
|
||||||
|
* Background thread which executes `ON CLUSTER` queries might hang waiting for dropped replicated table to do something. It's fixed. [#19684](https://github.com/ClickHouse/ClickHouse/pull/19684) ([yiguolei](https://github.com/yiguolei)).
|
||||||
|
* Fix wrong deserialization of columns description. It makes INSERT into a table with a column named `\` impossible. [#19479](https://github.com/ClickHouse/ClickHouse/pull/19479) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Mark distributed batch as broken in case of empty data block in one of files. [#19449](https://github.com/ClickHouse/ClickHouse/pull/19449) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Fixed very rare bug that might cause mutation to hang after `DROP/DETACH/REPLACE/MOVE PARTITION`. It was partially fixed by [#15537](https://github.com/ClickHouse/ClickHouse/issues/15537) for the most cases. [#19443](https://github.com/ClickHouse/ClickHouse/pull/19443) ([tavplubix](https://github.com/tavplubix)).
|
||||||
|
* Fix possible error `Extremes transform was already added to pipeline`. Fixes [#14100](https://github.com/ClickHouse/ClickHouse/issues/14100). [#19430](https://github.com/ClickHouse/ClickHouse/pull/19430) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||||
|
* Fix default value in join types with non-zero default (e.g. some Enums). Closes [#18197](https://github.com/ClickHouse/ClickHouse/issues/18197). [#19360](https://github.com/ClickHouse/ClickHouse/pull/19360) ([vdimir](https://github.com/vdimir)).
|
||||||
|
* Do not mark file for distributed send as broken on EOF. [#19290](https://github.com/ClickHouse/ClickHouse/pull/19290) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Fix leaking of pipe fd for `async_socket_for_remote`. [#19153](https://github.com/ClickHouse/ClickHouse/pull/19153) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Fix infinite reading from file in `ORC` format (was introduced in [#10580](https://github.com/ClickHouse/ClickHouse/issues/10580)). Fixes [#19095](https://github.com/ClickHouse/ClickHouse/issues/19095). [#19134](https://github.com/ClickHouse/ClickHouse/pull/19134) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||||
|
* Fix issue in merge tree data writer which can lead to marks with bigger size than fixed granularity size. Fixes [#18913](https://github.com/ClickHouse/ClickHouse/issues/18913). [#19123](https://github.com/ClickHouse/ClickHouse/pull/19123) ([alesapin](https://github.com/alesapin)).
|
||||||
|
* Fix startup bug when clickhouse was not able to read compression codec from `LowCardinality(Nullable(...))` and throws exception `Attempt to read after EOF`. Fixes [#18340](https://github.com/ClickHouse/ClickHouse/issues/18340). [#19101](https://github.com/ClickHouse/ClickHouse/pull/19101) ([alesapin](https://github.com/alesapin)).
|
||||||
|
* Simplify the implementation of `tupleHammingDistance`. Support for tuples of any equal length. Fixes [#19029](https://github.com/ClickHouse/ClickHouse/issues/19029). [#19084](https://github.com/ClickHouse/ClickHouse/pull/19084) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||||
|
* Make sure `groupUniqArray` returns correct type for argument of Enum type. This closes [#17875](https://github.com/ClickHouse/ClickHouse/issues/17875). [#19019](https://github.com/ClickHouse/ClickHouse/pull/19019) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix possible error `Expected single dictionary argument for function` if use function `ignore` with `LowCardinality` argument. Fixes [#14275](https://github.com/ClickHouse/ClickHouse/issues/14275). [#19016](https://github.com/ClickHouse/ClickHouse/pull/19016) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||||
|
* Fix inserting of `LowCardinality` column to table with `TinyLog` engine. Fixes [#18629](https://github.com/ClickHouse/ClickHouse/issues/18629). [#19010](https://github.com/ClickHouse/ClickHouse/pull/19010) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||||
|
* Fix minor issue in JOIN: Join tries to materialize const columns, but our code waits for them in other places. [#18982](https://github.com/ClickHouse/ClickHouse/pull/18982) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
|
||||||
|
* Disable `optimize_move_functions_out_of_any` because optimization is not always correct. This closes [#18051](https://github.com/ClickHouse/ClickHouse/issues/18051). This closes [#18973](https://github.com/ClickHouse/ClickHouse/issues/18973). [#18981](https://github.com/ClickHouse/ClickHouse/pull/18981) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix possible exception `QueryPipeline stream: different number of columns` caused by merging of query plan's `Expression` steps. Fixes [#18190](https://github.com/ClickHouse/ClickHouse/issues/18190). [#18980](https://github.com/ClickHouse/ClickHouse/pull/18980) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||||
|
* Fixed very rare deadlock at shutdown. [#18977](https://github.com/ClickHouse/ClickHouse/pull/18977) ([tavplubix](https://github.com/tavplubix)).
|
||||||
|
* Fixed rare crashes when the server runs out of memory. [#18976](https://github.com/ClickHouse/ClickHouse/pull/18976) ([tavplubix](https://github.com/tavplubix)).
|
||||||
|
* Fix incorrect behavior when `ALTER TABLE ... DROP PART 'part_name'` query removes all deduplication blocks for the whole partition. Fixes [#18874](https://github.com/ClickHouse/ClickHouse/issues/18874). [#18969](https://github.com/ClickHouse/ClickHouse/pull/18969) ([alesapin](https://github.com/alesapin)).
|
||||||
|
* Fixed issue [#18894](https://github.com/ClickHouse/ClickHouse/issues/18894) Add a check to avoid exception when long column alias('table.column' style, usually auto-generated by BI tools like Looker) equals to long table name. [#18968](https://github.com/ClickHouse/ClickHouse/pull/18968) ([Daniel Qin](https://github.com/mathfool)).
|
||||||
|
* Fix error `Task was not found in task queue` (possible only for remote queries, with `async_socket_for_remote = 1`). [#18964](https://github.com/ClickHouse/ClickHouse/pull/18964) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||||
|
* Fix bug when mutation with some escaped text (like `ALTER ... UPDATE e = CAST('foo', 'Enum8(\'foo\' = 1')` serialized incorrectly. Fixes [#18878](https://github.com/ClickHouse/ClickHouse/issues/18878). [#18944](https://github.com/ClickHouse/ClickHouse/pull/18944) ([alesapin](https://github.com/alesapin)).
|
||||||
|
* ATTACH PARTITION will reset mutations. [#18804](https://github.com/ClickHouse/ClickHouse/issues/18804). [#18935](https://github.com/ClickHouse/ClickHouse/pull/18935) ([fastio](https://github.com/fastio)).
|
||||||
|
* Fix issue with `bitmapOrCardinality` that may lead to nullptr dereference. This closes [#18911](https://github.com/ClickHouse/ClickHouse/issues/18911). [#18912](https://github.com/ClickHouse/ClickHouse/pull/18912) ([sundyli](https://github.com/sundy-li)).
|
||||||
|
* Fixed `Attempt to read after eof` error when trying to `CAST` `NULL` from `Nullable(String)` to `Nullable(Decimal(P, S))`. Now function `CAST` returns `NULL` when it cannot parse decimal from nullable string. Fixes [#7690](https://github.com/ClickHouse/ClickHouse/issues/7690). [#18718](https://github.com/ClickHouse/ClickHouse/pull/18718) ([Winter Zhang](https://github.com/zhang2014)).
|
||||||
|
* Fix data type convert issue for MySQL engine. [#18124](https://github.com/ClickHouse/ClickHouse/pull/18124) ([bo zeng](https://github.com/mis98zb)).
|
||||||
|
* Fix clickhouse-client abort exception while executing only `select`. [#19790](https://github.com/ClickHouse/ClickHouse/pull/19790) ([taiyang-li](https://github.com/taiyang-li)).
|
||||||
|
|
||||||
|
|
||||||
|
#### Build/Testing/Packaging Improvement
|
||||||
|
|
||||||
|
* Run [SQLancer](https://twitter.com/RiggerManuel/status/1352345625480884228) (logical SQL fuzzer) in CI. [#19006](https://github.com/ClickHouse/ClickHouse/pull/19006) ([Ilya Yatsishin](https://github.com/qoega)).
|
||||||
|
* Query Fuzzer will fuzz newly added tests more extensively. This closes [#18916](https://github.com/ClickHouse/ClickHouse/issues/18916). [#19185](https://github.com/ClickHouse/ClickHouse/pull/19185) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Integrate with [Big List of Naughty Strings](https://github.com/minimaxir/big-list-of-naughty-strings/) for better fuzzing. [#19480](https://github.com/ClickHouse/ClickHouse/pull/19480) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Add integration tests run with MSan. [#18974](https://github.com/ClickHouse/ClickHouse/pull/18974) ([alesapin](https://github.com/alesapin)).
|
||||||
|
* Fixed MemorySanitizer errors in cyrus-sasl and musl. [#19821](https://github.com/ClickHouse/ClickHouse/pull/19821) ([Ilya Yatsishin](https://github.com/qoega)).
|
||||||
|
* Insufficient arguments check in `positionCaseInsensitiveUTF8` function triggered address sanitizer. [#19720](https://github.com/ClickHouse/ClickHouse/pull/19720) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Remove --project-directory for docker-compose in integration test. Fix logs formatting from docker container. [#19706](https://github.com/ClickHouse/ClickHouse/pull/19706) ([Ilya Yatsishin](https://github.com/qoega)).
|
||||||
|
* Made generation of macros.xml easier for integration tests. No more excessive logging from dicttoxml. dicttoxml project is not active for 5+ years. [#19697](https://github.com/ClickHouse/ClickHouse/pull/19697) ([Ilya Yatsishin](https://github.com/qoega)).
|
||||||
|
* Allow to explicitly enable or disable watchdog via environment variable `CLICKHOUSE_WATCHDOG_ENABLE`. By default it is enabled if server is not attached to terminal. [#19522](https://github.com/ClickHouse/ClickHouse/pull/19522) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Allow building ClickHouse with Kafka support on arm64. [#19369](https://github.com/ClickHouse/ClickHouse/pull/19369) ([filimonov](https://github.com/filimonov)).
|
||||||
|
* Allow building librdkafka without ssl. [#19337](https://github.com/ClickHouse/ClickHouse/pull/19337) ([filimonov](https://github.com/filimonov)).
|
||||||
|
* Restore Kafka input in FreeBSD builds. [#18924](https://github.com/ClickHouse/ClickHouse/pull/18924) ([Alexandre Snarskii](https://github.com/snar)).
|
||||||
|
* Fix potential nullptr dereference in table function `VALUES`. [#19357](https://github.com/ClickHouse/ClickHouse/pull/19357) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Avoid UBSan reports in `arrayElement` function, `substring` and `arraySum`. Fixes [#19305](https://github.com/ClickHouse/ClickHouse/issues/19305). Fixes [#19287](https://github.com/ClickHouse/ClickHouse/issues/19287). This closes [#19336](https://github.com/ClickHouse/ClickHouse/issues/19336). [#19347](https://github.com/ClickHouse/ClickHouse/pull/19347) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
|
||||||
|
|
||||||
## ClickHouse release 21.1
|
## ClickHouse release 21.1
|
||||||
|
|
||||||
|
### ClickHouse release v21.1.3.32-stable, 2021-02-03
|
||||||
|
|
||||||
|
#### Bug Fix
|
||||||
|
|
||||||
|
* BloomFilter index crash fix. Fixes [#19757](https://github.com/ClickHouse/ClickHouse/issues/19757). [#19884](https://github.com/ClickHouse/ClickHouse/pull/19884) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||||
|
* Fix crash when pushing down predicates to union distinct subquery. This fixes [#19855](https://github.com/ClickHouse/ClickHouse/issues/19855). [#19861](https://github.com/ClickHouse/ClickHouse/pull/19861) ([Amos Bird](https://github.com/amosbird)).
|
||||||
|
* Fix filtering by UInt8 greater than 127. [#19799](https://github.com/ClickHouse/ClickHouse/pull/19799) ([Anton Popov](https://github.com/CurtizJ)).
|
||||||
|
* In previous versions, unusual arguments for function arrayEnumerateUniq may cause crash or infinite loop. This closes [#19787](https://github.com/ClickHouse/ClickHouse/issues/19787). [#19788](https://github.com/ClickHouse/ClickHouse/pull/19788) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fixed stack overflow when using accurate comparison of arithmetic type with string type. [#19773](https://github.com/ClickHouse/ClickHouse/pull/19773) ([tavplubix](https://github.com/tavplubix)).
|
||||||
|
* Fix crash when nested column name was used in `WHERE` or `PREWHERE`. Fixes [#19755](https://github.com/ClickHouse/ClickHouse/issues/19755). [#19763](https://github.com/ClickHouse/ClickHouse/pull/19763) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||||
|
* Fix a segmentation fault in `bitmapAndnot` function. Fixes [#19668](https://github.com/ClickHouse/ClickHouse/issues/19668). [#19713](https://github.com/ClickHouse/ClickHouse/pull/19713) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||||
|
* Some functions with big integers may cause segfault. Big integers are an experimental feature. This closes [#19667](https://github.com/ClickHouse/ClickHouse/issues/19667). [#19672](https://github.com/ClickHouse/ClickHouse/pull/19672) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix wrong result of function `neighbor` for `LowCardinality` argument. Fixes [#10333](https://github.com/ClickHouse/ClickHouse/issues/10333). [#19617](https://github.com/ClickHouse/ClickHouse/pull/19617) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||||
|
* Fix use-after-free of the CompressedWriteBuffer in Connection after disconnect. [#19599](https://github.com/ClickHouse/ClickHouse/pull/19599) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* `DROP/DETACH TABLE table ON CLUSTER cluster SYNC` query might hang, it's fixed. Fixes [#19568](https://github.com/ClickHouse/ClickHouse/issues/19568). [#19572](https://github.com/ClickHouse/ClickHouse/pull/19572) ([tavplubix](https://github.com/tavplubix)).
|
||||||
|
* Query CREATE DICTIONARY id expression fix. [#19571](https://github.com/ClickHouse/ClickHouse/pull/19571) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||||
|
* Fix SIGSEGV with merge_tree_min_rows_for_concurrent_read/merge_tree_min_bytes_for_concurrent_read=0/UINT64_MAX. [#19528](https://github.com/ClickHouse/ClickHouse/pull/19528) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Buffer overflow (on memory read) was possible if `addMonth` function was called with specifically crafted arguments. This fixes [#19441](https://github.com/ClickHouse/ClickHouse/issues/19441). This fixes [#19413](https://github.com/ClickHouse/ClickHouse/issues/19413). [#19472](https://github.com/ClickHouse/ClickHouse/pull/19472) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Uninitialized memory read was possible in encrypt/decrypt functions if empty string was passed as IV. This closes [#19391](https://github.com/ClickHouse/ClickHouse/issues/19391). [#19397](https://github.com/ClickHouse/ClickHouse/pull/19397) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix possible buffer overflow in Uber H3 library. See https://github.com/uber/h3/issues/392. This closes [#19219](https://github.com/ClickHouse/ClickHouse/issues/19219). [#19383](https://github.com/ClickHouse/ClickHouse/pull/19383) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix system.parts _state column (LOGICAL_ERROR when querying this column, due to incorrect order). [#19346](https://github.com/ClickHouse/ClickHouse/pull/19346) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Fixed possible wrong result or segfault on aggregation when Materialized View and its target table have different structure. Fixes [#18063](https://github.com/ClickHouse/ClickHouse/issues/18063). [#19322](https://github.com/ClickHouse/ClickHouse/pull/19322) ([tavplubix](https://github.com/tavplubix)).
|
||||||
|
* Fix error `Cannot convert column now64() because it is constant but values of constants are different in source and result`. Continuation of [#7156](https://github.com/ClickHouse/ClickHouse/issues/7156). [#19316](https://github.com/ClickHouse/ClickHouse/pull/19316) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||||
|
* Fix bug when concurrent `ALTER` and `DROP` queries may hang while processing ReplicatedMergeTree table. [#19237](https://github.com/ClickHouse/ClickHouse/pull/19237) ([alesapin](https://github.com/alesapin)).
|
||||||
|
* Fixed `There is no checkpoint` error when inserting data through http interface using `Template` or `CustomSeparated` format. Fixes [#19021](https://github.com/ClickHouse/ClickHouse/issues/19021). [#19072](https://github.com/ClickHouse/ClickHouse/pull/19072) ([tavplubix](https://github.com/tavplubix)).
|
||||||
|
* Disable constant folding for subqueries on the analysis stage, when the result cannot be calculated. [#18446](https://github.com/ClickHouse/ClickHouse/pull/18446) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Mutation might hang waiting for some non-existent part after `MOVE` or `REPLACE PARTITION` or, in rare cases, after `DETACH` or `DROP PARTITION`. It's fixed. [#15537](https://github.com/ClickHouse/ClickHouse/pull/15537) ([tavplubix](https://github.com/tavplubix)).
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
### ClickHouse release v21.1.2.15-stable 2021-01-18
|
### ClickHouse release v21.1.2.15-stable 2021-01-18
|
||||||
|
|
||||||
#### Backward Incompatible Change
|
#### Backward Incompatible Change
|
||||||
|
@ -39,6 +39,8 @@ else()
|
|||||||
set(RECONFIGURE_MESSAGE_LEVEL STATUS)
|
set(RECONFIGURE_MESSAGE_LEVEL STATUS)
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
|
enable_language(C CXX ASM)
|
||||||
|
|
||||||
include (cmake/arch.cmake)
|
include (cmake/arch.cmake)
|
||||||
include (cmake/target.cmake)
|
include (cmake/target.cmake)
|
||||||
include (cmake/tools.cmake)
|
include (cmake/tools.cmake)
|
||||||
@ -66,17 +68,30 @@ endif ()
|
|||||||
|
|
||||||
include (cmake/find/ccache.cmake)
|
include (cmake/find/ccache.cmake)
|
||||||
|
|
||||||
option(ENABLE_CHECK_HEAVY_BUILDS "Don't allow C++ translation units to compile too long or to take too much memory while compiling" OFF)
|
# Take care to add prlimit in command line before ccache, or else ccache thinks that
|
||||||
|
# prlimit is compiler, and clang++ is its input file, and refuses to work with
|
||||||
|
# multiple inputs, e.g in ccache log:
|
||||||
|
# [2021-03-31T18:06:32.655327 36900] Command line: /usr/bin/ccache prlimit --as=10000000000 --data=5000000000 --cpu=600 /usr/bin/clang++-11 - ...... std=gnu++2a -MD -MT src/CMakeFiles/dbms.dir/Storages/MergeTree/IMergeTreeDataPart.cpp.o -MF src/CMakeFiles/dbms.dir/Storages/MergeTree/IMergeTreeDataPart.cpp.o.d -o src/CMakeFiles/dbms.dir/Storages/MergeTree/IMergeTreeDataPart.cpp.o -c ../src/Storages/MergeTree/IMergeTreeDataPart.cpp
|
||||||
|
#
|
||||||
|
# [2021-03-31T18:06:32.656704 36900] Multiple input files: /usr/bin/clang++-11 and ../src/Storages/MergeTree/IMergeTreeDataPart.cpp
|
||||||
|
#
|
||||||
|
# Another way would be to use --ccache-skip option before clang++-11 to make
|
||||||
|
# ccache ignore it.
|
||||||
|
option(ENABLE_CHECK_HEAVY_BUILDS "Don't allow C++ translation units to compile too long or to take too much memory while compiling." OFF)
|
||||||
if (ENABLE_CHECK_HEAVY_BUILDS)
|
if (ENABLE_CHECK_HEAVY_BUILDS)
|
||||||
# set DATA (since RSS does not work since 2.6.x+) to 2G
|
# set DATA (since RSS does not work since 2.6.x+) to 2G
|
||||||
set (RLIMIT_DATA 5000000000)
|
set (RLIMIT_DATA 5000000000)
|
||||||
# set VIRT (RLIMIT_AS) to 10G (DATA*10)
|
# set VIRT (RLIMIT_AS) to 10G (DATA*10)
|
||||||
set (RLIMIT_AS 10000000000)
|
set (RLIMIT_AS 10000000000)
|
||||||
|
# set CPU time limit to 600 seconds
|
||||||
|
set (RLIMIT_CPU 600)
|
||||||
|
|
||||||
# gcc10/gcc10/clang -fsanitize=memory is too heavy
|
# gcc10/gcc10/clang -fsanitize=memory is too heavy
|
||||||
if (SANITIZE STREQUAL "memory" OR COMPILER_GCC)
|
if (SANITIZE STREQUAL "memory" OR COMPILER_GCC)
|
||||||
set (RLIMIT_DATA 10000000000)
|
set (RLIMIT_DATA 10000000000)
|
||||||
endif()
|
endif()
|
||||||
set (CMAKE_CXX_COMPILER_LAUNCHER prlimit --as=${RLIMIT_AS} --data=${RLIMIT_DATA} --cpu=600)
|
|
||||||
|
set (CMAKE_CXX_COMPILER_LAUNCHER prlimit --as=${RLIMIT_AS} --data=${RLIMIT_DATA} --cpu=${RLIMIT_CPU} ${CMAKE_CXX_COMPILER_LAUNCHER})
|
||||||
endif ()
|
endif ()
|
||||||
|
|
||||||
if (NOT CMAKE_BUILD_TYPE OR CMAKE_BUILD_TYPE STREQUAL "None")
|
if (NOT CMAKE_BUILD_TYPE OR CMAKE_BUILD_TYPE STREQUAL "None")
|
||||||
@ -155,7 +170,6 @@ option(ENABLE_TESTS "Provide unit_test_dbms target with Google.Test unit tests"
|
|||||||
|
|
||||||
if (OS_LINUX AND NOT UNBUNDLED AND MAKE_STATIC_LIBRARIES AND NOT SPLIT_SHARED_LIBRARIES AND CMAKE_VERSION VERSION_GREATER "3.9.0")
|
if (OS_LINUX AND NOT UNBUNDLED AND MAKE_STATIC_LIBRARIES AND NOT SPLIT_SHARED_LIBRARIES AND CMAKE_VERSION VERSION_GREATER "3.9.0")
|
||||||
# Only for Linux, x86_64.
|
# Only for Linux, x86_64.
|
||||||
# Implies ${ENABLE_FASTMEMCPY}
|
|
||||||
option(GLIBC_COMPATIBILITY "Enable compatibility with older glibc libraries." ON)
|
option(GLIBC_COMPATIBILITY "Enable compatibility with older glibc libraries." ON)
|
||||||
elseif(GLIBC_COMPATIBILITY)
|
elseif(GLIBC_COMPATIBILITY)
|
||||||
message (${RECONFIGURE_MESSAGE_LEVEL} "Glibc compatibility cannot be enabled in current configuration")
|
message (${RECONFIGURE_MESSAGE_LEVEL} "Glibc compatibility cannot be enabled in current configuration")
|
||||||
@ -169,7 +183,7 @@ endif ()
|
|||||||
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -rdynamic")
|
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -rdynamic")
|
||||||
|
|
||||||
if (OS_LINUX)
|
if (OS_LINUX)
|
||||||
find_program (OBJCOPY_PATH NAMES "llvm-objcopy" "llvm-objcopy-11" "llvm-objcopy-10" "llvm-objcopy-9" "llvm-objcopy-8" "objcopy")
|
find_program (OBJCOPY_PATH NAMES "llvm-objcopy" "llvm-objcopy-12" "llvm-objcopy-11" "llvm-objcopy-10" "llvm-objcopy-9" "llvm-objcopy-8" "objcopy")
|
||||||
if (OBJCOPY_PATH)
|
if (OBJCOPY_PATH)
|
||||||
message(STATUS "Using objcopy: ${OBJCOPY_PATH}.")
|
message(STATUS "Using objcopy: ${OBJCOPY_PATH}.")
|
||||||
|
|
||||||
@ -241,35 +255,52 @@ else()
|
|||||||
message(STATUS "Disabling compiler -pipe option (have only ${AVAILABLE_PHYSICAL_MEMORY} mb of memory)")
|
message(STATUS "Disabling compiler -pipe option (have only ${AVAILABLE_PHYSICAL_MEMORY} mb of memory)")
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
if(NOT DISABLE_CPU_OPTIMIZE)
|
include(cmake/cpu_features.cmake)
|
||||||
include(cmake/cpu_features.cmake)
|
|
||||||
endif()
|
|
||||||
|
|
||||||
option(ARCH_NATIVE "Add -march=native compiler flag")
|
option(ARCH_NATIVE "Add -march=native compiler flag. This makes your binaries non-portable but more performant code may be generated.")
|
||||||
|
|
||||||
if (ARCH_NATIVE)
|
if (ARCH_NATIVE)
|
||||||
set (COMPILER_FLAGS "${COMPILER_FLAGS} -march=native")
|
set (COMPILER_FLAGS "${COMPILER_FLAGS} -march=native")
|
||||||
endif ()
|
endif ()
|
||||||
|
|
||||||
if (COMPILER_GCC OR COMPILER_CLANG)
|
# Asynchronous unwind tables are needed for Query Profiler.
|
||||||
# to make numeric_limits<__int128> works with GCC
|
# They are already by default on some platforms but possibly not on all platforms.
|
||||||
set (_CXX_STANDARD "gnu++2a")
|
# Enable it explicitly.
|
||||||
else()
|
set (COMPILER_FLAGS "${COMPILER_FLAGS} -fasynchronous-unwind-tables")
|
||||||
set (_CXX_STANDARD "c++2a")
|
|
||||||
endif()
|
|
||||||
|
|
||||||
# cmake < 3.12 doesn't support 20. We'll set CMAKE_CXX_FLAGS for now
|
if (${CMAKE_VERSION} VERSION_LESS "3.12.4")
|
||||||
# set (CMAKE_CXX_STANDARD 20)
|
# CMake < 3.12 doesn't support setting 20 as a C++ standard version.
|
||||||
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=${_CXX_STANDARD}")
|
# We will add C++ standard controlling flag in CMAKE_CXX_FLAGS manually for now.
|
||||||
|
|
||||||
set (CMAKE_CXX_EXTENSIONS 0) # https://cmake.org/cmake/help/latest/prop_tgt/CXX_EXTENSIONS.html#prop_tgt:CXX_EXTENSIONS
|
if (COMPILER_GCC OR COMPILER_CLANG)
|
||||||
set (CMAKE_CXX_STANDARD_REQUIRED ON)
|
# to make numeric_limits<__int128> works with GCC
|
||||||
|
set (_CXX_STANDARD "gnu++2a")
|
||||||
|
else ()
|
||||||
|
set (_CXX_STANDARD "c++2a")
|
||||||
|
endif ()
|
||||||
|
|
||||||
|
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=${_CXX_STANDARD}")
|
||||||
|
else ()
|
||||||
|
set (CMAKE_CXX_STANDARD 20)
|
||||||
|
set (CMAKE_CXX_EXTENSIONS ON) # Same as gnu++2a (ON) vs c++2a (OFF): https://cmake.org/cmake/help/latest/prop_tgt/CXX_EXTENSIONS.html
|
||||||
|
set (CMAKE_CXX_STANDARD_REQUIRED ON)
|
||||||
|
endif ()
|
||||||
|
|
||||||
|
set (CMAKE_C_STANDARD 11)
|
||||||
|
set (CMAKE_C_EXTENSIONS ON)
|
||||||
|
set (CMAKE_C_STANDARD_REQUIRED ON)
|
||||||
|
|
||||||
if (COMPILER_GCC OR COMPILER_CLANG)
|
if (COMPILER_GCC OR COMPILER_CLANG)
|
||||||
# Enable C++14 sized global deallocation functions. It should be enabled by setting -std=c++14 but I'm not sure.
|
# Enable C++14 sized global deallocation functions. It should be enabled by setting -std=c++14 but I'm not sure.
|
||||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsized-deallocation")
|
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsized-deallocation")
|
||||||
endif ()
|
endif ()
|
||||||
|
|
||||||
|
# falign-functions=32 prevents from random performance regressions with the code change. Thus, providing more stable
|
||||||
|
# benchmarks.
|
||||||
|
if (COMPILER_GCC OR COMPILER_CLANG)
|
||||||
|
set(COMPILER_FLAGS "${COMPILER_FLAGS} -falign-functions=32")
|
||||||
|
endif ()
|
||||||
|
|
||||||
# Compiler-specific coverage flags e.g. -fcoverage-mapping for gcc
|
# Compiler-specific coverage flags e.g. -fcoverage-mapping for gcc
|
||||||
option(WITH_COVERAGE "Profile the resulting binary/binaries" OFF)
|
option(WITH_COVERAGE "Profile the resulting binary/binaries" OFF)
|
||||||
|
|
||||||
@ -331,7 +362,7 @@ if (COMPILER_CLANG)
|
|||||||
endif ()
|
endif ()
|
||||||
|
|
||||||
# Always prefer llvm tools when using clang. For instance, we cannot use GNU ar when llvm LTO is enabled
|
# Always prefer llvm tools when using clang. For instance, we cannot use GNU ar when llvm LTO is enabled
|
||||||
find_program (LLVM_AR_PATH NAMES "llvm-ar" "llvm-ar-11" "llvm-ar-10" "llvm-ar-9" "llvm-ar-8")
|
find_program (LLVM_AR_PATH NAMES "llvm-ar" "llvm-ar-12" "llvm-ar-11" "llvm-ar-10" "llvm-ar-9" "llvm-ar-8")
|
||||||
|
|
||||||
if (LLVM_AR_PATH)
|
if (LLVM_AR_PATH)
|
||||||
message(STATUS "Using llvm-ar: ${LLVM_AR_PATH}.")
|
message(STATUS "Using llvm-ar: ${LLVM_AR_PATH}.")
|
||||||
@ -340,7 +371,7 @@ if (COMPILER_CLANG)
|
|||||||
message(WARNING "Cannot find llvm-ar. System ar will be used instead. It does not work with ThinLTO.")
|
message(WARNING "Cannot find llvm-ar. System ar will be used instead. It does not work with ThinLTO.")
|
||||||
endif ()
|
endif ()
|
||||||
|
|
||||||
find_program (LLVM_RANLIB_PATH NAMES "llvm-ranlib" "llvm-ranlib-11" "llvm-ranlib-10" "llvm-ranlib-9" "llvm-ranlib-8")
|
find_program (LLVM_RANLIB_PATH NAMES "llvm-ranlib" "llvm-ranlib-12" "llvm-ranlib-11" "llvm-ranlib-10" "llvm-ranlib-9" "llvm-ranlib-8")
|
||||||
|
|
||||||
if (LLVM_RANLIB_PATH)
|
if (LLVM_RANLIB_PATH)
|
||||||
message(STATUS "Using llvm-ranlib: ${LLVM_RANLIB_PATH}.")
|
message(STATUS "Using llvm-ranlib: ${LLVM_RANLIB_PATH}.")
|
||||||
@ -457,6 +488,7 @@ find_contrib_lib(double-conversion) # Must be before parquet
|
|||||||
include (cmake/find/ssl.cmake)
|
include (cmake/find/ssl.cmake)
|
||||||
include (cmake/find/ldap.cmake) # after ssl
|
include (cmake/find/ldap.cmake) # after ssl
|
||||||
include (cmake/find/icu.cmake)
|
include (cmake/find/icu.cmake)
|
||||||
|
include (cmake/find/xz.cmake)
|
||||||
include (cmake/find/zlib.cmake)
|
include (cmake/find/zlib.cmake)
|
||||||
include (cmake/find/zstd.cmake)
|
include (cmake/find/zstd.cmake)
|
||||||
include (cmake/find/ltdl.cmake) # for odbc
|
include (cmake/find/ltdl.cmake) # for odbc
|
||||||
@ -489,6 +521,7 @@ include (cmake/find/fast_float.cmake)
|
|||||||
include (cmake/find/rapidjson.cmake)
|
include (cmake/find/rapidjson.cmake)
|
||||||
include (cmake/find/fastops.cmake)
|
include (cmake/find/fastops.cmake)
|
||||||
include (cmake/find/odbc.cmake)
|
include (cmake/find/odbc.cmake)
|
||||||
|
include (cmake/find/nanodbc.cmake)
|
||||||
include (cmake/find/rocksdb.cmake)
|
include (cmake/find/rocksdb.cmake)
|
||||||
include (cmake/find/libpqxx.cmake)
|
include (cmake/find/libpqxx.cmake)
|
||||||
include (cmake/find/nuraft.cmake)
|
include (cmake/find/nuraft.cmake)
|
||||||
@ -504,6 +537,7 @@ include (cmake/find/msgpack.cmake)
|
|||||||
include (cmake/find/cassandra.cmake)
|
include (cmake/find/cassandra.cmake)
|
||||||
include (cmake/find/sentry.cmake)
|
include (cmake/find/sentry.cmake)
|
||||||
include (cmake/find/stats.cmake)
|
include (cmake/find/stats.cmake)
|
||||||
|
include (cmake/find/datasketches.cmake)
|
||||||
|
|
||||||
set (USE_INTERNAL_CITYHASH_LIBRARY ON CACHE INTERNAL "")
|
set (USE_INTERNAL_CITYHASH_LIBRARY ON CACHE INTERNAL "")
|
||||||
find_contrib_lib(cityhash)
|
find_contrib_lib(cityhash)
|
||||||
@ -536,7 +570,7 @@ macro (add_executable target)
|
|||||||
# explicitly acquire and interpose malloc symbols by clickhouse_malloc
|
# explicitly acquire and interpose malloc symbols by clickhouse_malloc
|
||||||
# if GLIBC_COMPATIBILITY is ON and ENABLE_THINLTO is on than provide memcpy symbol explicitly to neutrialize thinlto's libcall generation.
|
# if GLIBC_COMPATIBILITY is ON and ENABLE_THINLTO is on than provide memcpy symbol explicitly to neutrialize thinlto's libcall generation.
|
||||||
if (GLIBC_COMPATIBILITY AND ENABLE_THINLTO)
|
if (GLIBC_COMPATIBILITY AND ENABLE_THINLTO)
|
||||||
_add_executable (${ARGV} $<TARGET_OBJECTS:clickhouse_malloc> $<TARGET_OBJECTS:clickhouse_memcpy>)
|
_add_executable (${ARGV} $<TARGET_OBJECTS:clickhouse_malloc> $<TARGET_OBJECTS:memcpy>)
|
||||||
else ()
|
else ()
|
||||||
_add_executable (${ARGV} $<TARGET_OBJECTS:clickhouse_malloc>)
|
_add_executable (${ARGV} $<TARGET_OBJECTS:clickhouse_malloc>)
|
||||||
endif ()
|
endif ()
|
||||||
|
@ -8,12 +8,8 @@ ClickHouse® is an open-source column-oriented database management system that a
|
|||||||
* [Tutorial](https://clickhouse.tech/docs/en/getting_started/tutorial/) shows how to set up and query small ClickHouse cluster.
|
* [Tutorial](https://clickhouse.tech/docs/en/getting_started/tutorial/) shows how to set up and query small ClickHouse cluster.
|
||||||
* [Documentation](https://clickhouse.tech/docs/en/) provides more in-depth information.
|
* [Documentation](https://clickhouse.tech/docs/en/) provides more in-depth information.
|
||||||
* [YouTube channel](https://www.youtube.com/c/ClickHouseDB) has a lot of content about ClickHouse in video format.
|
* [YouTube channel](https://www.youtube.com/c/ClickHouseDB) has a lot of content about ClickHouse in video format.
|
||||||
* [Slack](https://join.slack.com/t/clickhousedb/shared_invite/zt-d2zxkf9e-XyxDa_ucfPxzuH4SJIm~Ng) and [Telegram](https://telegram.me/clickhouse_en) allow to chat with ClickHouse users in real-time.
|
* [Slack](https://join.slack.com/t/clickhousedb/shared_invite/zt-nwwakmk4-xOJ6cdy0sJC3It8j348~IA) and [Telegram](https://telegram.me/clickhouse_en) allow to chat with ClickHouse users in real-time.
|
||||||
* [Blog](https://clickhouse.yandex/blog/en/) contains various ClickHouse-related articles, as well as announcements and reports about events.
|
* [Blog](https://clickhouse.yandex/blog/en/) contains various ClickHouse-related articles, as well as announcements and reports about events.
|
||||||
* [Code Browser](https://clickhouse.tech/codebrowser/html_report/ClickHouse/index.html) with syntax highlight and navigation.
|
* [Code Browser](https://clickhouse.tech/codebrowser/html_report/ClickHouse/index.html) with syntax highlight and navigation.
|
||||||
* [Yandex.Messenger channel](https://yandex.ru/chat/#/join/20e380d9-c7be-4123-ab06-e95fb946975e) shares announcements and useful links in Russian.
|
|
||||||
* [Contacts](https://clickhouse.tech/#contacts) can help to get your questions answered if there are any.
|
* [Contacts](https://clickhouse.tech/#contacts) can help to get your questions answered if there are any.
|
||||||
* You can also [fill this form](https://clickhouse.tech/#meet) to meet Yandex ClickHouse team in person.
|
* You can also [fill this form](https://clickhouse.tech/#meet) to meet Yandex ClickHouse team in person.
|
||||||
|
|
||||||
## Upcoming Events
|
|
||||||
* [Chinese ClickHouse Meetup (online)](http://hdxu.cn/8KxZE) on 6 February 2021.
|
|
||||||
|
@ -8,6 +8,7 @@ add_subdirectory (loggers)
|
|||||||
add_subdirectory (pcg-random)
|
add_subdirectory (pcg-random)
|
||||||
add_subdirectory (widechar_width)
|
add_subdirectory (widechar_width)
|
||||||
add_subdirectory (readpassphrase)
|
add_subdirectory (readpassphrase)
|
||||||
|
add_subdirectory (bridge)
|
||||||
|
|
||||||
if (USE_MYSQL)
|
if (USE_MYSQL)
|
||||||
add_subdirectory (mysqlxx)
|
add_subdirectory (mysqlxx)
|
||||||
|
7
base/bridge/CMakeLists.txt
Normal file
7
base/bridge/CMakeLists.txt
Normal file
@ -0,0 +1,7 @@
|
|||||||
|
add_library (bridge
|
||||||
|
IBridge.cpp
|
||||||
|
)
|
||||||
|
|
||||||
|
target_include_directories (daemon PUBLIC ..)
|
||||||
|
target_link_libraries (bridge PRIVATE daemon dbms Poco::Data Poco::Data::ODBC)
|
||||||
|
|
238
base/bridge/IBridge.cpp
Normal file
238
base/bridge/IBridge.cpp
Normal file
@ -0,0 +1,238 @@
|
|||||||
|
#include "IBridge.h"
|
||||||
|
|
||||||
|
#include <IO/ReadHelpers.h>
|
||||||
|
#include <boost/program_options.hpp>
|
||||||
|
#include <Poco/Net/NetException.h>
|
||||||
|
#include <Poco/Util/HelpFormatter.h>
|
||||||
|
#include <Common/StringUtils/StringUtils.h>
|
||||||
|
#include <Formats/registerFormats.h>
|
||||||
|
#include <common/logger_useful.h>
|
||||||
|
#include <Common/SensitiveDataMasker.h>
|
||||||
|
#include <Server/HTTP/HTTPServer.h>
|
||||||
|
|
||||||
|
#if USE_ODBC
|
||||||
|
# include <Poco/Data/ODBC/Connector.h>
|
||||||
|
#endif
|
||||||
|
|
||||||
|
|
||||||
|
namespace DB
|
||||||
|
{
|
||||||
|
|
||||||
|
namespace ErrorCodes
|
||||||
|
{
|
||||||
|
extern const int ARGUMENT_OUT_OF_BOUND;
|
||||||
|
}
|
||||||
|
|
||||||
|
namespace
|
||||||
|
{
|
||||||
|
Poco::Net::SocketAddress makeSocketAddress(const std::string & host, UInt16 port, Poco::Logger * log)
|
||||||
|
{
|
||||||
|
Poco::Net::SocketAddress socket_address;
|
||||||
|
try
|
||||||
|
{
|
||||||
|
socket_address = Poco::Net::SocketAddress(host, port);
|
||||||
|
}
|
||||||
|
catch (const Poco::Net::DNSException & e)
|
||||||
|
{
|
||||||
|
const auto code = e.code();
|
||||||
|
if (code == EAI_FAMILY
|
||||||
|
#if defined(EAI_ADDRFAMILY)
|
||||||
|
|| code == EAI_ADDRFAMILY
|
||||||
|
#endif
|
||||||
|
)
|
||||||
|
{
|
||||||
|
LOG_ERROR(log, "Cannot resolve listen_host ({}), error {}: {}. If it is an IPv6 address and your host has disabled IPv6, then consider to specify IPv4 address to listen in <listen_host> element of configuration file. Example: <listen_host>0.0.0.0</listen_host>", host, e.code(), e.message());
|
||||||
|
}
|
||||||
|
|
||||||
|
throw;
|
||||||
|
}
|
||||||
|
return socket_address;
|
||||||
|
}
|
||||||
|
|
||||||
|
Poco::Net::SocketAddress socketBindListen(Poco::Net::ServerSocket & socket, const std::string & host, UInt16 port, Poco::Logger * log)
|
||||||
|
{
|
||||||
|
auto address = makeSocketAddress(host, port, log);
|
||||||
|
#if POCO_VERSION < 0x01080000
|
||||||
|
socket.bind(address, /* reuseAddress = */ true);
|
||||||
|
#else
|
||||||
|
socket.bind(address, /* reuseAddress = */ true, /* reusePort = */ false);
|
||||||
|
#endif
|
||||||
|
|
||||||
|
socket.listen(/* backlog = */ 64);
|
||||||
|
|
||||||
|
return address;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void IBridge::handleHelp(const std::string &, const std::string &)
|
||||||
|
{
|
||||||
|
Poco::Util::HelpFormatter help_formatter(options());
|
||||||
|
help_formatter.setCommand(commandName());
|
||||||
|
help_formatter.setHeader("HTTP-proxy for odbc requests");
|
||||||
|
help_formatter.setUsage("--http-port <port>");
|
||||||
|
help_formatter.format(std::cerr);
|
||||||
|
|
||||||
|
stopOptionsProcessing();
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void IBridge::defineOptions(Poco::Util::OptionSet & options)
|
||||||
|
{
|
||||||
|
options.addOption(
|
||||||
|
Poco::Util::Option("http-port", "", "port to listen").argument("http-port", true) .binding("http-port"));
|
||||||
|
|
||||||
|
options.addOption(
|
||||||
|
Poco::Util::Option("listen-host", "", "hostname or address to listen, default 127.0.0.1").argument("listen-host").binding("listen-host"));
|
||||||
|
|
||||||
|
options.addOption(
|
||||||
|
Poco::Util::Option("http-timeout", "", "http timeout for socket, default 1800").argument("http-timeout").binding("http-timeout"));
|
||||||
|
|
||||||
|
options.addOption(
|
||||||
|
Poco::Util::Option("max-server-connections", "", "max connections to server, default 1024").argument("max-server-connections").binding("max-server-connections"));
|
||||||
|
|
||||||
|
options.addOption(
|
||||||
|
Poco::Util::Option("keep-alive-timeout", "", "keepalive timeout, default 10").argument("keep-alive-timeout").binding("keep-alive-timeout"));
|
||||||
|
|
||||||
|
options.addOption(
|
||||||
|
Poco::Util::Option("log-level", "", "sets log level, default info") .argument("log-level").binding("logger.level"));
|
||||||
|
|
||||||
|
options.addOption(
|
||||||
|
Poco::Util::Option("log-path", "", "log path for all logs, default console").argument("log-path").binding("logger.log"));
|
||||||
|
|
||||||
|
options.addOption(
|
||||||
|
Poco::Util::Option("err-log-path", "", "err log path for all logs, default no").argument("err-log-path").binding("logger.errorlog"));
|
||||||
|
|
||||||
|
options.addOption(
|
||||||
|
Poco::Util::Option("stdout-path", "", "stdout log path, default console").argument("stdout-path").binding("logger.stdout"));
|
||||||
|
|
||||||
|
options.addOption(
|
||||||
|
Poco::Util::Option("stderr-path", "", "stderr log path, default console").argument("stderr-path").binding("logger.stderr"));
|
||||||
|
|
||||||
|
using Me = std::decay_t<decltype(*this)>;
|
||||||
|
|
||||||
|
options.addOption(
|
||||||
|
Poco::Util::Option("help", "", "produce this help message").binding("help").callback(Poco::Util::OptionCallback<Me>(this, &Me::handleHelp)));
|
||||||
|
|
||||||
|
ServerApplication::defineOptions(options); // NOLINT Don't need complex BaseDaemon's .xml config
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void IBridge::initialize(Application & self)
|
||||||
|
{
|
||||||
|
BaseDaemon::closeFDs();
|
||||||
|
is_help = config().has("help");
|
||||||
|
|
||||||
|
if (is_help)
|
||||||
|
return;
|
||||||
|
|
||||||
|
config().setString("logger", bridgeName());
|
||||||
|
|
||||||
|
/// Redirect stdout, stderr to specified files.
|
||||||
|
/// Some libraries and sanitizers write to stderr in case of errors.
|
||||||
|
const auto stdout_path = config().getString("logger.stdout", "");
|
||||||
|
if (!stdout_path.empty())
|
||||||
|
{
|
||||||
|
if (!freopen(stdout_path.c_str(), "a+", stdout))
|
||||||
|
throw Poco::OpenFileException("Cannot attach stdout to " + stdout_path);
|
||||||
|
|
||||||
|
/// Disable buffering for stdout.
|
||||||
|
setbuf(stdout, nullptr);
|
||||||
|
}
|
||||||
|
const auto stderr_path = config().getString("logger.stderr", "");
|
||||||
|
if (!stderr_path.empty())
|
||||||
|
{
|
||||||
|
if (!freopen(stderr_path.c_str(), "a+", stderr))
|
||||||
|
throw Poco::OpenFileException("Cannot attach stderr to " + stderr_path);
|
||||||
|
|
||||||
|
/// Disable buffering for stderr.
|
||||||
|
setbuf(stderr, nullptr);
|
||||||
|
}
|
||||||
|
|
||||||
|
buildLoggers(config(), logger(), self.commandName());
|
||||||
|
|
||||||
|
BaseDaemon::logRevision();
|
||||||
|
|
||||||
|
log = &logger();
|
||||||
|
hostname = config().getString("listen-host", "127.0.0.1");
|
||||||
|
port = config().getUInt("http-port");
|
||||||
|
if (port > 0xFFFF)
|
||||||
|
throw Exception("Out of range 'http-port': " + std::to_string(port), ErrorCodes::ARGUMENT_OUT_OF_BOUND);
|
||||||
|
|
||||||
|
http_timeout = config().getUInt("http-timeout", DEFAULT_HTTP_READ_BUFFER_TIMEOUT);
|
||||||
|
max_server_connections = config().getUInt("max-server-connections", 1024);
|
||||||
|
keep_alive_timeout = config().getUInt("keep-alive-timeout", 10);
|
||||||
|
|
||||||
|
initializeTerminationAndSignalProcessing();
|
||||||
|
|
||||||
|
#if USE_ODBC
|
||||||
|
if (bridgeName() == "ODBCBridge")
|
||||||
|
Poco::Data::ODBC::Connector::registerConnector();
|
||||||
|
#endif
|
||||||
|
|
||||||
|
ServerApplication::initialize(self); // NOLINT
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void IBridge::uninitialize()
|
||||||
|
{
|
||||||
|
BaseDaemon::uninitialize();
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
int IBridge::main(const std::vector<std::string> & /*args*/)
|
||||||
|
{
|
||||||
|
if (is_help)
|
||||||
|
return Application::EXIT_OK;
|
||||||
|
|
||||||
|
registerFormats();
|
||||||
|
LOG_INFO(log, "Starting up {} on host: {}, port: {}", bridgeName(), hostname, port);
|
||||||
|
|
||||||
|
Poco::Net::ServerSocket socket;
|
||||||
|
auto address = socketBindListen(socket, hostname, port, log);
|
||||||
|
socket.setReceiveTimeout(http_timeout);
|
||||||
|
socket.setSendTimeout(http_timeout);
|
||||||
|
|
||||||
|
Poco::ThreadPool server_pool(3, max_server_connections);
|
||||||
|
|
||||||
|
Poco::Net::HTTPServerParams::Ptr http_params = new Poco::Net::HTTPServerParams;
|
||||||
|
http_params->setTimeout(http_timeout);
|
||||||
|
http_params->setKeepAliveTimeout(keep_alive_timeout);
|
||||||
|
|
||||||
|
auto shared_context = Context::createShared();
|
||||||
|
auto context = Context::createGlobal(shared_context.get());
|
||||||
|
context->makeGlobalContext();
|
||||||
|
|
||||||
|
if (config().has("query_masking_rules"))
|
||||||
|
SensitiveDataMasker::setInstance(std::make_unique<SensitiveDataMasker>(config(), "query_masking_rules"));
|
||||||
|
|
||||||
|
auto server = HTTPServer(
|
||||||
|
context,
|
||||||
|
getHandlerFactoryPtr(context),
|
||||||
|
server_pool,
|
||||||
|
socket,
|
||||||
|
http_params);
|
||||||
|
|
||||||
|
SCOPE_EXIT({
|
||||||
|
LOG_DEBUG(log, "Received termination signal.");
|
||||||
|
LOG_DEBUG(log, "Waiting for current connections to close.");
|
||||||
|
|
||||||
|
server.stop();
|
||||||
|
|
||||||
|
for (size_t count : ext::range(1, 6))
|
||||||
|
{
|
||||||
|
if (server.currentConnections() == 0)
|
||||||
|
break;
|
||||||
|
LOG_DEBUG(log, "Waiting for {} connections, try {}", server.currentConnections(), count);
|
||||||
|
std::this_thread::sleep_for(std::chrono::milliseconds(1000));
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
server.start();
|
||||||
|
LOG_INFO(log, "Listening http://{}", address.toString());
|
||||||
|
|
||||||
|
waitForTerminationRequest();
|
||||||
|
return Application::EXIT_OK;
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
51
base/bridge/IBridge.h
Normal file
51
base/bridge/IBridge.h
Normal file
@ -0,0 +1,51 @@
|
|||||||
|
#pragma once
|
||||||
|
|
||||||
|
#include <Interpreters/Context.h>
|
||||||
|
#include <Server/HTTP/HTTPRequestHandlerFactory.h>
|
||||||
|
#include <daemon/BaseDaemon.h>
|
||||||
|
|
||||||
|
#include <Poco/Logger.h>
|
||||||
|
#include <Poco/Util/ServerApplication.h>
|
||||||
|
|
||||||
|
|
||||||
|
namespace DB
|
||||||
|
{
|
||||||
|
|
||||||
|
/// Class represents base for clickhouse-odbc-bridge and clickhouse-library-bridge servers.
|
||||||
|
/// Listens to incoming HTTP POST and GET requests on specified port and host.
|
||||||
|
/// Has two handlers '/' for all incoming POST requests and /ping for GET request about service status.
|
||||||
|
class IBridge : public BaseDaemon
|
||||||
|
{
|
||||||
|
|
||||||
|
public:
|
||||||
|
/// Define command line arguments
|
||||||
|
void defineOptions(Poco::Util::OptionSet & options) override;
|
||||||
|
|
||||||
|
protected:
|
||||||
|
using HandlerFactoryPtr = std::shared_ptr<HTTPRequestHandlerFactory>;
|
||||||
|
|
||||||
|
void initialize(Application & self) override;
|
||||||
|
|
||||||
|
void uninitialize() override;
|
||||||
|
|
||||||
|
int main(const std::vector<std::string> & args) override;
|
||||||
|
|
||||||
|
virtual std::string bridgeName() const = 0;
|
||||||
|
|
||||||
|
virtual HandlerFactoryPtr getHandlerFactoryPtr(ContextPtr context) const = 0;
|
||||||
|
|
||||||
|
size_t keep_alive_timeout;
|
||||||
|
|
||||||
|
private:
|
||||||
|
void handleHelp(const std::string &, const std::string &);
|
||||||
|
|
||||||
|
bool is_help;
|
||||||
|
std::string hostname;
|
||||||
|
size_t port;
|
||||||
|
std::string log_level;
|
||||||
|
size_t max_server_connections;
|
||||||
|
size_t http_timeout;
|
||||||
|
|
||||||
|
Poco::Logger * log;
|
||||||
|
};
|
||||||
|
}
|
156
base/common/BorrowedObjectPool.h
Normal file
156
base/common/BorrowedObjectPool.h
Normal file
@ -0,0 +1,156 @@
|
|||||||
|
#pragma once
|
||||||
|
|
||||||
|
#include <cstdint>
|
||||||
|
#include <vector>
|
||||||
|
#include <chrono>
|
||||||
|
#include <mutex>
|
||||||
|
#include <condition_variable>
|
||||||
|
|
||||||
|
#include <common/defines.h>
|
||||||
|
#include <common/MoveOrCopyIfThrow.h>
|
||||||
|
|
||||||
|
/** Pool for limited size objects that cannot be used from different threads simultaneously.
|
||||||
|
* The main use case is to have fixed size of objects that can be reused in difference threads during their lifetime
|
||||||
|
* and have to be initialized on demand.
|
||||||
|
* Two main properties of pool are allocated objects size and borrowed objects size.
|
||||||
|
* Allocated objects size is size of objects that are currently allocated by the pool.
|
||||||
|
* Borrowed objects size is size of objects that are borrowed by clients.
|
||||||
|
* If max_size == 0 then pool has unlimited size and objects will be allocated without limit.
|
||||||
|
*
|
||||||
|
* Pool provides following strategy for borrowing object:
|
||||||
|
* If max_size == 0 then pool has unlimited size and objects will be allocated without limit.
|
||||||
|
* 1. If pool has objects that can be borrowed increase borrowed objects size and return it.
|
||||||
|
* 2. If pool allocatedObjectsSize is lower than max objects size or pool has unlimited size
|
||||||
|
* allocate new object, increase borrowed objects size and return it.
|
||||||
|
* 3. If pool is full wait on condition variable with or without timeout until some object
|
||||||
|
* will be returned to the pool.
|
||||||
|
*/
|
||||||
|
template <typename T>
|
||||||
|
class BorrowedObjectPool final
|
||||||
|
{
|
||||||
|
public:
|
||||||
|
explicit BorrowedObjectPool(size_t max_size_) : max_size(max_size_) {}
|
||||||
|
|
||||||
|
/// Borrow object from pool. If pull is full and all objects were borrowed
|
||||||
|
/// then calling thread will wait until some object will be returned into pool.
|
||||||
|
template <typename FactoryFunc>
|
||||||
|
void borrowObject(T & dest, FactoryFunc && func)
|
||||||
|
{
|
||||||
|
std::unique_lock<std::mutex> lock(objects_mutex);
|
||||||
|
|
||||||
|
if (!objects.empty())
|
||||||
|
{
|
||||||
|
dest = borrowFromObjects(lock);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
bool has_unlimited_size = (max_size == 0);
|
||||||
|
|
||||||
|
if (unlikely(has_unlimited_size) || allocated_objects_size < max_size)
|
||||||
|
{
|
||||||
|
dest = allocateObjectForBorrowing(lock, std::forward<FactoryFunc>(func));
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
condition_variable.wait(lock, [this] { return !objects.empty(); });
|
||||||
|
dest = borrowFromObjects(lock);
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Same as borrowObject function, but wait with timeout.
|
||||||
|
/// Returns true if object was borrowed during timeout.
|
||||||
|
template <typename FactoryFunc>
|
||||||
|
bool tryBorrowObject(T & dest, FactoryFunc && func, size_t timeout_in_milliseconds = 0)
|
||||||
|
{
|
||||||
|
std::unique_lock<std::mutex> lock(objects_mutex);
|
||||||
|
|
||||||
|
if (!objects.empty())
|
||||||
|
{
|
||||||
|
dest = borrowFromObjects(lock);
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
bool has_unlimited_size = (max_size == 0);
|
||||||
|
|
||||||
|
if (unlikely(has_unlimited_size) || allocated_objects_size < max_size)
|
||||||
|
{
|
||||||
|
dest = allocateObjectForBorrowing(lock, std::forward<FactoryFunc>(func));
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
bool wait_result = condition_variable.wait_for(lock, std::chrono::milliseconds(timeout_in_milliseconds), [this] { return !objects.empty(); });
|
||||||
|
|
||||||
|
if (wait_result)
|
||||||
|
dest = borrowFromObjects(lock);
|
||||||
|
|
||||||
|
return wait_result;
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Return object into pool. Client must return same object that was borrowed.
|
||||||
|
inline void returnObject(T && object_to_return)
|
||||||
|
{
|
||||||
|
std::unique_lock<std::mutex> lck(objects_mutex);
|
||||||
|
|
||||||
|
objects.emplace_back(std::move(object_to_return));
|
||||||
|
--borrowed_objects_size;
|
||||||
|
|
||||||
|
condition_variable.notify_one();
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Max pool size
|
||||||
|
inline size_t maxSize() const
|
||||||
|
{
|
||||||
|
return max_size;
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Allocated objects size by the pool. If allocatedObjectsSize == maxSize then pool is full.
|
||||||
|
inline size_t allocatedObjectsSize() const
|
||||||
|
{
|
||||||
|
std::unique_lock<std::mutex> lock(objects_mutex);
|
||||||
|
return allocated_objects_size;
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns allocatedObjectsSize == maxSize
|
||||||
|
inline bool isFull() const
|
||||||
|
{
|
||||||
|
std::unique_lock<std::mutex> lock(objects_mutex);
|
||||||
|
return allocated_objects_size == max_size;
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Borrowed objects size. If borrowedObjectsSize == allocatedObjectsSize and pool is full.
|
||||||
|
/// Then client will wait during borrowObject function call.
|
||||||
|
inline size_t borrowedObjectsSize() const
|
||||||
|
{
|
||||||
|
std::unique_lock<std::mutex> lock(objects_mutex);
|
||||||
|
return borrowed_objects_size;
|
||||||
|
}
|
||||||
|
|
||||||
|
private:
|
||||||
|
|
||||||
|
template <typename FactoryFunc>
|
||||||
|
inline T allocateObjectForBorrowing(const std::unique_lock<std::mutex> &, FactoryFunc && func)
|
||||||
|
{
|
||||||
|
++allocated_objects_size;
|
||||||
|
++borrowed_objects_size;
|
||||||
|
|
||||||
|
return std::forward<FactoryFunc>(func)();
|
||||||
|
}
|
||||||
|
|
||||||
|
inline T borrowFromObjects(const std::unique_lock<std::mutex> &)
|
||||||
|
{
|
||||||
|
T dst;
|
||||||
|
detail::moveOrCopyIfThrow(std::move(objects.back()), dst);
|
||||||
|
objects.pop_back();
|
||||||
|
|
||||||
|
++borrowed_objects_size;
|
||||||
|
|
||||||
|
return dst;
|
||||||
|
}
|
||||||
|
|
||||||
|
size_t max_size;
|
||||||
|
|
||||||
|
mutable std::mutex objects_mutex;
|
||||||
|
std::condition_variable condition_variable;
|
||||||
|
size_t allocated_objects_size = 0;
|
||||||
|
size_t borrowed_objects_size = 0;
|
||||||
|
std::vector<T> objects;
|
||||||
|
};
|
@ -29,7 +29,7 @@ elseif (ENABLE_READLINE)
|
|||||||
endif ()
|
endif ()
|
||||||
|
|
||||||
if (USE_DEBUG_HELPERS)
|
if (USE_DEBUG_HELPERS)
|
||||||
set (INCLUDE_DEBUG_HELPERS "-include ${ClickHouse_SOURCE_DIR}/base/common/iostream_debug_helpers.h")
|
set (INCLUDE_DEBUG_HELPERS "-include \"${ClickHouse_SOURCE_DIR}/base/common/iostream_debug_helpers.h\"")
|
||||||
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${INCLUDE_DEBUG_HELPERS}")
|
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${INCLUDE_DEBUG_HELPERS}")
|
||||||
endif ()
|
endif ()
|
||||||
|
|
||||||
@ -45,7 +45,11 @@ if (USE_INTERNAL_CCTZ)
|
|||||||
set_source_files_properties(DateLUTImpl.cpp PROPERTIES COMPILE_DEFINITIONS USE_INTERNAL_CCTZ)
|
set_source_files_properties(DateLUTImpl.cpp PROPERTIES COMPILE_DEFINITIONS USE_INTERNAL_CCTZ)
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
target_include_directories(common PUBLIC .. ${CMAKE_CURRENT_BINARY_DIR}/..)
|
target_include_directories(common PUBLIC .. "${CMAKE_CURRENT_BINARY_DIR}/..")
|
||||||
|
|
||||||
|
if (OS_DARWIN AND NOT MAKE_STATIC_LIBRARIES)
|
||||||
|
target_link_libraries(common PUBLIC -Wl,-U,_inside_main)
|
||||||
|
endif()
|
||||||
|
|
||||||
# Allow explicit fallback to readline
|
# Allow explicit fallback to readline
|
||||||
if (NOT ENABLE_REPLXX AND ENABLE_READLINE)
|
if (NOT ENABLE_REPLXX AND ENABLE_READLINE)
|
||||||
@ -74,7 +78,6 @@ target_link_libraries (common
|
|||||||
${CITYHASH_LIBRARIES}
|
${CITYHASH_LIBRARIES}
|
||||||
boost::headers_only
|
boost::headers_only
|
||||||
boost::system
|
boost::system
|
||||||
FastMemcpy
|
|
||||||
Poco::Net
|
Poco::Net
|
||||||
Poco::Net::SSL
|
Poco::Net::SSL
|
||||||
Poco::Util
|
Poco::Util
|
||||||
|
@ -152,7 +152,7 @@ const DateLUTImpl & DateLUT::getImplementation(const std::string & time_zone) co
|
|||||||
|
|
||||||
auto it = impls.emplace(time_zone, nullptr).first;
|
auto it = impls.emplace(time_zone, nullptr).first;
|
||||||
if (!it->second)
|
if (!it->second)
|
||||||
it->second = std::make_unique<DateLUTImpl>(time_zone);
|
it->second = std::unique_ptr<DateLUTImpl>(new DateLUTImpl(time_zone));
|
||||||
|
|
||||||
return *it->second;
|
return *it->second;
|
||||||
}
|
}
|
||||||
|
@ -32,7 +32,6 @@ public:
|
|||||||
|
|
||||||
return date_lut.getImplementation(time_zone);
|
return date_lut.getImplementation(time_zone);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void setDefaultTimezone(const std::string & time_zone)
|
static void setDefaultTimezone(const std::string & time_zone)
|
||||||
{
|
{
|
||||||
auto & date_lut = getInstance();
|
auto & date_lut = getInstance();
|
||||||
|
@ -46,24 +46,41 @@ DateLUTImpl::DateLUTImpl(const std::string & time_zone_)
|
|||||||
if (&inside_main)
|
if (&inside_main)
|
||||||
assert(inside_main);
|
assert(inside_main);
|
||||||
|
|
||||||
size_t i = 0;
|
|
||||||
time_t start_of_day = 0;
|
|
||||||
|
|
||||||
cctz::time_zone cctz_time_zone;
|
cctz::time_zone cctz_time_zone;
|
||||||
if (!cctz::load_time_zone(time_zone, &cctz_time_zone))
|
if (!cctz::load_time_zone(time_zone, &cctz_time_zone))
|
||||||
throw Poco::Exception("Cannot load time zone " + time_zone_);
|
throw Poco::Exception("Cannot load time zone " + time_zone_);
|
||||||
|
|
||||||
cctz::time_zone::absolute_lookup start_of_epoch_lookup = cctz_time_zone.lookup(std::chrono::system_clock::from_time_t(start_of_day));
|
constexpr cctz::civil_day epoch{1970, 1, 1};
|
||||||
offset_at_start_of_epoch = start_of_epoch_lookup.offset;
|
constexpr cctz::civil_day lut_start{DATE_LUT_MIN_YEAR, 1, 1};
|
||||||
offset_is_whole_number_of_hours_everytime = true;
|
time_t start_of_day;
|
||||||
|
|
||||||
cctz::civil_day date{1970, 1, 1};
|
/// Note: it's validated against all timezones in the system.
|
||||||
|
static_assert((epoch - lut_start) == daynum_offset_epoch);
|
||||||
|
|
||||||
|
offset_at_start_of_epoch = cctz_time_zone.lookup(cctz_time_zone.lookup(epoch).pre).offset;
|
||||||
|
offset_at_start_of_lut = cctz_time_zone.lookup(cctz_time_zone.lookup(lut_start).pre).offset;
|
||||||
|
offset_is_whole_number_of_hours_during_epoch = true;
|
||||||
|
|
||||||
|
cctz::civil_day date = lut_start;
|
||||||
|
|
||||||
|
UInt32 i = 0;
|
||||||
do
|
do
|
||||||
{
|
{
|
||||||
cctz::time_zone::civil_lookup lookup = cctz_time_zone.lookup(date);
|
cctz::time_zone::civil_lookup lookup = cctz_time_zone.lookup(date);
|
||||||
|
|
||||||
start_of_day = std::chrono::system_clock::to_time_t(lookup.pre); /// Ambiguity is possible.
|
/// Ambiguity is possible if time was changed backwards at the midnight
|
||||||
|
/// or after midnight time has been changed back to midnight, for example one hour backwards at 01:00
|
||||||
|
/// or after midnight time has been changed to the previous day, for example two hours backwards at 01:00
|
||||||
|
/// Then midnight appears twice. Usually time change happens exactly at 00:00 or 01:00.
|
||||||
|
|
||||||
|
/// If transition did not involve previous day, we should use the first midnight as the start of the day,
|
||||||
|
/// otherwise it's better to use the second midnight.
|
||||||
|
|
||||||
|
std::chrono::time_point start_of_day_time_point = lookup.trans < lookup.post
|
||||||
|
? lookup.post /* Second midnight appears after transition, so there was a piece of previous day after transition */
|
||||||
|
: lookup.pre;
|
||||||
|
|
||||||
|
start_of_day = std::chrono::system_clock::to_time_t(start_of_day_time_point);
|
||||||
|
|
||||||
Values & values = lut[i];
|
Values & values = lut[i];
|
||||||
values.year = date.year();
|
values.year = date.year();
|
||||||
@ -72,7 +89,7 @@ DateLUTImpl::DateLUTImpl(const std::string & time_zone_)
|
|||||||
values.day_of_week = getDayOfWeek(date);
|
values.day_of_week = getDayOfWeek(date);
|
||||||
values.date = start_of_day;
|
values.date = start_of_day;
|
||||||
|
|
||||||
assert(values.year >= DATE_LUT_MIN_YEAR && values.year <= DATE_LUT_MAX_YEAR);
|
assert(values.year >= DATE_LUT_MIN_YEAR && values.year <= DATE_LUT_MAX_YEAR + 1);
|
||||||
assert(values.month >= 1 && values.month <= 12);
|
assert(values.month >= 1 && values.month <= 12);
|
||||||
assert(values.day_of_month >= 1 && values.day_of_month <= 31);
|
assert(values.day_of_month >= 1 && values.day_of_month <= 31);
|
||||||
assert(values.day_of_week >= 1 && values.day_of_week <= 7);
|
assert(values.day_of_week >= 1 && values.day_of_week <= 7);
|
||||||
@ -85,50 +102,42 @@ DateLUTImpl::DateLUTImpl(const std::string & time_zone_)
|
|||||||
else
|
else
|
||||||
values.days_in_month = i != 0 ? lut[i - 1].days_in_month : 31;
|
values.days_in_month = i != 0 ? lut[i - 1].days_in_month : 31;
|
||||||
|
|
||||||
values.time_at_offset_change = 0;
|
values.time_at_offset_change_value = 0;
|
||||||
values.amount_of_offset_change = 0;
|
values.amount_of_offset_change_value = 0;
|
||||||
|
|
||||||
if (start_of_day % 3600)
|
if (offset_is_whole_number_of_hours_during_epoch && start_of_day > 0 && start_of_day % 3600)
|
||||||
offset_is_whole_number_of_hours_everytime = false;
|
offset_is_whole_number_of_hours_during_epoch = false;
|
||||||
|
|
||||||
/// If UTC offset was changed in previous day.
|
/// If UTC offset was changed this day.
|
||||||
if (i != 0)
|
/// Change in time zone without transition is possible, e.g. Moscow 1991 Sun, 31 Mar, 02:00 MSK to EEST
|
||||||
|
cctz::time_zone::civil_transition transition{};
|
||||||
|
if (cctz_time_zone.next_transition(start_of_day_time_point - std::chrono::seconds(1), &transition)
|
||||||
|
&& (cctz::civil_day(transition.from) == date || cctz::civil_day(transition.to) == date)
|
||||||
|
&& transition.from != transition.to)
|
||||||
{
|
{
|
||||||
auto amount_of_offset_change_at_prev_day = 86400 - (lut[i].date - lut[i - 1].date);
|
values.time_at_offset_change_value = (transition.from - cctz::civil_second(date)) / Values::OffsetChangeFactor;
|
||||||
if (amount_of_offset_change_at_prev_day)
|
values.amount_of_offset_change_value = (transition.to - transition.from) / Values::OffsetChangeFactor;
|
||||||
{
|
|
||||||
lut[i - 1].amount_of_offset_change = amount_of_offset_change_at_prev_day;
|
|
||||||
|
|
||||||
const auto utc_offset_at_beginning_of_day = cctz_time_zone.lookup(std::chrono::system_clock::from_time_t(lut[i - 1].date)).offset;
|
// std::cerr << time_zone << ", " << date << ": change from " << transition.from << " to " << transition.to << "\n";
|
||||||
|
// std::cerr << time_zone << ", " << date << ": change at " << values.time_at_offset_change() << " with " << values.amount_of_offset_change() << "\n";
|
||||||
|
|
||||||
/// Find a time (timestamp offset from beginning of day),
|
/// We don't support too large changes.
|
||||||
/// when UTC offset was changed. Search is performed with 15-minute granularity, assuming it is enough.
|
if (values.amount_of_offset_change_value > 24 * 4)
|
||||||
|
values.amount_of_offset_change_value = 24 * 4;
|
||||||
|
else if (values.amount_of_offset_change_value < -24 * 4)
|
||||||
|
values.amount_of_offset_change_value = -24 * 4;
|
||||||
|
|
||||||
time_t time_at_offset_change = 900;
|
/// We don't support cases when time change results in switching to previous day.
|
||||||
while (time_at_offset_change < 86400)
|
/// Shift the point of time change later.
|
||||||
{
|
if (values.time_at_offset_change_value + values.amount_of_offset_change_value < 0)
|
||||||
auto utc_offset_at_current_time = cctz_time_zone.lookup(std::chrono::system_clock::from_time_t(
|
values.time_at_offset_change_value = -values.amount_of_offset_change_value;
|
||||||
lut[i - 1].date + time_at_offset_change)).offset;
|
|
||||||
|
|
||||||
if (utc_offset_at_current_time != utc_offset_at_beginning_of_day)
|
|
||||||
break;
|
|
||||||
|
|
||||||
time_at_offset_change += 900;
|
|
||||||
}
|
|
||||||
|
|
||||||
lut[i - 1].time_at_offset_change = time_at_offset_change;
|
|
||||||
|
|
||||||
/// We doesn't support cases when time change results in switching to previous day.
|
|
||||||
if (static_cast<int>(lut[i - 1].time_at_offset_change) + static_cast<int>(lut[i - 1].amount_of_offset_change) < 0)
|
|
||||||
lut[i - 1].time_at_offset_change = -lut[i - 1].amount_of_offset_change;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Going to next day.
|
/// Going to next day.
|
||||||
++date;
|
++date;
|
||||||
++i;
|
++i;
|
||||||
}
|
}
|
||||||
while (start_of_day <= DATE_LUT_MAX && i <= DATE_LUT_MAX_DAY_NUM);
|
while (i < DATE_LUT_SIZE && lut[i - 1].year <= DATE_LUT_MAX_YEAR);
|
||||||
|
|
||||||
/// Fill excessive part of lookup table. This is needed only to simplify handling of overflow cases.
|
/// Fill excessive part of lookup table. This is needed only to simplify handling of overflow cases.
|
||||||
while (i < DATE_LUT_SIZE)
|
while (i < DATE_LUT_SIZE)
|
||||||
|
File diff suppressed because it is too large
Load Diff
@ -7,3 +7,8 @@
|
|||||||
* See DateLUTImpl for usage examples.
|
* See DateLUTImpl for usage examples.
|
||||||
*/
|
*/
|
||||||
STRONG_TYPEDEF(UInt16, DayNum)
|
STRONG_TYPEDEF(UInt16, DayNum)
|
||||||
|
|
||||||
|
/** Represent number of days since 1970-01-01 but in extended range,
|
||||||
|
* for dates before 1970-01-01 and after 2105
|
||||||
|
*/
|
||||||
|
STRONG_TYPEDEF(Int32, ExtendedDayNum)
|
||||||
|
@ -92,20 +92,10 @@ public:
|
|||||||
LocalDate(const LocalDate &) noexcept = default;
|
LocalDate(const LocalDate &) noexcept = default;
|
||||||
LocalDate & operator= (const LocalDate &) noexcept = default;
|
LocalDate & operator= (const LocalDate &) noexcept = default;
|
||||||
|
|
||||||
LocalDate & operator= (time_t time)
|
|
||||||
{
|
|
||||||
init(time);
|
|
||||||
return *this;
|
|
||||||
}
|
|
||||||
|
|
||||||
operator time_t() const
|
|
||||||
{
|
|
||||||
return DateLUT::instance().makeDate(m_year, m_month, m_day);
|
|
||||||
}
|
|
||||||
|
|
||||||
DayNum getDayNum() const
|
DayNum getDayNum() const
|
||||||
{
|
{
|
||||||
return DateLUT::instance().makeDayNum(m_year, m_month, m_day);
|
const auto & lut = DateLUT::instance();
|
||||||
|
return DayNum(lut.makeDayNum(m_year, m_month, m_day).toUnderType());
|
||||||
}
|
}
|
||||||
|
|
||||||
operator DayNum() const
|
operator DayNum() const
|
||||||
@ -166,20 +156,3 @@ public:
|
|||||||
};
|
};
|
||||||
|
|
||||||
static_assert(sizeof(LocalDate) == 4);
|
static_assert(sizeof(LocalDate) == 4);
|
||||||
|
|
||||||
|
|
||||||
inline std::ostream & operator<< (std::ostream & ostr, const LocalDate & date)
|
|
||||||
{
|
|
||||||
return ostr << date.year()
|
|
||||||
<< '-' << (date.month() / 10) << (date.month() % 10)
|
|
||||||
<< '-' << (date.day() / 10) << (date.day() % 10);
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
namespace std
|
|
||||||
{
|
|
||||||
inline string to_string(const LocalDate & date)
|
|
||||||
{
|
|
||||||
return date.toString();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
@ -29,29 +29,16 @@ private:
|
|||||||
/// NOTE We may use attribute packed instead, but it is less portable.
|
/// NOTE We may use attribute packed instead, but it is less portable.
|
||||||
unsigned char pad = 0;
|
unsigned char pad = 0;
|
||||||
|
|
||||||
void init(time_t time)
|
void init(time_t time, const DateLUTImpl & time_zone)
|
||||||
{
|
{
|
||||||
if (unlikely(time > DATE_LUT_MAX || time == 0))
|
DateLUTImpl::DateTimeComponents components = time_zone.toDateTimeComponents(time);
|
||||||
{
|
|
||||||
m_year = 0;
|
|
||||||
m_month = 0;
|
|
||||||
m_day = 0;
|
|
||||||
m_hour = 0;
|
|
||||||
m_minute = 0;
|
|
||||||
m_second = 0;
|
|
||||||
|
|
||||||
return;
|
m_year = components.date.year;
|
||||||
}
|
m_month = components.date.month;
|
||||||
|
m_day = components.date.day;
|
||||||
const auto & date_lut = DateLUT::instance();
|
m_hour = components.time.hour;
|
||||||
const auto & values = date_lut.getValues(time);
|
m_minute = components.time.minute;
|
||||||
|
m_second = components.time.second;
|
||||||
m_year = values.year;
|
|
||||||
m_month = values.month;
|
|
||||||
m_day = values.day_of_month;
|
|
||||||
m_hour = date_lut.toHour(time);
|
|
||||||
m_minute = date_lut.toMinute(time);
|
|
||||||
m_second = date_lut.toSecond(time);
|
|
||||||
|
|
||||||
(void)pad; /// Suppress unused private field warning.
|
(void)pad; /// Suppress unused private field warning.
|
||||||
}
|
}
|
||||||
@ -73,9 +60,9 @@ private:
|
|||||||
}
|
}
|
||||||
|
|
||||||
public:
|
public:
|
||||||
explicit LocalDateTime(time_t time)
|
explicit LocalDateTime(time_t time, const DateLUTImpl & time_zone = DateLUT::instance())
|
||||||
{
|
{
|
||||||
init(time);
|
init(time, time_zone);
|
||||||
}
|
}
|
||||||
|
|
||||||
LocalDateTime(unsigned short year_, unsigned char month_, unsigned char day_,
|
LocalDateTime(unsigned short year_, unsigned char month_, unsigned char day_,
|
||||||
@ -104,19 +91,6 @@ public:
|
|||||||
LocalDateTime(const LocalDateTime &) noexcept = default;
|
LocalDateTime(const LocalDateTime &) noexcept = default;
|
||||||
LocalDateTime & operator= (const LocalDateTime &) noexcept = default;
|
LocalDateTime & operator= (const LocalDateTime &) noexcept = default;
|
||||||
|
|
||||||
LocalDateTime & operator= (time_t time)
|
|
||||||
{
|
|
||||||
init(time);
|
|
||||||
return *this;
|
|
||||||
}
|
|
||||||
|
|
||||||
operator time_t() const
|
|
||||||
{
|
|
||||||
return m_year == 0
|
|
||||||
? 0
|
|
||||||
: DateLUT::instance().makeDateTime(m_year, m_month, m_day, m_hour, m_minute, m_second);
|
|
||||||
}
|
|
||||||
|
|
||||||
unsigned short year() const { return m_year; }
|
unsigned short year() const { return m_year; }
|
||||||
unsigned char month() const { return m_month; }
|
unsigned char month() const { return m_month; }
|
||||||
unsigned char day() const { return m_day; }
|
unsigned char day() const { return m_day; }
|
||||||
@ -132,8 +106,30 @@ public:
|
|||||||
void second(unsigned char x) { m_second = x; }
|
void second(unsigned char x) { m_second = x; }
|
||||||
|
|
||||||
LocalDate toDate() const { return LocalDate(m_year, m_month, m_day); }
|
LocalDate toDate() const { return LocalDate(m_year, m_month, m_day); }
|
||||||
|
LocalDateTime toStartOfDate() const { return LocalDateTime(m_year, m_month, m_day, 0, 0, 0); }
|
||||||
|
|
||||||
LocalDateTime toStartOfDate() { return LocalDateTime(m_year, m_month, m_day, 0, 0, 0); }
|
std::string toString() const
|
||||||
|
{
|
||||||
|
std::string s{"0000-00-00 00:00:00"};
|
||||||
|
|
||||||
|
s[0] += m_year / 1000;
|
||||||
|
s[1] += (m_year / 100) % 10;
|
||||||
|
s[2] += (m_year / 10) % 10;
|
||||||
|
s[3] += m_year % 10;
|
||||||
|
s[5] += m_month / 10;
|
||||||
|
s[6] += m_month % 10;
|
||||||
|
s[8] += m_day / 10;
|
||||||
|
s[9] += m_day % 10;
|
||||||
|
|
||||||
|
s[11] += m_hour / 10;
|
||||||
|
s[12] += m_hour % 10;
|
||||||
|
s[14] += m_minute / 10;
|
||||||
|
s[15] += m_minute % 10;
|
||||||
|
s[17] += m_second / 10;
|
||||||
|
s[18] += m_second % 10;
|
||||||
|
|
||||||
|
return s;
|
||||||
|
}
|
||||||
|
|
||||||
bool operator< (const LocalDateTime & other) const
|
bool operator< (const LocalDateTime & other) const
|
||||||
{
|
{
|
||||||
@ -167,28 +163,3 @@ public:
|
|||||||
};
|
};
|
||||||
|
|
||||||
static_assert(sizeof(LocalDateTime) == 8);
|
static_assert(sizeof(LocalDateTime) == 8);
|
||||||
|
|
||||||
|
|
||||||
inline std::ostream & operator<< (std::ostream & ostr, const LocalDateTime & datetime)
|
|
||||||
{
|
|
||||||
ostr << std::setfill('0') << std::setw(4) << datetime.year();
|
|
||||||
|
|
||||||
ostr << '-' << (datetime.month() / 10) << (datetime.month() % 10)
|
|
||||||
<< '-' << (datetime.day() / 10) << (datetime.day() % 10)
|
|
||||||
<< ' ' << (datetime.hour() / 10) << (datetime.hour() % 10)
|
|
||||||
<< ':' << (datetime.minute() / 10) << (datetime.minute() % 10)
|
|
||||||
<< ':' << (datetime.second() / 10) << (datetime.second() % 10);
|
|
||||||
|
|
||||||
return ostr;
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
namespace std
|
|
||||||
{
|
|
||||||
inline string to_string(const LocalDateTime & datetime)
|
|
||||||
{
|
|
||||||
stringstream str;
|
|
||||||
str << datetime;
|
|
||||||
return str.str();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
33
base/common/MoveOrCopyIfThrow.h
Normal file
33
base/common/MoveOrCopyIfThrow.h
Normal file
@ -0,0 +1,33 @@
|
|||||||
|
#pragma once
|
||||||
|
|
||||||
|
#include <common/types.h>
|
||||||
|
|
||||||
|
namespace detail
|
||||||
|
{
|
||||||
|
template <typename T, bool is_nothrow_move_assignable = std::is_nothrow_move_assignable_v<T>>
|
||||||
|
struct MoveOrCopyIfThrow;
|
||||||
|
|
||||||
|
template <typename T>
|
||||||
|
struct MoveOrCopyIfThrow<T, true>
|
||||||
|
{
|
||||||
|
void operator()(T && src, T & dst) const
|
||||||
|
{
|
||||||
|
dst = std::forward<T>(src);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
template <typename T>
|
||||||
|
struct MoveOrCopyIfThrow<T, false>
|
||||||
|
{
|
||||||
|
void operator()(T && src, T & dst) const
|
||||||
|
{
|
||||||
|
dst = src;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
template <typename T>
|
||||||
|
void moveOrCopyIfThrow(T && src, T & dst)
|
||||||
|
{
|
||||||
|
MoveOrCopyIfThrow<T>()(std::forward<T>(src), dst);
|
||||||
|
}
|
||||||
|
}
|
@ -12,6 +12,8 @@
|
|||||||
#include <dlfcn.h>
|
#include <dlfcn.h>
|
||||||
#include <fcntl.h>
|
#include <fcntl.h>
|
||||||
#include <fstream>
|
#include <fstream>
|
||||||
|
#include <fmt/format.h>
|
||||||
|
|
||||||
|
|
||||||
namespace
|
namespace
|
||||||
{
|
{
|
||||||
@ -189,8 +191,8 @@ void ReplxxLineReader::openEditor()
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
String editor = std::getenv("EDITOR");
|
const char * editor = std::getenv("EDITOR");
|
||||||
if (editor.empty())
|
if (!editor || !*editor)
|
||||||
editor = "vim";
|
editor = "vim";
|
||||||
|
|
||||||
replxx::Replxx::State state(rx.get_state());
|
replxx::Replxx::State state(rx.get_state());
|
||||||
@ -204,7 +206,7 @@ void ReplxxLineReader::openEditor()
|
|||||||
if ((-1 == res || 0 == res) && errno != EINTR)
|
if ((-1 == res || 0 == res) && errno != EINTR)
|
||||||
{
|
{
|
||||||
rx.print("Cannot write to temporary query file %s: %s\n", filename, errnoToString(errno).c_str());
|
rx.print("Cannot write to temporary query file %s: %s\n", filename, errnoToString(errno).c_str());
|
||||||
return;
|
break;
|
||||||
}
|
}
|
||||||
bytes_written += res;
|
bytes_written += res;
|
||||||
}
|
}
|
||||||
@ -215,7 +217,7 @@ void ReplxxLineReader::openEditor()
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (0 == execute(editor + " " + filename))
|
if (0 == execute(fmt::format("{} {}", editor, filename)))
|
||||||
{
|
{
|
||||||
try
|
try
|
||||||
{
|
{
|
||||||
|
@ -1,9 +1,36 @@
|
|||||||
#pragma once
|
#pragma once
|
||||||
|
|
||||||
#include <common/extended_types.h>
|
#include <common/extended_types.h>
|
||||||
|
#include <common/defines.h>
|
||||||
|
|
||||||
|
|
||||||
namespace common
|
namespace common
|
||||||
{
|
{
|
||||||
|
/// Multiply and ignore overflow.
|
||||||
|
template <typename T1, typename T2>
|
||||||
|
inline auto NO_SANITIZE_UNDEFINED mulIgnoreOverflow(T1 x, T2 y)
|
||||||
|
{
|
||||||
|
return x * y;
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename T1, typename T2>
|
||||||
|
inline auto NO_SANITIZE_UNDEFINED addIgnoreOverflow(T1 x, T2 y)
|
||||||
|
{
|
||||||
|
return x + y;
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename T1, typename T2>
|
||||||
|
inline auto NO_SANITIZE_UNDEFINED subIgnoreOverflow(T1 x, T2 y)
|
||||||
|
{
|
||||||
|
return x - y;
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename T>
|
||||||
|
inline auto NO_SANITIZE_UNDEFINED negateIgnoreOverflow(T x)
|
||||||
|
{
|
||||||
|
return -x;
|
||||||
|
}
|
||||||
|
|
||||||
template <typename T>
|
template <typename T>
|
||||||
inline bool addOverflow(T x, T y, T & res)
|
inline bool addOverflow(T x, T y, T & res)
|
||||||
{
|
{
|
||||||
@ -33,14 +60,14 @@ namespace common
|
|||||||
{
|
{
|
||||||
static constexpr __int128 min_int128 = minInt128();
|
static constexpr __int128 min_int128 = minInt128();
|
||||||
static constexpr __int128 max_int128 = maxInt128();
|
static constexpr __int128 max_int128 = maxInt128();
|
||||||
res = x + y;
|
res = addIgnoreOverflow(x, y);
|
||||||
return (y > 0 && x > max_int128 - y) || (y < 0 && x < min_int128 - y);
|
return (y > 0 && x > max_int128 - y) || (y < 0 && x < min_int128 - y);
|
||||||
}
|
}
|
||||||
|
|
||||||
template <>
|
template <>
|
||||||
inline bool addOverflow(Int256 x, Int256 y, Int256 & res)
|
inline bool addOverflow(Int256 x, Int256 y, Int256 & res)
|
||||||
{
|
{
|
||||||
res = x + y;
|
res = addIgnoreOverflow(x, y);
|
||||||
return (y > 0 && x > std::numeric_limits<Int256>::max() - y) ||
|
return (y > 0 && x > std::numeric_limits<Int256>::max() - y) ||
|
||||||
(y < 0 && x < std::numeric_limits<Int256>::min() - y);
|
(y < 0 && x < std::numeric_limits<Int256>::min() - y);
|
||||||
}
|
}
|
||||||
@ -48,7 +75,7 @@ namespace common
|
|||||||
template <>
|
template <>
|
||||||
inline bool addOverflow(UInt256 x, UInt256 y, UInt256 & res)
|
inline bool addOverflow(UInt256 x, UInt256 y, UInt256 & res)
|
||||||
{
|
{
|
||||||
res = x + y;
|
res = addIgnoreOverflow(x, y);
|
||||||
return x > std::numeric_limits<UInt256>::max() - y;
|
return x > std::numeric_limits<UInt256>::max() - y;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -81,14 +108,14 @@ namespace common
|
|||||||
{
|
{
|
||||||
static constexpr __int128 min_int128 = minInt128();
|
static constexpr __int128 min_int128 = minInt128();
|
||||||
static constexpr __int128 max_int128 = maxInt128();
|
static constexpr __int128 max_int128 = maxInt128();
|
||||||
res = x - y;
|
res = subIgnoreOverflow(x, y);
|
||||||
return (y < 0 && x > max_int128 + y) || (y > 0 && x < min_int128 + y);
|
return (y < 0 && x > max_int128 + y) || (y > 0 && x < min_int128 + y);
|
||||||
}
|
}
|
||||||
|
|
||||||
template <>
|
template <>
|
||||||
inline bool subOverflow(Int256 x, Int256 y, Int256 & res)
|
inline bool subOverflow(Int256 x, Int256 y, Int256 & res)
|
||||||
{
|
{
|
||||||
res = x - y;
|
res = subIgnoreOverflow(x, y);
|
||||||
return (y < 0 && x > std::numeric_limits<Int256>::max() + y) ||
|
return (y < 0 && x > std::numeric_limits<Int256>::max() + y) ||
|
||||||
(y > 0 && x < std::numeric_limits<Int256>::min() + y);
|
(y > 0 && x < std::numeric_limits<Int256>::min() + y);
|
||||||
}
|
}
|
||||||
@ -96,7 +123,7 @@ namespace common
|
|||||||
template <>
|
template <>
|
||||||
inline bool subOverflow(UInt256 x, UInt256 y, UInt256 & res)
|
inline bool subOverflow(UInt256 x, UInt256 y, UInt256 & res)
|
||||||
{
|
{
|
||||||
res = x - y;
|
res = subIgnoreOverflow(x, y);
|
||||||
return x < y;
|
return x < y;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -127,25 +154,25 @@ namespace common
|
|||||||
template <>
|
template <>
|
||||||
inline bool mulOverflow(Int128 x, Int128 y, Int128 & res)
|
inline bool mulOverflow(Int128 x, Int128 y, Int128 & res)
|
||||||
{
|
{
|
||||||
res = static_cast<UInt128>(x) * static_cast<UInt128>(y); /// Avoid signed integer overflow.
|
res = mulIgnoreOverflow(x, y);
|
||||||
if (!x || !y)
|
if (!x || !y)
|
||||||
return false;
|
return false;
|
||||||
|
|
||||||
UInt128 a = (x > 0) ? x : -x;
|
UInt128 a = (x > 0) ? x : -x;
|
||||||
UInt128 b = (y > 0) ? y : -y;
|
UInt128 b = (y > 0) ? y : -y;
|
||||||
return (a * b) / b != a;
|
return mulIgnoreOverflow(a, b) / b != a;
|
||||||
}
|
}
|
||||||
|
|
||||||
template <>
|
template <>
|
||||||
inline bool mulOverflow(Int256 x, Int256 y, Int256 & res)
|
inline bool mulOverflow(Int256 x, Int256 y, Int256 & res)
|
||||||
{
|
{
|
||||||
res = x * y;
|
res = mulIgnoreOverflow(x, y);
|
||||||
if (!x || !y)
|
if (!x || !y)
|
||||||
return false;
|
return false;
|
||||||
|
|
||||||
Int256 a = (x > 0) ? x : -x;
|
Int256 a = (x > 0) ? x : -x;
|
||||||
Int256 b = (y > 0) ? y : -y;
|
Int256 b = (y > 0) ? y : -y;
|
||||||
return (a * b) / b != a;
|
return mulIgnoreOverflow(a, b) / b != a;
|
||||||
}
|
}
|
||||||
|
|
||||||
template <>
|
template <>
|
||||||
@ -160,9 +187,9 @@ namespace common
|
|||||||
template <>
|
template <>
|
||||||
inline bool mulOverflow(UInt256 x, UInt256 y, UInt256 & res)
|
inline bool mulOverflow(UInt256 x, UInt256 y, UInt256 & res)
|
||||||
{
|
{
|
||||||
res = x * y;
|
res = mulIgnoreOverflow(x, y);
|
||||||
if (!x || !y)
|
if (!x || !y)
|
||||||
return false;
|
return false;
|
||||||
return (x * y) / y != x;
|
return res / y != x;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -1,5 +1,20 @@
|
|||||||
#pragma once
|
#pragma once
|
||||||
|
|
||||||
|
/// __has_feature supported only by clang.
|
||||||
|
///
|
||||||
|
/// But libcxx/libcxxabi overrides it to 0,
|
||||||
|
/// thus the checks for __has_feature will be wrong.
|
||||||
|
///
|
||||||
|
/// NOTE:
|
||||||
|
/// - __has_feature cannot be simply undefined,
|
||||||
|
/// since this will be broken if some C++ header will be included after
|
||||||
|
/// including <common/defines.h>
|
||||||
|
/// - it should not have fallback to 0,
|
||||||
|
/// since this may create false-positive detection (common problem)
|
||||||
|
#if defined(__clang__) && defined(__has_feature)
|
||||||
|
# define ch_has_feature __has_feature
|
||||||
|
#endif
|
||||||
|
|
||||||
#if defined(_MSC_VER)
|
#if defined(_MSC_VER)
|
||||||
# if !defined(likely)
|
# if !defined(likely)
|
||||||
# define likely(x) (x)
|
# define likely(x) (x)
|
||||||
@ -32,8 +47,8 @@
|
|||||||
|
|
||||||
/// Check for presence of address sanitizer
|
/// Check for presence of address sanitizer
|
||||||
#if !defined(ADDRESS_SANITIZER)
|
#if !defined(ADDRESS_SANITIZER)
|
||||||
# if defined(__has_feature)
|
# if defined(ch_has_feature)
|
||||||
# if __has_feature(address_sanitizer)
|
# if ch_has_feature(address_sanitizer)
|
||||||
# define ADDRESS_SANITIZER 1
|
# define ADDRESS_SANITIZER 1
|
||||||
# endif
|
# endif
|
||||||
# elif defined(__SANITIZE_ADDRESS__)
|
# elif defined(__SANITIZE_ADDRESS__)
|
||||||
@ -42,8 +57,8 @@
|
|||||||
#endif
|
#endif
|
||||||
|
|
||||||
#if !defined(THREAD_SANITIZER)
|
#if !defined(THREAD_SANITIZER)
|
||||||
# if defined(__has_feature)
|
# if defined(ch_has_feature)
|
||||||
# if __has_feature(thread_sanitizer)
|
# if ch_has_feature(thread_sanitizer)
|
||||||
# define THREAD_SANITIZER 1
|
# define THREAD_SANITIZER 1
|
||||||
# endif
|
# endif
|
||||||
# elif defined(__SANITIZE_THREAD__)
|
# elif defined(__SANITIZE_THREAD__)
|
||||||
@ -52,8 +67,8 @@
|
|||||||
#endif
|
#endif
|
||||||
|
|
||||||
#if !defined(MEMORY_SANITIZER)
|
#if !defined(MEMORY_SANITIZER)
|
||||||
# if defined(__has_feature)
|
# if defined(ch_has_feature)
|
||||||
# if __has_feature(memory_sanitizer)
|
# if ch_has_feature(memory_sanitizer)
|
||||||
# define MEMORY_SANITIZER 1
|
# define MEMORY_SANITIZER 1
|
||||||
# endif
|
# endif
|
||||||
# elif defined(__MEMORY_SANITIZER__)
|
# elif defined(__MEMORY_SANITIZER__)
|
||||||
@ -61,6 +76,16 @@
|
|||||||
# endif
|
# endif
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
#if !defined(UNDEFINED_BEHAVIOR_SANITIZER)
|
||||||
|
# if defined(__has_feature)
|
||||||
|
# if __has_feature(undefined_behavior_sanitizer)
|
||||||
|
# define UNDEFINED_BEHAVIOR_SANITIZER 1
|
||||||
|
# endif
|
||||||
|
# elif defined(__UNDEFINED_BEHAVIOR_SANITIZER__)
|
||||||
|
# define UNDEFINED_BEHAVIOR_SANITIZER 1
|
||||||
|
# endif
|
||||||
|
#endif
|
||||||
|
|
||||||
#if defined(ADDRESS_SANITIZER)
|
#if defined(ADDRESS_SANITIZER)
|
||||||
# define BOOST_USE_ASAN 1
|
# define BOOST_USE_ASAN 1
|
||||||
# define BOOST_USE_UCONTEXT 1
|
# define BOOST_USE_UCONTEXT 1
|
||||||
@ -84,10 +109,12 @@
|
|||||||
# define NO_SANITIZE_UNDEFINED __attribute__((__no_sanitize__("undefined")))
|
# define NO_SANITIZE_UNDEFINED __attribute__((__no_sanitize__("undefined")))
|
||||||
# define NO_SANITIZE_ADDRESS __attribute__((__no_sanitize__("address")))
|
# define NO_SANITIZE_ADDRESS __attribute__((__no_sanitize__("address")))
|
||||||
# define NO_SANITIZE_THREAD __attribute__((__no_sanitize__("thread")))
|
# define NO_SANITIZE_THREAD __attribute__((__no_sanitize__("thread")))
|
||||||
|
# define ALWAYS_INLINE_NO_SANITIZE_UNDEFINED __attribute__((__always_inline__, __no_sanitize__("undefined")))
|
||||||
#else /// It does not work in GCC. GCC 7 cannot recognize this attribute and GCC 8 simply ignores it.
|
#else /// It does not work in GCC. GCC 7 cannot recognize this attribute and GCC 8 simply ignores it.
|
||||||
# define NO_SANITIZE_UNDEFINED
|
# define NO_SANITIZE_UNDEFINED
|
||||||
# define NO_SANITIZE_ADDRESS
|
# define NO_SANITIZE_ADDRESS
|
||||||
# define NO_SANITIZE_THREAD
|
# define NO_SANITIZE_THREAD
|
||||||
|
# define ALWAYS_INLINE_NO_SANITIZE_UNDEFINED ALWAYS_INLINE
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
/// A template function for suppressing warnings about unused variables or function results.
|
/// A template function for suppressing warnings about unused variables or function results.
|
||||||
|
@ -15,11 +15,11 @@
|
|||||||
#endif
|
#endif
|
||||||
|
|
||||||
#define __msan_unpoison(X, Y) // NOLINT
|
#define __msan_unpoison(X, Y) // NOLINT
|
||||||
#if defined(__has_feature)
|
#if defined(ch_has_feature)
|
||||||
# if __has_feature(memory_sanitizer)
|
# if ch_has_feature(memory_sanitizer)
|
||||||
# undef __msan_unpoison
|
# undef __msan_unpoison
|
||||||
# include <sanitizer/msan_interface.h>
|
# include <sanitizer/msan_interface.h>
|
||||||
# endif
|
# endif
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
#include <link.h>
|
#include <link.h>
|
||||||
|
@ -1,45 +1,28 @@
|
|||||||
// https://stackoverflow.com/questions/1413445/reading-a-password-from-stdcin
|
|
||||||
|
|
||||||
#include <common/setTerminalEcho.h>
|
#include <common/setTerminalEcho.h>
|
||||||
#include <common/errnoToString.h>
|
#include <common/errnoToString.h>
|
||||||
#include <stdexcept>
|
#include <stdexcept>
|
||||||
#include <cstring>
|
#include <cstring>
|
||||||
#include <string>
|
#include <string>
|
||||||
|
|
||||||
#ifdef WIN32
|
|
||||||
#include <windows.h>
|
|
||||||
#else
|
|
||||||
#include <termios.h>
|
#include <termios.h>
|
||||||
#include <unistd.h>
|
#include <unistd.h>
|
||||||
#include <errno.h>
|
|
||||||
#endif
|
|
||||||
|
|
||||||
void setTerminalEcho(bool enable)
|
void setTerminalEcho(bool enable)
|
||||||
{
|
{
|
||||||
#ifdef WIN32
|
/// Obtain terminal attributes,
|
||||||
auto handle = GetStdHandle(STD_INPUT_HANDLE);
|
/// toggle the ECHO flag
|
||||||
DWORD mode;
|
/// and set them back.
|
||||||
if (!GetConsoleMode(handle, &mode))
|
|
||||||
throw std::runtime_error(std::string("setTerminalEcho failed get: ") + std::to_string(GetLastError()));
|
|
||||||
|
|
||||||
if (!enable)
|
struct termios tty{};
|
||||||
mode &= ~ENABLE_ECHO_INPUT;
|
|
||||||
else
|
|
||||||
mode |= ENABLE_ECHO_INPUT;
|
|
||||||
|
|
||||||
if (!SetConsoleMode(handle, mode))
|
if (0 != tcgetattr(STDIN_FILENO, &tty))
|
||||||
throw std::runtime_error(std::string("setTerminalEcho failed set: ") + std::to_string(GetLastError()));
|
|
||||||
#else
|
|
||||||
struct termios tty;
|
|
||||||
if (tcgetattr(STDIN_FILENO, &tty))
|
|
||||||
throw std::runtime_error(std::string("setTerminalEcho failed get: ") + errnoToString(errno));
|
throw std::runtime_error(std::string("setTerminalEcho failed get: ") + errnoToString(errno));
|
||||||
if (!enable)
|
|
||||||
tty.c_lflag &= ~ECHO;
|
|
||||||
else
|
|
||||||
tty.c_lflag |= ECHO;
|
|
||||||
|
|
||||||
auto ret = tcsetattr(STDIN_FILENO, TCSANOW, &tty);
|
if (enable)
|
||||||
if (ret)
|
tty.c_lflag |= ECHO;
|
||||||
|
else
|
||||||
|
tty.c_lflag &= ~ECHO;
|
||||||
|
|
||||||
|
if (0 != tcsetattr(STDIN_FILENO, TCSANOW, &tty))
|
||||||
throw std::runtime_error(std::string("setTerminalEcho failed set: ") + errnoToString(errno));
|
throw std::runtime_error(std::string("setTerminalEcho failed set: ") + errnoToString(errno));
|
||||||
#endif
|
|
||||||
}
|
}
|
||||||
|
@ -12,6 +12,7 @@ private:
|
|||||||
T t;
|
T t;
|
||||||
|
|
||||||
public:
|
public:
|
||||||
|
using UnderlyingType = T;
|
||||||
template <class Enable = typename std::is_copy_constructible<T>::type>
|
template <class Enable = typename std::is_copy_constructible<T>::type>
|
||||||
explicit StrongTypedef(const T & t_) : t(t_) {}
|
explicit StrongTypedef(const T & t_) : t(t_) {}
|
||||||
template <class Enable = typename std::is_move_constructible<T>::type>
|
template <class Enable = typename std::is_move_constructible<T>::type>
|
||||||
|
@ -1,25 +1,2 @@
|
|||||||
include (${ClickHouse_SOURCE_DIR}/cmake/add_check.cmake)
|
|
||||||
|
|
||||||
add_executable (date_lut2 date_lut2.cpp)
|
|
||||||
add_executable (date_lut3 date_lut3.cpp)
|
|
||||||
add_executable (date_lut_default_timezone date_lut_default_timezone.cpp)
|
|
||||||
add_executable (local_date_time_comparison local_date_time_comparison.cpp)
|
|
||||||
add_executable (realloc-perf allocator.cpp)
|
|
||||||
|
|
||||||
set(PLATFORM_LIBS ${CMAKE_DL_LIBS})
|
|
||||||
|
|
||||||
target_link_libraries (date_lut2 PRIVATE common ${PLATFORM_LIBS})
|
|
||||||
target_link_libraries (date_lut3 PRIVATE common ${PLATFORM_LIBS})
|
|
||||||
target_link_libraries (date_lut_default_timezone PRIVATE common ${PLATFORM_LIBS})
|
|
||||||
target_link_libraries (local_date_time_comparison PRIVATE common)
|
|
||||||
target_link_libraries (realloc-perf PRIVATE common)
|
|
||||||
add_check(local_date_time_comparison)
|
|
||||||
|
|
||||||
if(USE_GTEST)
|
|
||||||
add_executable(unit_tests_libcommon gtest_json_test.cpp gtest_strong_typedef.cpp gtest_find_symbols.cpp)
|
|
||||||
target_link_libraries(unit_tests_libcommon PRIVATE common ${GTEST_MAIN_LIBRARIES} ${GTEST_LIBRARIES})
|
|
||||||
add_check(unit_tests_libcommon)
|
|
||||||
endif()
|
|
||||||
|
|
||||||
add_executable (dump_variable dump_variable.cpp)
|
add_executable (dump_variable dump_variable.cpp)
|
||||||
target_link_libraries (dump_variable PRIVATE clickhouse_common_io)
|
target_link_libraries (dump_variable PRIVATE clickhouse_common_io)
|
||||||
|
@ -1,47 +0,0 @@
|
|||||||
#include <cstdlib>
|
|
||||||
#include <cstring>
|
|
||||||
#include <vector>
|
|
||||||
#include <thread>
|
|
||||||
|
|
||||||
|
|
||||||
void thread_func()
|
|
||||||
{
|
|
||||||
for (size_t i = 0; i < 100; ++i)
|
|
||||||
{
|
|
||||||
size_t size = 4096;
|
|
||||||
|
|
||||||
void * buf = malloc(size);
|
|
||||||
if (!buf)
|
|
||||||
abort();
|
|
||||||
memset(buf, 0, size);
|
|
||||||
|
|
||||||
while (size < 1048576)
|
|
||||||
{
|
|
||||||
size_t next_size = size * 4;
|
|
||||||
|
|
||||||
void * new_buf = realloc(buf, next_size);
|
|
||||||
if (!new_buf)
|
|
||||||
abort();
|
|
||||||
buf = new_buf;
|
|
||||||
|
|
||||||
memset(reinterpret_cast<char*>(buf) + size, 0, next_size - size);
|
|
||||||
size = next_size;
|
|
||||||
}
|
|
||||||
|
|
||||||
free(buf);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
int main(int, char **)
|
|
||||||
{
|
|
||||||
std::vector<std::thread> threads(16);
|
|
||||||
for (size_t i = 0; i < 1000; ++i)
|
|
||||||
{
|
|
||||||
for (auto & thread : threads)
|
|
||||||
thread = std::thread(thread_func);
|
|
||||||
for (auto & thread : threads)
|
|
||||||
thread.join();
|
|
||||||
}
|
|
||||||
return 0;
|
|
||||||
}
|
|
@ -1,53 +0,0 @@
|
|||||||
#include <iostream>
|
|
||||||
#include <cstring>
|
|
||||||
|
|
||||||
#include <common/DateLUT.h>
|
|
||||||
|
|
||||||
|
|
||||||
static std::string toString(time_t Value)
|
|
||||||
{
|
|
||||||
struct tm tm;
|
|
||||||
char buf[96];
|
|
||||||
|
|
||||||
localtime_r(&Value, &tm);
|
|
||||||
snprintf(buf, sizeof(buf), "%04d-%02d-%02d %02d:%02d:%02d",
|
|
||||||
tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec);
|
|
||||||
|
|
||||||
return buf;
|
|
||||||
}
|
|
||||||
|
|
||||||
static time_t orderedIdentifierToDate(unsigned value)
|
|
||||||
{
|
|
||||||
struct tm tm;
|
|
||||||
|
|
||||||
memset(&tm, 0, sizeof(tm));
|
|
||||||
|
|
||||||
tm.tm_year = value / 10000 - 1900;
|
|
||||||
tm.tm_mon = (value % 10000) / 100 - 1;
|
|
||||||
tm.tm_mday = value % 100;
|
|
||||||
tm.tm_isdst = -1;
|
|
||||||
|
|
||||||
return mktime(&tm);
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
void loop(time_t begin, time_t end, int step)
|
|
||||||
{
|
|
||||||
const auto & date_lut = DateLUT::instance();
|
|
||||||
|
|
||||||
for (time_t t = begin; t < end; t += step)
|
|
||||||
std::cout << toString(t)
|
|
||||||
<< ", " << toString(date_lut.toTime(t))
|
|
||||||
<< ", " << date_lut.toHour(t)
|
|
||||||
<< std::endl;
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
int main(int, char **)
|
|
||||||
{
|
|
||||||
loop(orderedIdentifierToDate(20101031), orderedIdentifierToDate(20101101), 15 * 60);
|
|
||||||
loop(orderedIdentifierToDate(20100328), orderedIdentifierToDate(20100330), 15 * 60);
|
|
||||||
loop(orderedIdentifierToDate(20141020), orderedIdentifierToDate(20141106), 15 * 60);
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
@ -1,62 +0,0 @@
|
|||||||
#include <iostream>
|
|
||||||
#include <cstring>
|
|
||||||
|
|
||||||
#include <Poco/Exception.h>
|
|
||||||
|
|
||||||
#include <common/DateLUT.h>
|
|
||||||
|
|
||||||
|
|
||||||
static std::string toString(time_t Value)
|
|
||||||
{
|
|
||||||
struct tm tm;
|
|
||||||
char buf[96];
|
|
||||||
|
|
||||||
localtime_r(&Value, &tm);
|
|
||||||
snprintf(buf, sizeof(buf), "%04d-%02d-%02d %02d:%02d:%02d",
|
|
||||||
tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec);
|
|
||||||
|
|
||||||
return buf;
|
|
||||||
}
|
|
||||||
|
|
||||||
static time_t orderedIdentifierToDate(unsigned value)
|
|
||||||
{
|
|
||||||
struct tm tm;
|
|
||||||
|
|
||||||
memset(&tm, 0, sizeof(tm));
|
|
||||||
|
|
||||||
tm.tm_year = value / 10000 - 1900;
|
|
||||||
tm.tm_mon = (value % 10000) / 100 - 1;
|
|
||||||
tm.tm_mday = value % 100;
|
|
||||||
tm.tm_isdst = -1;
|
|
||||||
|
|
||||||
return mktime(&tm);
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
void loop(time_t begin, time_t end, int step)
|
|
||||||
{
|
|
||||||
const auto & date_lut = DateLUT::instance();
|
|
||||||
|
|
||||||
for (time_t t = begin; t < end; t += step)
|
|
||||||
{
|
|
||||||
time_t t2 = date_lut.makeDateTime(date_lut.toYear(t), date_lut.toMonth(t), date_lut.toDayOfMonth(t),
|
|
||||||
date_lut.toHour(t), date_lut.toMinute(t), date_lut.toSecond(t));
|
|
||||||
|
|
||||||
std::string s1 = toString(t);
|
|
||||||
std::string s2 = toString(t2);
|
|
||||||
|
|
||||||
std::cerr << s1 << ", " << s2 << std::endl;
|
|
||||||
|
|
||||||
if (s1 != s2)
|
|
||||||
throw Poco::Exception("Test failed.");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
int main(int, char **)
|
|
||||||
{
|
|
||||||
loop(orderedIdentifierToDate(20101031), orderedIdentifierToDate(20101101), 15 * 60);
|
|
||||||
loop(orderedIdentifierToDate(20100328), orderedIdentifierToDate(20100330), 15 * 60);
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
@ -1,31 +0,0 @@
|
|||||||
#include <iostream>
|
|
||||||
#include <common/DateLUT.h>
|
|
||||||
#include <Poco/Exception.h>
|
|
||||||
|
|
||||||
int main(int, char **)
|
|
||||||
{
|
|
||||||
try
|
|
||||||
{
|
|
||||||
const auto & date_lut = DateLUT::instance();
|
|
||||||
std::cout << "Detected default timezone: `" << date_lut.getTimeZone() << "'" << std::endl;
|
|
||||||
time_t now = time(nullptr);
|
|
||||||
std::cout << "Current time: " << date_lut.timeToString(now)
|
|
||||||
<< ", UTC: " << DateLUT::instance("UTC").timeToString(now) << std::endl;
|
|
||||||
}
|
|
||||||
catch (const Poco::Exception & e)
|
|
||||||
{
|
|
||||||
std::cerr << e.displayText() << std::endl;
|
|
||||||
return 1;
|
|
||||||
}
|
|
||||||
catch (std::exception & e)
|
|
||||||
{
|
|
||||||
std::cerr << "std::exception: " << e.what() << std::endl;
|
|
||||||
return 2;
|
|
||||||
}
|
|
||||||
catch (...)
|
|
||||||
{
|
|
||||||
std::cerr << "Some exception" << std::endl;
|
|
||||||
return 3;
|
|
||||||
}
|
|
||||||
return 0;
|
|
||||||
}
|
|
@ -1,656 +0,0 @@
|
|||||||
#include <vector>
|
|
||||||
#include <string>
|
|
||||||
#include <exception>
|
|
||||||
#include <common/JSON.h>
|
|
||||||
|
|
||||||
#include <boost/range/irange.hpp>
|
|
||||||
|
|
||||||
using namespace std::literals::string_literals;
|
|
||||||
|
|
||||||
#include <gtest/gtest.h>
|
|
||||||
|
|
||||||
enum class ResultType
|
|
||||||
{
|
|
||||||
Return,
|
|
||||||
Throw
|
|
||||||
};
|
|
||||||
|
|
||||||
struct GetStringTestRecord
|
|
||||||
{
|
|
||||||
const char * input;
|
|
||||||
ResultType result_type;
|
|
||||||
const char * result;
|
|
||||||
};
|
|
||||||
|
|
||||||
TEST(JSONSuite, SimpleTest)
|
|
||||||
{
|
|
||||||
std::vector<GetStringTestRecord> test_data =
|
|
||||||
{
|
|
||||||
{ R"("name")", ResultType::Return, "name" },
|
|
||||||
{ R"("Вафельница Vitek WX-1102 FL")", ResultType::Return, "Вафельница Vitek WX-1102 FL" },
|
|
||||||
{ R"("brand")", ResultType::Return, "brand" },
|
|
||||||
{ R"("184509")", ResultType::Return, "184509" },
|
|
||||||
{ R"("category")", ResultType::Return, "category" },
|
|
||||||
{ R"("Все для детей/Детская техника/Vitek")", ResultType::Return, "Все для детей/Детская техника/Vitek" },
|
|
||||||
{ R"("variant")", ResultType::Return, "variant" },
|
|
||||||
{ R"("В наличии")", ResultType::Return, "В наличии" },
|
|
||||||
{ R"("price")", ResultType::Return, "price" },
|
|
||||||
{ R"("2390.00")", ResultType::Return, "2390.00" },
|
|
||||||
{ R"("list")", ResultType::Return, "list" },
|
|
||||||
{ R"("Карточка")", ResultType::Return, "Карточка" },
|
|
||||||
{ R"("position")", ResultType::Return, "position" },
|
|
||||||
{ R"("detail")", ResultType::Return, "detail" },
|
|
||||||
{ R"("actionField")", ResultType::Return, "actionField" },
|
|
||||||
{ R"("list")", ResultType::Return, "list" },
|
|
||||||
{ R"("http://www.techport.ru/q/?t=вафельница&sort=price&sdim=asc")", ResultType::Return, "http://www.techport.ru/q/?t=вафельница&sort=price&sdim=asc" },
|
|
||||||
{ R"("action")", ResultType::Return, "action" },
|
|
||||||
{ R"("detail")", ResultType::Return, "detail" },
|
|
||||||
{ R"("products")", ResultType::Return, "products" },
|
|
||||||
{ R"("name")", ResultType::Return, "name" },
|
|
||||||
{ R"("Вафельница Vitek WX-1102 FL")", ResultType::Return, "Вафельница Vitek WX-1102 FL" },
|
|
||||||
{ R"("id")", ResultType::Return, "id" },
|
|
||||||
{ R"("184509")", ResultType::Return, "184509" },
|
|
||||||
{ R"("price")", ResultType::Return, "price" },
|
|
||||||
{ R"("2390.00")", ResultType::Return, "2390.00" },
|
|
||||||
{ R"("brand")", ResultType::Return, "brand" },
|
|
||||||
{ R"("Vitek")", ResultType::Return, "Vitek" },
|
|
||||||
{ R"("category")", ResultType::Return, "category" },
|
|
||||||
{ R"("Все для детей/Детская техника/Vitek")", ResultType::Return, "Все для детей/Детская техника/Vitek" },
|
|
||||||
{ R"("variant")", ResultType::Return, "variant" },
|
|
||||||
{ R"("В наличии")", ResultType::Return, "В наличии" },
|
|
||||||
{ R"("ru")", ResultType::Return, "ru" },
|
|
||||||
{ R"("experiments")", ResultType::Return, "experiments" },
|
|
||||||
{ R"("lang")", ResultType::Return, "lang" },
|
|
||||||
{ R"("ru")", ResultType::Return, "ru" },
|
|
||||||
{ R"("los_portal")", ResultType::Return, "los_portal" },
|
|
||||||
{ R"("los_level")", ResultType::Return, "los_level" },
|
|
||||||
{ R"("none")", ResultType::Return, "none" },
|
|
||||||
{ R"("isAuthorized")", ResultType::Return, "isAuthorized" },
|
|
||||||
{ R"("isSubscriber")", ResultType::Return, "isSubscriber" },
|
|
||||||
{ R"("postType")", ResultType::Return, "postType" },
|
|
||||||
{ R"("Новости")", ResultType::Return, "Новости" },
|
|
||||||
{ R"("experiments")", ResultType::Return, "experiments" },
|
|
||||||
{ R"("lang")", ResultType::Return, "lang" },
|
|
||||||
{ R"("ru")", ResultType::Return, "ru" },
|
|
||||||
{ R"("los_portal")", ResultType::Return, "los_portal" },
|
|
||||||
{ R"("los_level")", ResultType::Return, "los_level" },
|
|
||||||
{ R"("none")", ResultType::Return, "none" },
|
|
||||||
{ R"("lang")", ResultType::Return, "lang" },
|
|
||||||
{ R"("ru")", ResultType::Return, "ru" },
|
|
||||||
{ R"("Электроплита GEFEST Брест ЭПНД 5140-01 0001")", ResultType::Return, "Электроплита GEFEST Брест ЭПНД 5140-01 0001" },
|
|
||||||
{ R"("price")", ResultType::Return, "price" },
|
|
||||||
{ R"("currencyCode")", ResultType::Return, "currencyCode" },
|
|
||||||
{ R"("RUB")", ResultType::Return, "RUB" },
|
|
||||||
{ R"("lang")", ResultType::Return, "lang" },
|
|
||||||
{ R"("ru")", ResultType::Return, "ru" },
|
|
||||||
{ R"("experiments")", ResultType::Return, "experiments" },
|
|
||||||
{ R"("lang")", ResultType::Return, "lang" },
|
|
||||||
{ R"("ru")", ResultType::Return, "ru" },
|
|
||||||
{ R"("los_portal")", ResultType::Return, "los_portal" },
|
|
||||||
{ R"("los_level")", ResultType::Return, "los_level" },
|
|
||||||
{ R"("none")", ResultType::Return, "none" },
|
|
||||||
{ R"("trash_login")", ResultType::Return, "trash_login" },
|
|
||||||
{ R"("novikoff")", ResultType::Return, "novikoff" },
|
|
||||||
{ R"("trash_cat_link")", ResultType::Return, "trash_cat_link" },
|
|
||||||
{ R"("progs")", ResultType::Return, "progs" },
|
|
||||||
{ R"("trash_parent_link")", ResultType::Return, "trash_parent_link" },
|
|
||||||
{ R"("content")", ResultType::Return, "content" },
|
|
||||||
{ R"("trash_posted_parent")", ResultType::Return, "trash_posted_parent" },
|
|
||||||
{ R"("content.01.2016")", ResultType::Return, "content.01.2016" },
|
|
||||||
{ R"("trash_posted_cat")", ResultType::Return, "trash_posted_cat" },
|
|
||||||
{ R"("progs.01.2016")", ResultType::Return, "progs.01.2016" },
|
|
||||||
{ R"("trash_virus_count")", ResultType::Return, "trash_virus_count" },
|
|
||||||
{ R"("trash_is_android")", ResultType::Return, "trash_is_android" },
|
|
||||||
{ R"("trash_is_wp8")", ResultType::Return, "trash_is_wp8" },
|
|
||||||
{ R"("trash_is_ios")", ResultType::Return, "trash_is_ios" },
|
|
||||||
{ R"("trash_posted")", ResultType::Return, "trash_posted" },
|
|
||||||
{ R"("01.2016")", ResultType::Return, "01.2016" },
|
|
||||||
{ R"("experiments")", ResultType::Return, "experiments" },
|
|
||||||
{ R"("lang")", ResultType::Return, "lang" },
|
|
||||||
{ R"("ru")", ResultType::Return, "ru" },
|
|
||||||
{ R"("los_portal")", ResultType::Return, "los_portal" },
|
|
||||||
{ R"("los_level")", ResultType::Return, "los_level" },
|
|
||||||
{ R"("none")", ResultType::Return, "none" },
|
|
||||||
{ R"("merchantId")", ResultType::Return, "merchantId" },
|
|
||||||
{ R"("13694_49246")", ResultType::Return, "13694_49246" },
|
|
||||||
{ R"("cps-source")", ResultType::Return, "cps-source" },
|
|
||||||
{ R"("wargaming")", ResultType::Return, "wargaming" },
|
|
||||||
{ R"("cps_provider")", ResultType::Return, "cps_provider" },
|
|
||||||
{ R"("default")", ResultType::Return, "default" },
|
|
||||||
{ R"("errorReason")", ResultType::Return, "errorReason" },
|
|
||||||
{ R"("no errors")", ResultType::Return, "no errors" },
|
|
||||||
{ R"("scid")", ResultType::Return, "scid" },
|
|
||||||
{ R"("isAuthPayment")", ResultType::Return, "isAuthPayment" },
|
|
||||||
{ R"("lang")", ResultType::Return, "lang" },
|
|
||||||
{ R"("ru")", ResultType::Return, "ru" },
|
|
||||||
{ R"("rubric")", ResultType::Return, "rubric" },
|
|
||||||
{ R"("")", ResultType::Return, "" },
|
|
||||||
{ R"("rubric")", ResultType::Return, "rubric" },
|
|
||||||
{ R"("Мир")", ResultType::Return, "Мир" },
|
|
||||||
{ R"("lang")", ResultType::Return, "lang" },
|
|
||||||
{ R"("ru")", ResultType::Return, "ru" },
|
|
||||||
{ R"("experiments")", ResultType::Return, "experiments" },
|
|
||||||
{ R"("lang")", ResultType::Return, "lang" },
|
|
||||||
{ R"("ru")", ResultType::Return, "ru" },
|
|
||||||
{ R"("los_portal")", ResultType::Return, "los_portal" },
|
|
||||||
{ R"("los_level")", ResultType::Return, "los_level" },
|
|
||||||
{ R"("none")", ResultType::Return, "none" },
|
|
||||||
{ R"("lang")", ResultType::Return, "lang" },
|
|
||||||
{ R"("ru")", ResultType::Return, "ru" },
|
|
||||||
{ R"("__ym")", ResultType::Return, "__ym" },
|
|
||||||
{ R"("ecommerce")", ResultType::Return, "ecommerce" },
|
|
||||||
{ R"("impressions")", ResultType::Return, "impressions" },
|
|
||||||
{ R"("id")", ResultType::Return, "id" },
|
|
||||||
{ R"("863813")", ResultType::Return, "863813" },
|
|
||||||
{ R"("name")", ResultType::Return, "name" },
|
|
||||||
{ R"("Футболка детская 3D Happy, возраст 1-2 года, трикотаж")", ResultType::Return, "Футболка детская 3D Happy, возраст 1-2 года, трикотаж" },
|
|
||||||
{ R"("category")", ResultType::Return, "category" },
|
|
||||||
{ R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" },
|
|
||||||
{ R"("variant")", ResultType::Return, "variant" },
|
|
||||||
{ R"("")", ResultType::Return, "" },
|
|
||||||
{ R"("price")", ResultType::Return, "price" },
|
|
||||||
{ R"("390.00")", ResultType::Return, "390.00" },
|
|
||||||
{ R"("list")", ResultType::Return, "list" },
|
|
||||||
{ R"("/retailrocket/")", ResultType::Return, "/retailrocket/" },
|
|
||||||
{ R"("position")", ResultType::Return, "position" },
|
|
||||||
{ R"("brand")", ResultType::Return, "brand" },
|
|
||||||
{ R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" },
|
|
||||||
{ R"("id")", ResultType::Return, "id" },
|
|
||||||
{ R"("863839")", ResultType::Return, "863839" },
|
|
||||||
{ R"("name")", ResultType::Return, "name" },
|
|
||||||
{ R"("Футболка детская 3D Pretty kitten, возраст 1-2 года, трикотаж")", ResultType::Return, "Футболка детская 3D Pretty kitten, возраст 1-2 года, трикотаж" },
|
|
||||||
{ R"("category")", ResultType::Return, "category" },
|
|
||||||
{ R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" },
|
|
||||||
{ R"("variant")", ResultType::Return, "variant" },
|
|
||||||
{ R"("")", ResultType::Return, "" },
|
|
||||||
{ R"("price")", ResultType::Return, "price" },
|
|
||||||
{ R"("390.00")", ResultType::Return, "390.00" },
|
|
||||||
{ R"("list")", ResultType::Return, "list" },
|
|
||||||
{ R"("/retailrocket/")", ResultType::Return, "/retailrocket/" },
|
|
||||||
{ R"("position")", ResultType::Return, "position" },
|
|
||||||
{ R"("brand")", ResultType::Return, "brand" },
|
|
||||||
{ R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" },
|
|
||||||
{ R"("id")", ResultType::Return, "id" },
|
|
||||||
{ R"("863847")", ResultType::Return, "863847" },
|
|
||||||
{ R"("name")", ResultType::Return, "name" },
|
|
||||||
{ R"("Футболка детская 3D Little tiger, возраст 1-2 года, трикотаж")", ResultType::Return, "Футболка детская 3D Little tiger, возраст 1-2 года, трикотаж" },
|
|
||||||
{ R"("category")", ResultType::Return, "category" },
|
|
||||||
{ R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" },
|
|
||||||
{ R"("variant")", ResultType::Return, "variant" },
|
|
||||||
{ R"("")", ResultType::Return, "" },
|
|
||||||
{ R"("price")", ResultType::Return, "price" },
|
|
||||||
{ R"("390.00")", ResultType::Return, "390.00" },
|
|
||||||
{ R"("list")", ResultType::Return, "list" },
|
|
||||||
{ R"("/retailrocket/")", ResultType::Return, "/retailrocket/" },
|
|
||||||
{ R"("position")", ResultType::Return, "position" },
|
|
||||||
{ R"("brand")", ResultType::Return, "brand" },
|
|
||||||
{ R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" },
|
|
||||||
{ R"("id")", ResultType::Return, "id" },
|
|
||||||
{ R"("911480")", ResultType::Return, "911480" },
|
|
||||||
{ R"("name")", ResultType::Return, "name" },
|
|
||||||
{ R"("Футболка детская 3D Puppy, возраст 1-2 года, трикотаж")", ResultType::Return, "Футболка детская 3D Puppy, возраст 1-2 года, трикотаж" },
|
|
||||||
{ R"("category")", ResultType::Return, "category" },
|
|
||||||
{ R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" },
|
|
||||||
{ R"("variant")", ResultType::Return, "variant" },
|
|
||||||
{ R"("")", ResultType::Return, "" },
|
|
||||||
{ R"("price")", ResultType::Return, "price" },
|
|
||||||
{ R"("390.00")", ResultType::Return, "390.00" },
|
|
||||||
{ R"("list")", ResultType::Return, "list" },
|
|
||||||
{ R"("/retailrocket/")", ResultType::Return, "/retailrocket/" },
|
|
||||||
{ R"("position")", ResultType::Return, "position" },
|
|
||||||
{ R"("brand")", ResultType::Return, "brand" },
|
|
||||||
{ R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" },
|
|
||||||
{ R"("id")", ResultType::Return, "id" },
|
|
||||||
{ R"("911484")", ResultType::Return, "911484" },
|
|
||||||
{ R"("name")", ResultType::Return, "name" },
|
|
||||||
{ R"("Футболка детская 3D Little bears, возраст 1-2 года, трикотаж")", ResultType::Return, "Футболка детская 3D Little bears, возраст 1-2 года, трикотаж" },
|
|
||||||
{ R"("category")", ResultType::Return, "category" },
|
|
||||||
{ R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" },
|
|
||||||
{ R"("variant")", ResultType::Return, "variant" },
|
|
||||||
{ R"("")", ResultType::Return, "" },
|
|
||||||
{ R"("price")", ResultType::Return, "price" },
|
|
||||||
{ R"("390.00")", ResultType::Return, "390.00" },
|
|
||||||
{ R"("list")", ResultType::Return, "list" },
|
|
||||||
{ R"("/retailrocket/")", ResultType::Return, "/retailrocket/" },
|
|
||||||
{ R"("position")", ResultType::Return, "position" },
|
|
||||||
{ R"("brand")", ResultType::Return, "brand" },
|
|
||||||
{ R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" },
|
|
||||||
{ R"("id")", ResultType::Return, "id" },
|
|
||||||
{ R"("911489")", ResultType::Return, "911489" },
|
|
||||||
{ R"("name")", ResultType::Return, "name" },
|
|
||||||
{ R"("Футболка детская 3D Dolphin, возраст 2-4 года, трикотаж")", ResultType::Return, "Футболка детская 3D Dolphin, возраст 2-4 года, трикотаж" },
|
|
||||||
{ R"("category")", ResultType::Return, "category" },
|
|
||||||
{ R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" },
|
|
||||||
{ R"("variant")", ResultType::Return, "variant" },
|
|
||||||
{ R"("")", ResultType::Return, "" },
|
|
||||||
{ R"("price")", ResultType::Return, "price" },
|
|
||||||
{ R"("390.00")", ResultType::Return, "390.00" },
|
|
||||||
{ R"("list")", ResultType::Return, "list" },
|
|
||||||
{ R"("/retailrocket/")", ResultType::Return, "/retailrocket/" },
|
|
||||||
{ R"("position")", ResultType::Return, "position" },
|
|
||||||
{ R"("brand")", ResultType::Return, "brand" },
|
|
||||||
{ R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" },
|
|
||||||
{ R"("id")", ResultType::Return, "id" },
|
|
||||||
{ R"("911496")", ResultType::Return, "911496" },
|
|
||||||
{ R"("name")", ResultType::Return, "name" },
|
|
||||||
{ R"("Футболка детская 3D Pretty, возраст 1-2 года, трикотаж")", ResultType::Return, "Футболка детская 3D Pretty, возраст 1-2 года, трикотаж" },
|
|
||||||
{ R"("category")", ResultType::Return, "category" },
|
|
||||||
{ R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" },
|
|
||||||
{ R"("variant")", ResultType::Return, "variant" },
|
|
||||||
{ R"("")", ResultType::Return, "" },
|
|
||||||
{ R"("price")", ResultType::Return, "price" },
|
|
||||||
{ R"("390.00")", ResultType::Return, "390.00" },
|
|
||||||
{ R"("list")", ResultType::Return, "list" },
|
|
||||||
{ R"("/retailrocket/")", ResultType::Return, "/retailrocket/" },
|
|
||||||
{ R"("position")", ResultType::Return, "position" },
|
|
||||||
{ R"("brand")", ResultType::Return, "brand" },
|
|
||||||
{ R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" },
|
|
||||||
{ R"("id")", ResultType::Return, "id" },
|
|
||||||
{ R"("911504")", ResultType::Return, "911504" },
|
|
||||||
{ R"("name")", ResultType::Return, "name" },
|
|
||||||
{ R"("Футболка детская 3D Fairytale, возраст 1-2 года, трикотаж")", ResultType::Return, "Футболка детская 3D Fairytale, возраст 1-2 года, трикотаж" },
|
|
||||||
{ R"("category")", ResultType::Return, "category" },
|
|
||||||
{ R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" },
|
|
||||||
{ R"("variant")", ResultType::Return, "variant" },
|
|
||||||
{ R"("")", ResultType::Return, "" },
|
|
||||||
{ R"("price")", ResultType::Return, "price" },
|
|
||||||
{ R"("390.00")", ResultType::Return, "390.00" },
|
|
||||||
{ R"("list")", ResultType::Return, "list" },
|
|
||||||
{ R"("/retailrocket/")", ResultType::Return, "/retailrocket/" },
|
|
||||||
{ R"("position")", ResultType::Return, "position" },
|
|
||||||
{ R"("brand")", ResultType::Return, "brand" },
|
|
||||||
{ R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" },
|
|
||||||
{ R"("id")", ResultType::Return, "id" },
|
|
||||||
{ R"("911508")", ResultType::Return, "911508" },
|
|
||||||
{ R"("name")", ResultType::Return, "name" },
|
|
||||||
{ R"("Футболка детская 3D Kittens, возраст 1-2 года, трикотаж")", ResultType::Return, "Футболка детская 3D Kittens, возраст 1-2 года, трикотаж" },
|
|
||||||
{ R"("category")", ResultType::Return, "category" },
|
|
||||||
{ R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" },
|
|
||||||
{ R"("variant")", ResultType::Return, "variant" },
|
|
||||||
{ R"("")", ResultType::Return, "" },
|
|
||||||
{ R"("price")", ResultType::Return, "price" },
|
|
||||||
{ R"("390.00")", ResultType::Return, "390.00" },
|
|
||||||
{ R"("list")", ResultType::Return, "list" },
|
|
||||||
{ R"("/retailrocket/")", ResultType::Return, "/retailrocket/" },
|
|
||||||
{ R"("position")", ResultType::Return, "position" },
|
|
||||||
{ R"("brand")", ResultType::Return, "brand" },
|
|
||||||
{ R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" },
|
|
||||||
{ R"("id")", ResultType::Return, "id" },
|
|
||||||
{ R"("911512")", ResultType::Return, "911512" },
|
|
||||||
{ R"("name")", ResultType::Return, "name" },
|
|
||||||
{ R"("Футболка детская 3D Sunshine, возраст 1-2 года, трикотаж")", ResultType::Return, "Футболка детская 3D Sunshine, возраст 1-2 года, трикотаж" },
|
|
||||||
{ R"("category")", ResultType::Return, "category" },
|
|
||||||
{ R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" },
|
|
||||||
{ R"("variant")", ResultType::Return, "variant" },
|
|
||||||
{ R"("")", ResultType::Return, "" },
|
|
||||||
{ R"("price")", ResultType::Return, "price" },
|
|
||||||
{ R"("390.00")", ResultType::Return, "390.00" },
|
|
||||||
{ R"("list")", ResultType::Return, "list" },
|
|
||||||
{ R"("/retailrocket/")", ResultType::Return, "/retailrocket/" },
|
|
||||||
{ R"("position")", ResultType::Return, "position" },
|
|
||||||
{ R"("brand")", ResultType::Return, "brand" },
|
|
||||||
{ R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" },
|
|
||||||
{ R"("id")", ResultType::Return, "id" },
|
|
||||||
{ R"("911516")", ResultType::Return, "911516" },
|
|
||||||
{ R"("name")", ResultType::Return, "name" },
|
|
||||||
{ R"("Футболка детская 3D Dog in bag, возраст 1-2 года, трикотаж")", ResultType::Return, "Футболка детская 3D Dog in bag, возраст 1-2 года, трикотаж" },
|
|
||||||
{ R"("category")", ResultType::Return, "category" },
|
|
||||||
{ R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" },
|
|
||||||
{ R"("variant")", ResultType::Return, "variant" },
|
|
||||||
{ R"("")", ResultType::Return, "" },
|
|
||||||
{ R"("price")", ResultType::Return, "price" },
|
|
||||||
{ R"("390.00")", ResultType::Return, "390.00" },
|
|
||||||
{ R"("list")", ResultType::Return, "list" },
|
|
||||||
{ R"("/retailrocket/")", ResultType::Return, "/retailrocket/" },
|
|
||||||
{ R"("position")", ResultType::Return, "position" },
|
|
||||||
{ R"("brand")", ResultType::Return, "brand" },
|
|
||||||
{ R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" },
|
|
||||||
{ R"("id")", ResultType::Return, "id" },
|
|
||||||
{ R"("911520")", ResultType::Return, "911520" },
|
|
||||||
{ R"("name")", ResultType::Return, "name" },
|
|
||||||
{ R"("Футболка детская 3D Cute puppy, возраст 1-2 года, трикотаж")", ResultType::Return, "Футболка детская 3D Cute puppy, возраст 1-2 года, трикотаж" },
|
|
||||||
{ R"("category")", ResultType::Return, "category" },
|
|
||||||
{ R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" },
|
|
||||||
{ R"("variant")", ResultType::Return, "variant" },
|
|
||||||
{ R"("")", ResultType::Return, "" },
|
|
||||||
{ R"("price")", ResultType::Return, "price" },
|
|
||||||
{ R"("390.00")", ResultType::Return, "390.00" },
|
|
||||||
{ R"("list")", ResultType::Return, "list" },
|
|
||||||
{ R"("/retailrocket/")", ResultType::Return, "/retailrocket/" },
|
|
||||||
{ R"("position")", ResultType::Return, "position" },
|
|
||||||
{ R"("brand")", ResultType::Return, "brand" },
|
|
||||||
{ R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" },
|
|
||||||
{ R"("id")", ResultType::Return, "id" },
|
|
||||||
{ R"("911524")", ResultType::Return, "911524" },
|
|
||||||
{ R"("name")", ResultType::Return, "name" },
|
|
||||||
{ R"("Футболка детская 3D Rabbit, возраст 1-2 года, трикотаж")", ResultType::Return, "Футболка детская 3D Rabbit, возраст 1-2 года, трикотаж" },
|
|
||||||
{ R"("category")", ResultType::Return, "category" },
|
|
||||||
{ R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" },
|
|
||||||
{ R"("variant")", ResultType::Return, "variant" },
|
|
||||||
{ R"("")", ResultType::Return, "" },
|
|
||||||
{ R"("price")", ResultType::Return, "price" },
|
|
||||||
{ R"("390.00")", ResultType::Return, "390.00" },
|
|
||||||
{ R"("list")", ResultType::Return, "list" },
|
|
||||||
{ R"("/retailrocket/")", ResultType::Return, "/retailrocket/" },
|
|
||||||
{ R"("position")", ResultType::Return, "position" },
|
|
||||||
{ R"("brand")", ResultType::Return, "brand" },
|
|
||||||
{ R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" },
|
|
||||||
{ R"("id")", ResultType::Return, "id" },
|
|
||||||
{ R"("911528")", ResultType::Return, "911528" },
|
|
||||||
{ R"("name")", ResultType::Return, "name" },
|
|
||||||
{ R"("Футболка детская 3D Turtle, возраст 1-2 года, трикотаж")", ResultType::Return, "Футболка детская 3D Turtle, возраст 1-2 года, трикотаж" },
|
|
||||||
{ R"("category")", ResultType::Return, "category" },
|
|
||||||
{ R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" },
|
|
||||||
{ R"("variant")", ResultType::Return, "variant" },
|
|
||||||
{ R"("")", ResultType::Return, "" },
|
|
||||||
{ R"("price")", ResultType::Return, "price" },
|
|
||||||
{ R"("390.00")", ResultType::Return, "390.00" },
|
|
||||||
{ R"("list")", ResultType::Return, "list" },
|
|
||||||
{ R"("/retailrocket/")", ResultType::Return, "/retailrocket/" },
|
|
||||||
{ R"("position")", ResultType::Return, "position" },
|
|
||||||
{ R"("brand")", ResultType::Return, "brand" },
|
|
||||||
{ R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" },
|
|
||||||
{ R"("id")", ResultType::Return, "id" },
|
|
||||||
{ R"("888616")", ResultType::Return, "888616" },
|
|
||||||
{ R"("name")", ResultType::Return, "name" },
|
|
||||||
{ "\"3Д Футболка мужская \\\"Collorista\\\" Светлое завтра р-р XL(52-54), 100% хлопок, трикотаж\"", ResultType::Return, "3Д Футболка мужская \"Collorista\" Светлое завтра р-р XL(52-54), 100% хлопок, трикотаж" },
|
|
||||||
{ R"("category")", ResultType::Return, "category" },
|
|
||||||
{ R"("/Одежда и обувь/Мужская одежда/Футболки/")", ResultType::Return, "/Одежда и обувь/Мужская одежда/Футболки/" },
|
|
||||||
{ R"("variant")", ResultType::Return, "variant" },
|
|
||||||
{ R"("")", ResultType::Return, "" },
|
|
||||||
{ R"("price")", ResultType::Return, "price" },
|
|
||||||
{ R"("406.60")", ResultType::Return, "406.60" },
|
|
||||||
{ R"("list")", ResultType::Return, "list" },
|
|
||||||
{ R"("/retailrocket/")", ResultType::Return, "/retailrocket/" },
|
|
||||||
{ R"("position")", ResultType::Return, "position" },
|
|
||||||
{ R"("brand")", ResultType::Return, "brand" },
|
|
||||||
{ R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" },
|
|
||||||
{ R"("id")", ResultType::Return, "id" },
|
|
||||||
{ R"("913361")", ResultType::Return, "913361" },
|
|
||||||
{ R"("name")", ResultType::Return, "name" },
|
|
||||||
{ R"("3Д Футболка детская World р-р 8-10, 100% хлопок, трикотаж")", ResultType::Return, "3Д Футболка детская World р-р 8-10, 100% хлопок, трикотаж" },
|
|
||||||
{ R"("category")", ResultType::Return, "category" },
|
|
||||||
{ R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" },
|
|
||||||
{ R"("variant")", ResultType::Return, "variant" },
|
|
||||||
{ R"("")", ResultType::Return, "" },
|
|
||||||
{ R"("price")", ResultType::Return, "price" },
|
|
||||||
{ R"("470.00")", ResultType::Return, "470.00" },
|
|
||||||
{ R"("list")", ResultType::Return, "list" },
|
|
||||||
{ R"("/retailrocket/")", ResultType::Return, "/retailrocket/" },
|
|
||||||
{ R"("position")", ResultType::Return, "position" },
|
|
||||||
{ R"("brand")", ResultType::Return, "brand" },
|
|
||||||
{ R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" },
|
|
||||||
{ R"("id")", ResultType::Return, "id" },
|
|
||||||
{ R"("913364")", ResultType::Return, "913364" },
|
|
||||||
{ R"("name")", ResultType::Return, "name" },
|
|
||||||
{ R"("3Д Футболка детская Force р-р 8-10, 100% хлопок, трикотаж")", ResultType::Return, "3Д Футболка детская Force р-р 8-10, 100% хлопок, трикотаж" },
|
|
||||||
{ R"("category")", ResultType::Return, "category" },
|
|
||||||
{ R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" },
|
|
||||||
{ R"("variant")", ResultType::Return, "variant" },
|
|
||||||
{ R"("")", ResultType::Return, "" },
|
|
||||||
{ R"("price")", ResultType::Return, "price" },
|
|
||||||
{ R"("470.00")", ResultType::Return, "470.00" },
|
|
||||||
{ R"("list")", ResultType::Return, "list" },
|
|
||||||
{ R"("/retailrocket/")", ResultType::Return, "/retailrocket/" },
|
|
||||||
{ R"("position")", ResultType::Return, "position" },
|
|
||||||
{ R"("brand")", ResultType::Return, "brand" },
|
|
||||||
{ R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" },
|
|
||||||
{ R"("id")", ResultType::Return, "id" },
|
|
||||||
{ R"("913367")", ResultType::Return, "913367" },
|
|
||||||
{ R"("name")", ResultType::Return, "name" },
|
|
||||||
{ R"("3Д Футболка детская Winter tale р-р 8-10, 100% хлопок, трикотаж")", ResultType::Return, "3Д Футболка детская Winter tale р-р 8-10, 100% хлопок, трикотаж" },
|
|
||||||
{ R"("category")", ResultType::Return, "category" },
|
|
||||||
{ R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" },
|
|
||||||
{ R"("variant")", ResultType::Return, "variant" },
|
|
||||||
{ R"("")", ResultType::Return, "" },
|
|
||||||
{ R"("price")", ResultType::Return, "price" },
|
|
||||||
{ R"("470.00")", ResultType::Return, "470.00" },
|
|
||||||
{ R"("list")", ResultType::Return, "list" },
|
|
||||||
{ R"("/retailrocket/")", ResultType::Return, "/retailrocket/" },
|
|
||||||
{ R"("position")", ResultType::Return, "position" },
|
|
||||||
{ R"("brand")", ResultType::Return, "brand" },
|
|
||||||
{ R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" },
|
|
||||||
{ R"("id")", ResultType::Return, "id" },
|
|
||||||
{ R"("913385")", ResultType::Return, "913385" },
|
|
||||||
{ R"("name")", ResultType::Return, "name" },
|
|
||||||
{ R"("3Д Футболка детская Moonshine р-р 8-10, 100% хлопок, трикотаж")", ResultType::Return, "3Д Футболка детская Moonshine р-р 8-10, 100% хлопок, трикотаж" },
|
|
||||||
{ R"("category")", ResultType::Return, "category" },
|
|
||||||
{ R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" },
|
|
||||||
{ R"("variant")", ResultType::Return, "variant" },
|
|
||||||
{ R"("")", ResultType::Return, "" },
|
|
||||||
{ R"("price")", ResultType::Return, "price" },
|
|
||||||
{ R"("470.00")", ResultType::Return, "470.00" },
|
|
||||||
{ R"("list")", ResultType::Return, "list" },
|
|
||||||
{ R"("/retailrocket/")", ResultType::Return, "/retailrocket/" },
|
|
||||||
{ R"("position")", ResultType::Return, "position" },
|
|
||||||
{ R"("brand")", ResultType::Return, "brand" },
|
|
||||||
{ R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" },
|
|
||||||
{ R"("id")", ResultType::Return, "id" },
|
|
||||||
{ R"("913391")", ResultType::Return, "913391" },
|
|
||||||
{ R"("name")", ResultType::Return, "name" },
|
|
||||||
{ R"("3Д Футболка детская Shaman р-р 8-10, 100% хлопок, трикотаж")", ResultType::Return, "3Д Футболка детская Shaman р-р 8-10, 100% хлопок, трикотаж" },
|
|
||||||
{ R"("category")", ResultType::Return, "category" },
|
|
||||||
{ R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" },
|
|
||||||
{ R"("variant")", ResultType::Return, "variant" },
|
|
||||||
{ R"("")", ResultType::Return, "" },
|
|
||||||
{ R"("price")", ResultType::Return, "price" },
|
|
||||||
{ R"("470.00")", ResultType::Return, "470.00" },
|
|
||||||
{ R"("list")", ResultType::Return, "list" },
|
|
||||||
{ R"("/retailrocket/")", ResultType::Return, "/retailrocket/" },
|
|
||||||
{ R"("position")", ResultType::Return, "position" },
|
|
||||||
{ R"("brand")", ResultType::Return, "brand" },
|
|
||||||
{ R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" },
|
|
||||||
{ R"("usertype")", ResultType::Return, "usertype" },
|
|
||||||
{ R"("visitor")", ResultType::Return, "visitor" },
|
|
||||||
{ R"("lang")", ResultType::Return, "lang" },
|
|
||||||
{ R"("ru")", ResultType::Return, "ru" },
|
|
||||||
{ R"("__ym")", ResultType::Return, "__ym" },
|
|
||||||
{ R"("ecommerce")", ResultType::Return, "ecommerce" },
|
|
||||||
{ R"("impressions")", ResultType::Return, "impressions" },
|
|
||||||
{ R"("experiments")", ResultType::Return, "experiments" },
|
|
||||||
{ R"("lang")", ResultType::Return, "lang" },
|
|
||||||
{ R"("ru")", ResultType::Return, "ru" },
|
|
||||||
{ R"("los_portal")", ResultType::Return, "los_portal" },
|
|
||||||
{ R"("los_level")", ResultType::Return, "los_level" },
|
|
||||||
{ R"("none")", ResultType::Return, "none" },
|
|
||||||
{ R"("experiments")", ResultType::Return, "experiments" },
|
|
||||||
{ R"("lang")", ResultType::Return, "lang" },
|
|
||||||
{ R"("ru")", ResultType::Return, "ru" },
|
|
||||||
{ R"("los_portal")", ResultType::Return, "los_portal" },
|
|
||||||
{ R"("los_level")", ResultType::Return, "los_level" },
|
|
||||||
{ R"("none")", ResultType::Return, "none" },
|
|
||||||
{ R"("experiments")", ResultType::Return, "experiments" },
|
|
||||||
{ R"("lang")", ResultType::Return, "lang" },
|
|
||||||
{ R"("ru")", ResultType::Return, "ru" },
|
|
||||||
{ R"("los_portal")", ResultType::Return, "los_portal" },
|
|
||||||
{ R"("los_level")", ResultType::Return, "los_level" },
|
|
||||||
{ R"("none")", ResultType::Return, "none" },
|
|
||||||
{ R"("experiments")", ResultType::Return, "experiments" },
|
|
||||||
{ R"("lang")", ResultType::Return, "lang" },
|
|
||||||
{ R"("ru")", ResultType::Return, "ru" },
|
|
||||||
{ R"("los_portal")", ResultType::Return, "los_portal" },
|
|
||||||
{ R"("los_level")", ResultType::Return, "los_level" },
|
|
||||||
{ R"("none")", ResultType::Return, "none" },
|
|
||||||
{ R"("experiments")", ResultType::Return, "experiments" },
|
|
||||||
{ R"("lang")", ResultType::Return, "lang" },
|
|
||||||
{ R"("ru")", ResultType::Return, "ru" },
|
|
||||||
{ R"("los_portal")", ResultType::Return, "los_portal" },
|
|
||||||
{ R"("los_level")", ResultType::Return, "los_level" },
|
|
||||||
{ R"("none")", ResultType::Return, "none" },
|
|
||||||
{ R"("__ym")", ResultType::Return, "__ym" },
|
|
||||||
{ R"("ecommerce")", ResultType::Return, "ecommerce" },
|
|
||||||
{ R"("currencyCode")", ResultType::Return, "currencyCode" },
|
|
||||||
{ R"("RUR")", ResultType::Return, "RUR" },
|
|
||||||
{ R"("impressions")", ResultType::Return, "impressions" },
|
|
||||||
{ R"("name")", ResultType::Return, "name" },
|
|
||||||
{ R"("Чайник электрический Mystery MEK-1627, белый")", ResultType::Return, "Чайник электрический Mystery MEK-1627, белый" },
|
|
||||||
{ R"("brand")", ResultType::Return, "brand" },
|
|
||||||
{ R"("Mystery")", ResultType::Return, "Mystery" },
|
|
||||||
{ R"("id")", ResultType::Return, "id" },
|
|
||||||
{ R"("187180")", ResultType::Return, "187180" },
|
|
||||||
{ R"("category")", ResultType::Return, "category" },
|
|
||||||
{ R"("Мелкая бытовая техника/Мелкие кухонные приборы/Чайники электрические/Mystery")", ResultType::Return, "Мелкая бытовая техника/Мелкие кухонные приборы/Чайники электрические/Mystery" },
|
|
||||||
{ R"("variant")", ResultType::Return, "variant" },
|
|
||||||
{ R"("В наличии")", ResultType::Return, "В наличии" },
|
|
||||||
{ R"("price")", ResultType::Return, "price" },
|
|
||||||
{ R"("1630.00")", ResultType::Return, "1630.00" },
|
|
||||||
{ R"("list")", ResultType::Return, "list" },
|
|
||||||
{ R"("Карточка")", ResultType::Return, "Карточка" },
|
|
||||||
{ R"("position")", ResultType::Return, "position" },
|
|
||||||
{ R"("detail")", ResultType::Return, "detail" },
|
|
||||||
{ R"("actionField")", ResultType::Return, "actionField" },
|
|
||||||
{ R"("list")", ResultType::Return, "list" },
|
|
||||||
{ "\0\"", ResultType::Throw, "JSON: expected \", got \0" },
|
|
||||||
{ "\"/igrushki/konstruktory\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"/1290414/komplekt-zhenskiy-dzhemper-plusbryuki-m-254-09-malina-plustemno-siniy-\0a", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"/Творчество/Рисование/Инструменты и кра\0a", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"Строительство и ремонт/Силовая техника/Зарядные устройства для автомобильных аккумуляторов/Пуско-зарядные устр\xD0\0a", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"Строительство и ремонт/Силовая техника/Зарядные устройств\xD0\0t", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"Строительство и ремонт/Силовая техника/Зарядные устройства для автомобиль\0k", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\0t", ResultType::Throw, "JSON: expected \", got \0" },
|
|
||||||
{ "\"/Хозтовары/Хранение вещей и организа\xD1\0t", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"/Хозтовары/Товары для стир\0a", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"li\0a", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"/734859/samolet-radioupravlyaemyy-istrebitel-rabotaet-o\0k", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"/kosmetika-i-parfyum/parfyumeriya/mu\0t", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"/ko\0\x04", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "", ResultType::Throw, "JSON: begin >= end." },
|
|
||||||
{ "\"/stroitelstvo-i-remont/stroit\0t", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"/stroitelstvo-i-remont/stroitelnyy-instrument/av\0k", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"/s\0a", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"/Строительство и ремонт/Строительный инструмент/Изм\0e", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"/avto/soputstvuy\0l", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"/str\0\xD0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"Отвертка 2 в 1 \\\"TUNDRA basic\\\" 5х75 мм (+,-) \0\xFF", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"/stroitelstvo-i-remont/stroitelnyy-instrument/avtoinstrumen\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"Мелкая бытовая техника/Мелки\xD0\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"Пряжа \\\"Бамбук стрейч\\0\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"Карандаш чёрнографитны\xD0\0\xD0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"/Творчество/Рукоделие, аппликации/Пряжа и шерсть для \xD0\0l", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"/1071547/karandash-chernografitnyy-volshebstvo-nv-kruglyy-d-7-2mm-dl-176mm-plast-tuba/\0e", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"ca\0e", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"ca\0e", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"/1165424/chipbord-vyrubnoy-dlya-skrapbukinga-malyshi-mikki-maus-disney-bebi\0t", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"/posuda/kuhonnye-prinadlezhnosti-i-i\0d", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"/Канцтовары/Ежедневники и блокн\xD0\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"/kanctovary/ezhednevniki-i-blok\0a", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"Стакан \xD0\0a", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"Набор бумаги для скрапбукинга \\\"Мои первый годик\\\": Микки Маус, Дисней бэби, 12 листов 29.5 х 29.5 см, 160\0\x80", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"c\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"Органайзер для хранения аксессуаров, \0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"quantity\00", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"Сменный блок для тетрадей на кольцах А5, 160 листов клетка, офсет \xE2\x84\0=", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"/Сувениры/Ф\xD0\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"\0\"", ResultType::Return, "\0" },
|
|
||||||
{ "\"\0\x04", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"va\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"ca\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"В \0\x04", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"/letnie-tovary/z\0\x04", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"Посудомоечная машина Ha\0=", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"Крупная бытов\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"Полочная акустическая система Magnat Needl\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"brand\00", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"\0d", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"pos\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"c\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"var\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"Телевизоры и видеотехника/Всё для домашних кинотеатр\0=", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"Флеш-диск Transcend JetFlash 620 8GB (TS8GJF62\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"Табурет Мег\0\xD0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"variant\0\x04", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"Катал\xD0\0\"", ResultType::Return, "Катал\xD0\0" },
|
|
||||||
{ "\"К\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"Полочная акустическая система Magnat Needl\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"brand\00", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"\0d", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"pos\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"c\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"17\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"/igrushki/razvivayusc\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"Ключница \\\"\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"/Игр\xD1\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"/Игрушки/Игрушки для девочек/Игровые модули дл\xD1\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"Крупная бытовая техника/Стиральные машины/С фронт\xD0\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\0 ", ResultType::Throw, "JSON: expected \", got \0" },
|
|
||||||
{ "\"Светодиодная лента SMD3528, 5 м. IP33, 60LED, зеленый, 4,8W/мет\xD1\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"Сантехника/Мебель для ванных комнат/Стол\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\0o", ResultType::Throw, "JSON: expected \", got \0" },
|
|
||||||
{ "\"/igrushki/konstruktory\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"/posuda/kuhonnye-prinadlezhnosti-i-instrumenty/kuhonnye-pr\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"/1290414/komplekt-zhenskiy-dzhemper-plusbryuki-m-254-09-malina-plustemno-siniy-\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"/Творчество/Рисование/Инструменты и кра\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"Строительство и ремонт/Силовая техника/Зарядные устройства для автомобильных аккумуляторов/Пуско-зарядные устр\xD0\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"Строительство и ремонт/Силовая техника/Зарядные устройств\xD0\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"Строительство и ремонт/Силовая техника/Зарядные устройства для автомобиль\0d", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\0 ", ResultType::Throw, "JSON: expected \", got \0" },
|
|
||||||
{ "\"/Хозтовары/Хранение вещей и организа\xD1\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"/Хозтовары/Товары для стир\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"li\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"/igrushki/igrus\0d", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"/734859/samolet-radioupravlyaemyy-istrebitel-rabotaet-o\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"/kosmetika-i-parfyum/parfyumeriya/mu\00", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"/ko\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"/avto/avtomobilnyy\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"/stroitelstvo-i-remont/stroit\00", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"/stroitelstvo-i-remont/stroitelnyy-instrument/av\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"/s\0d", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"/Строительство и ремонт/Строительный инструмент/Изм\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"/avto/soputstvuy\0\"", ResultType::Return, "/avto/soputstvuy\0" },
|
|
||||||
{ "\"/str\0k", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"Отвертка 2 в 1 \\\"TUNDRA basic\\\" 5х75 мм (+,-) \0\xD0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"/stroitelstvo-i-remont/stroitelnyy-instrument/avtoinstrumen\0=", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"Чайник электрический Vitesse\0=", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"Мелкая бытовая техника/Мелки\xD0\0\xD0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"Пряжа \\\"Бамбук стрейч\\0о", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"Карандаш чёрнографитны\xD0\0k", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"/Творчество/Рукоделие, аппликации/Пряжа и шерсть для \xD0\0\"", ResultType::Return, "/Творчество/Рукоделие, аппликации/Пряжа и шерсть для \xD0\0" },
|
|
||||||
{ "\"/1071547/karandash-chernografitnyy-volshebstvo-nv-kruglyy-d-7-2mm-dl-176mm-plast-tuba/\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"ca\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"/Подаро\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"Средство для прочис\xD1\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"i\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"/p\0\"", ResultType::Return, "/p\0" },
|
|
||||||
{ "\"/Сувениры/Магниты, н\xD0\0k", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"Дерев\xD0\0=", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"/prazdniki/svadba/svadebnaya-c\0\xD0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"/Канцт\0d", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"/Праздники/То\xD0\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"v\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"/Косметика \xD0\0d", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"/Спорт и отдых/Настольные игры/Покер, руле\xD1\0\xD0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"categ\0=", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"/retailr\0k", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"/retailrocket\0k", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"Ежедневник недат А5 140л кл,ляссе,обл пв\0=", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"/432809/ezhednevnik-organayzer-sredniy-s-remeshkom-na-knopke-v-oblozhke-kalkulyator-kalendar-do-\0\xD0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"/1165424/chipbord-vyrubnoy-dlya-skrapbukinga-malyshi-mikki-maus-disney-bebi\0d", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"/posuda/kuhonnye-prinadlezhnosti-i-i\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"/Канцтовары/Ежедневники и блокн\xD0\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"/kanctovary/ezhednevniki-i-blok\00", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"Стакан \xD0\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"Набор бумаги для скрапбукинга \\\"Мои первый годик\\\": Микки Маус, Дисней бэби, 12 листов 29.5 х 29.5 см, 160\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
|
|
||||||
{ "\"c\0\"", ResultType::Return, "c\0" },
|
|
||||||
};
|
|
||||||
|
|
||||||
for (auto i : boost::irange(0, 1/*00000*/))
|
|
||||||
{
|
|
||||||
static_cast<void>(i);
|
|
||||||
|
|
||||||
for (auto & r : test_data)
|
|
||||||
{
|
|
||||||
try
|
|
||||||
{
|
|
||||||
JSON j(r.input, r.input + strlen(r.input));
|
|
||||||
|
|
||||||
ASSERT_EQ(j.getString(), r.result);
|
|
||||||
ASSERT_TRUE(r.result_type == ResultType::Return);
|
|
||||||
}
|
|
||||||
catch (JSONException & e)
|
|
||||||
{
|
|
||||||
ASSERT_TRUE(r.result_type == ResultType::Throw);
|
|
||||||
ASSERT_EQ(e.message(), r.result);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
@ -243,15 +243,15 @@ struct integer<Bits, Signed>::_impl
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
const T alpha = t / max_int;
|
const T alpha = t / static_cast<T>(max_int);
|
||||||
|
|
||||||
if (alpha <= max_int)
|
if (alpha <= static_cast<T>(max_int))
|
||||||
self = static_cast<uint64_t>(alpha);
|
self = static_cast<uint64_t>(alpha);
|
||||||
else // max(double) / 2^64 will surely contain less than 52 precision bits, so speed up computations.
|
else // max(double) / 2^64 will surely contain less than 52 precision bits, so speed up computations.
|
||||||
set_multiplier<double>(self, alpha);
|
set_multiplier<double>(self, alpha);
|
||||||
|
|
||||||
self *= max_int;
|
self *= max_int;
|
||||||
self += static_cast<uint64_t>(t - alpha * max_int); // += b_i
|
self += static_cast<uint64_t>(t - alpha * static_cast<T>(max_int)); // += b_i
|
||||||
}
|
}
|
||||||
|
|
||||||
constexpr static void wide_integer_from_bultin(integer<Bits, Signed>& self, double rhs) noexcept {
|
constexpr static void wide_integer_from_bultin(integer<Bits, Signed>& self, double rhs) noexcept {
|
||||||
@ -265,11 +265,15 @@ struct integer<Bits, Signed>::_impl
|
|||||||
/// As to_Integral does a static_cast to int64_t, it may result in UB.
|
/// As to_Integral does a static_cast to int64_t, it may result in UB.
|
||||||
/// The necessary check here is that long double has enough significant (mantissa) bits to store the
|
/// The necessary check here is that long double has enough significant (mantissa) bits to store the
|
||||||
/// int64_t max value precisely.
|
/// int64_t max value precisely.
|
||||||
|
|
||||||
|
//TODO Be compatible with Apple aarch64
|
||||||
|
#if not (defined(__APPLE__) && defined(__aarch64__))
|
||||||
static_assert(LDBL_MANT_DIG >= 64,
|
static_assert(LDBL_MANT_DIG >= 64,
|
||||||
"On your system long double has less than 64 precision bits,"
|
"On your system long double has less than 64 precision bits,"
|
||||||
"which may result in UB when initializing double from int64_t");
|
"which may result in UB when initializing double from int64_t");
|
||||||
|
#endif
|
||||||
|
|
||||||
if ((rhs > 0 && rhs < max_int) || (rhs < 0 && rhs > min_int))
|
if ((rhs > 0 && rhs < static_cast<long double>(max_int)) || (rhs < 0 && rhs > static_cast<long double>(min_int)))
|
||||||
{
|
{
|
||||||
self = static_cast<int64_t>(rhs);
|
self = static_cast<int64_t>(rhs);
|
||||||
return;
|
return;
|
||||||
|
@ -152,7 +152,7 @@ static void signalHandler(int sig, siginfo_t * info, void * context)
|
|||||||
if (sig != SIGTSTP) /// This signal is used for debugging.
|
if (sig != SIGTSTP) /// This signal is used for debugging.
|
||||||
{
|
{
|
||||||
/// The time that is usually enough for separate thread to print info into log.
|
/// The time that is usually enough for separate thread to print info into log.
|
||||||
sleepForSeconds(10);
|
sleepForSeconds(20); /// FIXME: use some feedback from threads that process stacktrace
|
||||||
call_default_signal_handler(sig);
|
call_default_signal_handler(sig);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -230,10 +230,10 @@ public:
|
|||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
siginfo_t info;
|
siginfo_t info{};
|
||||||
ucontext_t context;
|
ucontext_t context{};
|
||||||
StackTrace stack_trace(NoCapture{});
|
StackTrace stack_trace(NoCapture{});
|
||||||
UInt32 thread_num;
|
UInt32 thread_num{};
|
||||||
std::string query_id;
|
std::string query_id;
|
||||||
DB::ThreadStatus * thread_ptr{};
|
DB::ThreadStatus * thread_ptr{};
|
||||||
|
|
||||||
@ -311,7 +311,8 @@ private:
|
|||||||
if (stack_trace.getSize())
|
if (stack_trace.getSize())
|
||||||
{
|
{
|
||||||
/// Write bare stack trace (addresses) just in case if we will fail to print symbolized stack trace.
|
/// Write bare stack trace (addresses) just in case if we will fail to print symbolized stack trace.
|
||||||
/// NOTE This still require memory allocations and mutex lock inside logger. BTW we can also print it to stderr using write syscalls.
|
/// NOTE: This still require memory allocations and mutex lock inside logger.
|
||||||
|
/// BTW we can also print it to stderr using write syscalls.
|
||||||
|
|
||||||
std::stringstream bare_stacktrace;
|
std::stringstream bare_stacktrace;
|
||||||
bare_stacktrace << "Stack trace:";
|
bare_stacktrace << "Stack trace:";
|
||||||
@ -324,7 +325,7 @@ private:
|
|||||||
/// Write symbolized stack trace line by line for better grep-ability.
|
/// Write symbolized stack trace line by line for better grep-ability.
|
||||||
stack_trace.toStringEveryLine([&](const std::string & s) { LOG_FATAL(log, s); });
|
stack_trace.toStringEveryLine([&](const std::string & s) { LOG_FATAL(log, s); });
|
||||||
|
|
||||||
#if defined(__linux__)
|
#if defined(OS_LINUX)
|
||||||
/// Write information about binary checksum. It can be difficult to calculate, so do it only after printing stack trace.
|
/// Write information about binary checksum. It can be difficult to calculate, so do it only after printing stack trace.
|
||||||
String calculated_binary_hash = getHashOfLoadedBinaryHex();
|
String calculated_binary_hash = getHashOfLoadedBinaryHex();
|
||||||
if (daemon.stored_binary_hash.empty())
|
if (daemon.stored_binary_hash.empty())
|
||||||
@ -415,7 +416,9 @@ static void sanitizerDeathCallback()
|
|||||||
else
|
else
|
||||||
log_message = "Terminate called without an active exception";
|
log_message = "Terminate called without an active exception";
|
||||||
|
|
||||||
static const size_t buf_size = 1024;
|
/// POSIX.1 says that write(2)s of less than PIPE_BUF bytes must be atomic - man 7 pipe
|
||||||
|
/// And the buffer should not be too small because our exception messages can be large.
|
||||||
|
static constexpr size_t buf_size = PIPE_BUF;
|
||||||
|
|
||||||
if (log_message.size() > buf_size - 16)
|
if (log_message.size() > buf_size - 16)
|
||||||
log_message.resize(buf_size - 16);
|
log_message.resize(buf_size - 16);
|
||||||
@ -561,6 +564,7 @@ void debugIncreaseOOMScore()
|
|||||||
{
|
{
|
||||||
DB::WriteBufferFromFile buf("/proc/self/oom_score_adj");
|
DB::WriteBufferFromFile buf("/proc/self/oom_score_adj");
|
||||||
buf.write(new_score.c_str(), new_score.size());
|
buf.write(new_score.c_str(), new_score.size());
|
||||||
|
buf.close();
|
||||||
}
|
}
|
||||||
catch (const Poco::Exception & e)
|
catch (const Poco::Exception & e)
|
||||||
{
|
{
|
||||||
@ -783,7 +787,7 @@ void BaseDaemon::initializeTerminationAndSignalProcessing()
|
|||||||
/// Setup signal handlers.
|
/// Setup signal handlers.
|
||||||
/// SIGTSTP is added for debugging purposes. To output a stack trace of any running thread at anytime.
|
/// SIGTSTP is added for debugging purposes. To output a stack trace of any running thread at anytime.
|
||||||
|
|
||||||
addSignalHandler({SIGABRT, SIGSEGV, SIGILL, SIGBUS, SIGSYS, SIGFPE, SIGPIPE, SIGTSTP}, signalHandler, &handled_signals);
|
addSignalHandler({SIGABRT, SIGSEGV, SIGILL, SIGBUS, SIGSYS, SIGFPE, SIGPIPE, SIGTSTP, SIGTRAP}, signalHandler, &handled_signals);
|
||||||
addSignalHandler({SIGHUP, SIGUSR1}, closeLogsSignalHandler, &handled_signals);
|
addSignalHandler({SIGHUP, SIGUSR1}, closeLogsSignalHandler, &handled_signals);
|
||||||
addSignalHandler({SIGINT, SIGQUIT, SIGTERM}, terminateRequestedSignalHandler, &handled_signals);
|
addSignalHandler({SIGINT, SIGQUIT, SIGTERM}, terminateRequestedSignalHandler, &handled_signals);
|
||||||
|
|
||||||
@ -986,7 +990,7 @@ void BaseDaemon::setupWatchdog()
|
|||||||
if (errno == ECHILD)
|
if (errno == ECHILD)
|
||||||
{
|
{
|
||||||
logger().information("Child process no longer exists.");
|
logger().information("Child process no longer exists.");
|
||||||
_exit(status);
|
_exit(WEXITSTATUS(status));
|
||||||
}
|
}
|
||||||
|
|
||||||
if (WIFEXITED(status))
|
if (WIFEXITED(status))
|
||||||
@ -1020,7 +1024,7 @@ void BaseDaemon::setupWatchdog()
|
|||||||
|
|
||||||
/// Automatic restart is not enabled but you can play with it.
|
/// Automatic restart is not enabled but you can play with it.
|
||||||
#if 1
|
#if 1
|
||||||
_exit(status);
|
_exit(WEXITSTATUS(status));
|
||||||
#else
|
#else
|
||||||
logger().information("Will restart.");
|
logger().information("Will restart.");
|
||||||
if (argv0)
|
if (argv0)
|
||||||
|
@ -83,7 +83,7 @@ public:
|
|||||||
template <class T>
|
template <class T>
|
||||||
void writeToGraphite(const std::string & key, const T & value, const std::string & config_name = DEFAULT_GRAPHITE_CONFIG_NAME, time_t timestamp = 0, const std::string & custom_root_path = "")
|
void writeToGraphite(const std::string & key, const T & value, const std::string & config_name = DEFAULT_GRAPHITE_CONFIG_NAME, time_t timestamp = 0, const std::string & custom_root_path = "")
|
||||||
{
|
{
|
||||||
auto writer = getGraphiteWriter(config_name);
|
auto *writer = getGraphiteWriter(config_name);
|
||||||
if (writer)
|
if (writer)
|
||||||
writer->write(key, value, timestamp, custom_root_path);
|
writer->write(key, value, timestamp, custom_root_path);
|
||||||
}
|
}
|
||||||
@ -91,7 +91,7 @@ public:
|
|||||||
template <class T>
|
template <class T>
|
||||||
void writeToGraphite(const GraphiteWriter::KeyValueVector<T> & key_vals, const std::string & config_name = DEFAULT_GRAPHITE_CONFIG_NAME, time_t timestamp = 0, const std::string & custom_root_path = "")
|
void writeToGraphite(const GraphiteWriter::KeyValueVector<T> & key_vals, const std::string & config_name = DEFAULT_GRAPHITE_CONFIG_NAME, time_t timestamp = 0, const std::string & custom_root_path = "")
|
||||||
{
|
{
|
||||||
auto writer = getGraphiteWriter(config_name);
|
auto *writer = getGraphiteWriter(config_name);
|
||||||
if (writer)
|
if (writer)
|
||||||
writer->write(key_vals, timestamp, custom_root_path);
|
writer->write(key_vals, timestamp, custom_root_path);
|
||||||
}
|
}
|
||||||
@ -99,7 +99,7 @@ public:
|
|||||||
template <class T>
|
template <class T>
|
||||||
void writeToGraphite(const GraphiteWriter::KeyValueVector<T> & key_vals, const std::chrono::system_clock::time_point & current_time, const std::string & custom_root_path)
|
void writeToGraphite(const GraphiteWriter::KeyValueVector<T> & key_vals, const std::chrono::system_clock::time_point & current_time, const std::string & custom_root_path)
|
||||||
{
|
{
|
||||||
auto writer = getGraphiteWriter();
|
auto *writer = getGraphiteWriter();
|
||||||
if (writer)
|
if (writer)
|
||||||
writer->write(key_vals, std::chrono::system_clock::to_time_t(current_time), custom_root_path);
|
writer->write(key_vals, std::chrono::system_clock::to_time_t(current_time), custom_root_path);
|
||||||
}
|
}
|
||||||
|
@ -5,6 +5,11 @@ add_library (daemon
|
|||||||
)
|
)
|
||||||
|
|
||||||
target_include_directories (daemon PUBLIC ..)
|
target_include_directories (daemon PUBLIC ..)
|
||||||
|
|
||||||
|
if (OS_DARWIN AND NOT MAKE_STATIC_LIBRARIES)
|
||||||
|
target_link_libraries (daemon PUBLIC -Wl,-undefined,dynamic_lookup)
|
||||||
|
endif()
|
||||||
|
|
||||||
target_link_libraries (daemon PUBLIC loggers PRIVATE clickhouse_common_io clickhouse_common_config common ${EXECINFO_LIBRARIES})
|
target_link_libraries (daemon PUBLIC loggers PRIVATE clickhouse_common_io clickhouse_common_config common ${EXECINFO_LIBRARIES})
|
||||||
|
|
||||||
if (USE_SENTRY)
|
if (USE_SENTRY)
|
||||||
|
@ -9,6 +9,7 @@
|
|||||||
#include <common/getMemoryAmount.h>
|
#include <common/getMemoryAmount.h>
|
||||||
#include <common/logger_useful.h>
|
#include <common/logger_useful.h>
|
||||||
|
|
||||||
|
#include <Common/formatReadable.h>
|
||||||
#include <Common/SymbolIndex.h>
|
#include <Common/SymbolIndex.h>
|
||||||
#include <Common/StackTrace.h>
|
#include <Common/StackTrace.h>
|
||||||
#include <Common/getNumberOfPhysicalCPUCores.h>
|
#include <Common/getNumberOfPhysicalCPUCores.h>
|
||||||
|
68
base/ext/scope_guard_safe.h
Normal file
68
base/ext/scope_guard_safe.h
Normal file
@ -0,0 +1,68 @@
|
|||||||
|
#pragma once
|
||||||
|
|
||||||
|
#include <ext/scope_guard.h>
|
||||||
|
#include <common/logger_useful.h>
|
||||||
|
#include <Common/MemoryTracker.h>
|
||||||
|
|
||||||
|
/// Same as SCOPE_EXIT() but block the MEMORY_LIMIT_EXCEEDED errors.
|
||||||
|
///
|
||||||
|
/// Typical example of SCOPE_EXIT_MEMORY() usage is when code under it may do
|
||||||
|
/// some tiny allocations, that may fail under high memory pressure or/and low
|
||||||
|
/// max_memory_usage (and related limits).
|
||||||
|
///
|
||||||
|
/// NOTE: it should be used with caution.
|
||||||
|
#define SCOPE_EXIT_MEMORY(...) SCOPE_EXIT( \
|
||||||
|
MemoryTracker::LockExceptionInThread \
|
||||||
|
lock_memory_tracker(VariableContext::Global); \
|
||||||
|
__VA_ARGS__; \
|
||||||
|
)
|
||||||
|
|
||||||
|
/// Same as SCOPE_EXIT() but try/catch/tryLogCurrentException any exceptions.
|
||||||
|
///
|
||||||
|
/// SCOPE_EXIT_SAFE() should be used in case the exception during the code
|
||||||
|
/// under SCOPE_EXIT() is not "that fatal" and error message in log is enough.
|
||||||
|
///
|
||||||
|
/// Good example is calling CurrentThread::detachQueryIfNotDetached().
|
||||||
|
///
|
||||||
|
/// Anti-pattern is calling WriteBuffer::finalize() under SCOPE_EXIT_SAFE()
|
||||||
|
/// (since finalize() can do final write and it is better to fail abnormally
|
||||||
|
/// instead of ignoring write error).
|
||||||
|
///
|
||||||
|
/// NOTE: it should be used with double caution.
|
||||||
|
#define SCOPE_EXIT_SAFE(...) SCOPE_EXIT( \
|
||||||
|
try \
|
||||||
|
{ \
|
||||||
|
__VA_ARGS__; \
|
||||||
|
} \
|
||||||
|
catch (...) \
|
||||||
|
{ \
|
||||||
|
tryLogCurrentException(__PRETTY_FUNCTION__); \
|
||||||
|
} \
|
||||||
|
)
|
||||||
|
|
||||||
|
/// Same as SCOPE_EXIT() but:
|
||||||
|
/// - block the MEMORY_LIMIT_EXCEEDED errors,
|
||||||
|
/// - try/catch/tryLogCurrentException any exceptions.
|
||||||
|
///
|
||||||
|
/// SCOPE_EXIT_MEMORY_SAFE() can be used when the error can be ignored, and in
|
||||||
|
/// addition to SCOPE_EXIT_SAFE() it will also lock MEMORY_LIMIT_EXCEEDED to
|
||||||
|
/// avoid such exceptions.
|
||||||
|
///
|
||||||
|
/// It does exists as a separate helper, since you do not need to lock
|
||||||
|
/// MEMORY_LIMIT_EXCEEDED always (there are cases when code under SCOPE_EXIT does
|
||||||
|
/// not do any allocations, while LockExceptionInThread increment atomic
|
||||||
|
/// variable).
|
||||||
|
///
|
||||||
|
/// NOTE: it should be used with triple caution.
|
||||||
|
#define SCOPE_EXIT_MEMORY_SAFE(...) SCOPE_EXIT( \
|
||||||
|
try \
|
||||||
|
{ \
|
||||||
|
MemoryTracker::LockExceptionInThread \
|
||||||
|
lock_memory_tracker(VariableContext::Global); \
|
||||||
|
__VA_ARGS__; \
|
||||||
|
} \
|
||||||
|
catch (...) \
|
||||||
|
{ \
|
||||||
|
tryLogCurrentException(__PRETTY_FUNCTION__); \
|
||||||
|
} \
|
||||||
|
)
|
@ -1,5 +1,8 @@
|
|||||||
if (GLIBC_COMPATIBILITY)
|
if (GLIBC_COMPATIBILITY)
|
||||||
set (ENABLE_FASTMEMCPY ON)
|
add_subdirectory(memcpy)
|
||||||
|
if(TARGET memcpy)
|
||||||
|
set(MEMCPY_LIBRARY memcpy)
|
||||||
|
endif()
|
||||||
|
|
||||||
enable_language(ASM)
|
enable_language(ASM)
|
||||||
include(CheckIncludeFile)
|
include(CheckIncludeFile)
|
||||||
@ -27,13 +30,6 @@ if (GLIBC_COMPATIBILITY)
|
|||||||
list(APPEND glibc_compatibility_sources musl/getentropy.c)
|
list(APPEND glibc_compatibility_sources musl/getentropy.c)
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
if (NOT ARCH_ARM)
|
|
||||||
# clickhouse_memcpy don't support ARCH_ARM, see https://github.com/ClickHouse/ClickHouse/issues/18951
|
|
||||||
add_library (clickhouse_memcpy OBJECT
|
|
||||||
${ClickHouse_SOURCE_DIR}/contrib/FastMemcpy/memcpy_wrapper.c
|
|
||||||
)
|
|
||||||
endif()
|
|
||||||
|
|
||||||
# Need to omit frame pointers to match the performance of glibc
|
# Need to omit frame pointers to match the performance of glibc
|
||||||
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fomit-frame-pointer")
|
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fomit-frame-pointer")
|
||||||
|
|
||||||
@ -51,15 +47,16 @@ if (GLIBC_COMPATIBILITY)
|
|||||||
target_compile_options(glibc-compatibility PRIVATE -fPIC)
|
target_compile_options(glibc-compatibility PRIVATE -fPIC)
|
||||||
endif ()
|
endif ()
|
||||||
|
|
||||||
target_link_libraries(global-libs INTERFACE glibc-compatibility)
|
target_link_libraries(global-libs INTERFACE glibc-compatibility ${MEMCPY_LIBRARY})
|
||||||
|
|
||||||
install(
|
install(
|
||||||
TARGETS glibc-compatibility
|
TARGETS glibc-compatibility ${MEMCPY_LIBRARY}
|
||||||
EXPORT global
|
EXPORT global
|
||||||
ARCHIVE DESTINATION lib
|
ARCHIVE DESTINATION lib
|
||||||
)
|
)
|
||||||
|
|
||||||
message (STATUS "Some symbols from glibc will be replaced for compatibility")
|
message (STATUS "Some symbols from glibc will be replaced for compatibility")
|
||||||
|
|
||||||
elseif (YANDEX_OFFICIAL_BUILD)
|
elseif (YANDEX_OFFICIAL_BUILD)
|
||||||
message (WARNING "Option GLIBC_COMPATIBILITY must be turned on for production builds.")
|
message (WARNING "Option GLIBC_COMPATIBILITY must be turned on for production builds.")
|
||||||
endif ()
|
endif ()
|
||||||
|
8
base/glibc-compatibility/memcpy/CMakeLists.txt
Normal file
8
base/glibc-compatibility/memcpy/CMakeLists.txt
Normal file
@ -0,0 +1,8 @@
|
|||||||
|
if (ARCH_AMD64)
|
||||||
|
add_library(memcpy STATIC memcpy.cpp)
|
||||||
|
|
||||||
|
# We allow to include memcpy.h from user code for better inlining.
|
||||||
|
target_include_directories(memcpy PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}>)
|
||||||
|
|
||||||
|
target_compile_options(memcpy PRIVATE -fno-builtin-memcpy)
|
||||||
|
endif ()
|
6
base/glibc-compatibility/memcpy/memcpy.cpp
Normal file
6
base/glibc-compatibility/memcpy/memcpy.cpp
Normal file
@ -0,0 +1,6 @@
|
|||||||
|
#include "memcpy.h"
|
||||||
|
|
||||||
|
extern "C" void * memcpy(void * __restrict dst, const void * __restrict src, size_t size)
|
||||||
|
{
|
||||||
|
return inline_memcpy(dst, src, size);
|
||||||
|
}
|
217
base/glibc-compatibility/memcpy/memcpy.h
Normal file
217
base/glibc-compatibility/memcpy/memcpy.h
Normal file
@ -0,0 +1,217 @@
|
|||||||
|
#include <cstddef>
|
||||||
|
|
||||||
|
#include <emmintrin.h>
|
||||||
|
|
||||||
|
|
||||||
|
/** Custom memcpy implementation for ClickHouse.
|
||||||
|
* It has the following benefits over using glibc's implementation:
|
||||||
|
* 1. Avoiding dependency on specific version of glibc's symbol, like memcpy@@GLIBC_2.14 for portability.
|
||||||
|
* 2. Avoiding indirect call via PLT due to shared linking, that can be less efficient.
|
||||||
|
* 3. It's possible to include this header and call inline_memcpy directly for better inlining or interprocedural analysis.
|
||||||
|
* 4. Better results on our performance tests on current CPUs: up to 25% on some queries and up to 0.7%..1% in average across all queries.
|
||||||
|
*
|
||||||
|
* Writing our own memcpy is extremely difficult for the following reasons:
|
||||||
|
* 1. The optimal variant depends on the specific CPU model.
|
||||||
|
* 2. The optimal variant depends on the distribution of size arguments.
|
||||||
|
* 3. It depends on the number of threads copying data concurrently.
|
||||||
|
* 4. It also depends on how the calling code is using the copied data and how the different memcpy calls are related to each other.
|
||||||
|
* Due to vast range of scenarios it makes proper testing especially difficult.
|
||||||
|
* When writing our own memcpy there is a risk to overoptimize it
|
||||||
|
* on non-representative microbenchmarks while making real-world use cases actually worse.
|
||||||
|
*
|
||||||
|
* Most of the benchmarks for memcpy on the internet are wrong.
|
||||||
|
*
|
||||||
|
* Let's look at the details:
|
||||||
|
*
|
||||||
|
* For small size, the order of branches in code is important.
|
||||||
|
* There are variants with specific order of branches (like here or in glibc)
|
||||||
|
* or with jump table (in asm code see example from Cosmopolitan libc:
|
||||||
|
* https://github.com/jart/cosmopolitan/blob/de09bec215675e9b0beb722df89c6f794da74f3f/libc/nexgen32e/memcpy.S#L61)
|
||||||
|
* or with Duff device in C (see https://github.com/skywind3000/FastMemcpy/)
|
||||||
|
*
|
||||||
|
* It's also important how to copy uneven sizes.
|
||||||
|
* Almost every implementation, including this, is using two overlapping movs.
|
||||||
|
*
|
||||||
|
* It is important to disable -ftree-loop-distribute-patterns when compiling memcpy implementation,
|
||||||
|
* otherwise the compiler can replace internal loops to a call to memcpy that will lead to infinite recursion.
|
||||||
|
*
|
||||||
|
* For larger sizes it's important to choose the instructions used:
|
||||||
|
* - SSE or AVX or AVX-512;
|
||||||
|
* - rep movsb;
|
||||||
|
* Performance will depend on the size threshold, on the CPU model, on the "erms" flag
|
||||||
|
* ("Enhansed Rep MovS" - it indicates that performance of "rep movsb" is decent for large sizes)
|
||||||
|
* https://stackoverflow.com/questions/43343231/enhanced-rep-movsb-for-memcpy
|
||||||
|
*
|
||||||
|
* Using AVX-512 can be bad due to throttling.
|
||||||
|
* Using AVX can be bad if most code is using SSE due to switching penalty
|
||||||
|
* (it also depends on the usage of "vzeroupper" instruction).
|
||||||
|
* But in some cases AVX gives a win.
|
||||||
|
*
|
||||||
|
* It also depends on how many times the loop will be unrolled.
|
||||||
|
* We are unrolling the loop 8 times (by the number of available registers), but it not always the best.
|
||||||
|
*
|
||||||
|
* It also depends on the usage of aligned or unaligned loads/stores.
|
||||||
|
* We are using unaligned loads and aligned stores.
|
||||||
|
*
|
||||||
|
* It also depends on the usage of prefetch instructions. It makes sense on some Intel CPUs but can slow down performance on AMD.
|
||||||
|
* Setting up correct offset for prefetching is non-obvious.
|
||||||
|
*
|
||||||
|
* Non-temporary (cache bypassing) stores can be used for very large sizes (more than a half of L3 cache).
|
||||||
|
* But the exact threshold is unclear - when doing memcpy from multiple threads the optimal threshold can be lower,
|
||||||
|
* because L3 cache is shared (and L2 cache is partially shared).
|
||||||
|
*
|
||||||
|
* Very large size of memcpy typically indicates suboptimal (not cache friendly) algorithms in code or unrealistic scenarios,
|
||||||
|
* so we don't pay attention to using non-temporary stores.
|
||||||
|
*
|
||||||
|
* On recent Intel CPUs, the presence of "erms" makes "rep movsb" the most benefitial,
|
||||||
|
* even comparing to non-temporary aligned unrolled stores even with the most wide registers.
|
||||||
|
*
|
||||||
|
* memcpy can be written in asm, C or C++. The latter can also use inline asm.
|
||||||
|
* The asm implementation can be better to make sure that compiler won't make the code worse,
|
||||||
|
* to ensure the order of branches, the code layout, the usage of all required registers.
|
||||||
|
* But if it is located in separate translation unit, inlining will not be possible
|
||||||
|
* (inline asm can be used to overcome this limitation).
|
||||||
|
* Sometimes C or C++ code can be further optimized by compiler.
|
||||||
|
* For example, clang is capable replacing SSE intrinsics to AVX code if -mavx is used.
|
||||||
|
*
|
||||||
|
* Please note that compiler can replace plain code to memcpy and vice versa.
|
||||||
|
* - memcpy with compile-time known small size is replaced to simple instructions without a call to memcpy;
|
||||||
|
* it is controlled by -fbuiltin-memcpy and can be manually ensured by calling __builtin_memcpy.
|
||||||
|
* This is often used to implement unaligned load/store without undefined behaviour in C++.
|
||||||
|
* - a loop with copying bytes can be recognized and replaced by a call to memcpy;
|
||||||
|
* it is controlled by -ftree-loop-distribute-patterns.
|
||||||
|
* - also note that a loop with copying bytes can be unrolled, peeled and vectorized that will give you
|
||||||
|
* inline code somewhat similar to a decent implementation of memcpy.
|
||||||
|
*
|
||||||
|
* This description is up to date as of Mar 2021.
|
||||||
|
*
|
||||||
|
* How to test the memcpy implementation for performance:
|
||||||
|
* 1. Test on real production workload.
|
||||||
|
* 2. For synthetic test, see utils/memcpy-bench, but make sure you will do the best to exhaust the wide range of scenarios.
|
||||||
|
*
|
||||||
|
* TODO: Add self-tuning memcpy with bayesian bandits algorithm for large sizes.
|
||||||
|
* See https://habr.com/en/company/yandex/blog/457612/
|
||||||
|
*/
|
||||||
|
|
||||||
|
|
||||||
|
static inline void * inline_memcpy(void * __restrict dst_, const void * __restrict src_, size_t size)
|
||||||
|
{
|
||||||
|
/// We will use pointer arithmetic, so char pointer will be used.
|
||||||
|
/// Note that __restrict makes sense (otherwise compiler will reload data from memory
|
||||||
|
/// instead of using the value of registers due to possible aliasing).
|
||||||
|
char * __restrict dst = reinterpret_cast<char * __restrict>(dst_);
|
||||||
|
const char * __restrict src = reinterpret_cast<const char * __restrict>(src_);
|
||||||
|
|
||||||
|
/// Standard memcpy returns the original value of dst. It is rarely used but we have to do it.
|
||||||
|
/// If you use memcpy with small but non-constant sizes, you can call inline_memcpy directly
|
||||||
|
/// for inlining and removing this single instruction.
|
||||||
|
void * ret = dst;
|
||||||
|
|
||||||
|
tail:
|
||||||
|
/// Small sizes and tails after the loop for large sizes.
|
||||||
|
/// The order of branches is important but in fact the optimal order depends on the distribution of sizes in your application.
|
||||||
|
/// This order of branches is from the disassembly of glibc's code.
|
||||||
|
/// We copy chunks of possibly uneven size with two overlapping movs.
|
||||||
|
/// Example: to copy 5 bytes [0, 1, 2, 3, 4] we will copy tail [1, 2, 3, 4] first and then head [0, 1, 2, 3].
|
||||||
|
if (size <= 16)
|
||||||
|
{
|
||||||
|
if (size >= 8)
|
||||||
|
{
|
||||||
|
/// Chunks of 8..16 bytes.
|
||||||
|
__builtin_memcpy(dst + size - 8, src + size - 8, 8);
|
||||||
|
__builtin_memcpy(dst, src, 8);
|
||||||
|
}
|
||||||
|
else if (size >= 4)
|
||||||
|
{
|
||||||
|
/// Chunks of 4..7 bytes.
|
||||||
|
__builtin_memcpy(dst + size - 4, src + size - 4, 4);
|
||||||
|
__builtin_memcpy(dst, src, 4);
|
||||||
|
}
|
||||||
|
else if (size >= 2)
|
||||||
|
{
|
||||||
|
/// Chunks of 2..3 bytes.
|
||||||
|
__builtin_memcpy(dst + size - 2, src + size - 2, 2);
|
||||||
|
__builtin_memcpy(dst, src, 2);
|
||||||
|
}
|
||||||
|
else if (size >= 1)
|
||||||
|
{
|
||||||
|
/// A single byte.
|
||||||
|
*dst = *src;
|
||||||
|
}
|
||||||
|
/// No bytes remaining.
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
/// Medium and large sizes.
|
||||||
|
if (size <= 128)
|
||||||
|
{
|
||||||
|
/// Medium size, not enough for full loop unrolling.
|
||||||
|
|
||||||
|
/// We will copy the last 16 bytes.
|
||||||
|
_mm_storeu_si128(reinterpret_cast<__m128i *>(dst + size - 16), _mm_loadu_si128(reinterpret_cast<const __m128i *>(src + size - 16)));
|
||||||
|
|
||||||
|
/// Then we will copy every 16 bytes from the beginning in a loop.
|
||||||
|
/// The last loop iteration will possibly overwrite some part of already copied last 16 bytes.
|
||||||
|
/// This is Ok, similar to the code for small sizes above.
|
||||||
|
while (size > 16)
|
||||||
|
{
|
||||||
|
_mm_storeu_si128(reinterpret_cast<__m128i *>(dst), _mm_loadu_si128(reinterpret_cast<const __m128i *>(src)));
|
||||||
|
dst += 16;
|
||||||
|
src += 16;
|
||||||
|
size -= 16;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
/// Large size with fully unrolled loop.
|
||||||
|
|
||||||
|
/// Align destination to 16 bytes boundary.
|
||||||
|
size_t padding = (16 - (reinterpret_cast<size_t>(dst) & 15)) & 15;
|
||||||
|
|
||||||
|
/// If not aligned - we will copy first 16 bytes with unaligned stores.
|
||||||
|
if (padding > 0)
|
||||||
|
{
|
||||||
|
__m128i head = _mm_loadu_si128(reinterpret_cast<const __m128i*>(src));
|
||||||
|
_mm_storeu_si128(reinterpret_cast<__m128i*>(dst), head);
|
||||||
|
dst += padding;
|
||||||
|
src += padding;
|
||||||
|
size -= padding;
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Aligned unrolled copy. We will use half of available SSE registers.
|
||||||
|
/// It's not possible to have both src and dst aligned.
|
||||||
|
/// So, we will use aligned stores and unaligned loads.
|
||||||
|
__m128i c0, c1, c2, c3, c4, c5, c6, c7;
|
||||||
|
|
||||||
|
while (size >= 128)
|
||||||
|
{
|
||||||
|
c0 = _mm_loadu_si128(reinterpret_cast<const __m128i*>(src) + 0);
|
||||||
|
c1 = _mm_loadu_si128(reinterpret_cast<const __m128i*>(src) + 1);
|
||||||
|
c2 = _mm_loadu_si128(reinterpret_cast<const __m128i*>(src) + 2);
|
||||||
|
c3 = _mm_loadu_si128(reinterpret_cast<const __m128i*>(src) + 3);
|
||||||
|
c4 = _mm_loadu_si128(reinterpret_cast<const __m128i*>(src) + 4);
|
||||||
|
c5 = _mm_loadu_si128(reinterpret_cast<const __m128i*>(src) + 5);
|
||||||
|
c6 = _mm_loadu_si128(reinterpret_cast<const __m128i*>(src) + 6);
|
||||||
|
c7 = _mm_loadu_si128(reinterpret_cast<const __m128i*>(src) + 7);
|
||||||
|
src += 128;
|
||||||
|
_mm_store_si128((reinterpret_cast<__m128i*>(dst) + 0), c0);
|
||||||
|
_mm_store_si128((reinterpret_cast<__m128i*>(dst) + 1), c1);
|
||||||
|
_mm_store_si128((reinterpret_cast<__m128i*>(dst) + 2), c2);
|
||||||
|
_mm_store_si128((reinterpret_cast<__m128i*>(dst) + 3), c3);
|
||||||
|
_mm_store_si128((reinterpret_cast<__m128i*>(dst) + 4), c4);
|
||||||
|
_mm_store_si128((reinterpret_cast<__m128i*>(dst) + 5), c5);
|
||||||
|
_mm_store_si128((reinterpret_cast<__m128i*>(dst) + 6), c6);
|
||||||
|
_mm_store_si128((reinterpret_cast<__m128i*>(dst) + 7), c7);
|
||||||
|
dst += 128;
|
||||||
|
|
||||||
|
size -= 128;
|
||||||
|
}
|
||||||
|
|
||||||
|
/// The latest remaining 0..127 bytes will be processed as usual.
|
||||||
|
goto tail;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
|
@ -31,7 +31,7 @@ static void *volatile vdso_func = (void *)getcpu_init;
|
|||||||
int sched_getcpu(void)
|
int sched_getcpu(void)
|
||||||
{
|
{
|
||||||
int r;
|
int r;
|
||||||
unsigned cpu;
|
unsigned cpu = 0;
|
||||||
|
|
||||||
#ifdef VDSO_GETCPU_SYM
|
#ifdef VDSO_GETCPU_SYM
|
||||||
getcpu_f f = (getcpu_f)vdso_func;
|
getcpu_f f = (getcpu_f)vdso_func;
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
include(${ClickHouse_SOURCE_DIR}/cmake/dbms_glob_sources.cmake)
|
include("${ClickHouse_SOURCE_DIR}/cmake/dbms_glob_sources.cmake")
|
||||||
add_headers_and_sources(loggers .)
|
add_headers_and_sources(loggers .)
|
||||||
add_library(loggers ${loggers_sources} ${loggers_headers})
|
add_library(loggers ${loggers_sources} ${loggers_headers})
|
||||||
target_link_libraries(loggers PRIVATE dbms clickhouse_common_io)
|
target_link_libraries(loggers PRIVATE dbms clickhouse_common_io)
|
||||||
|
@ -3,7 +3,6 @@ add_library (mysqlxx
|
|||||||
Exception.cpp
|
Exception.cpp
|
||||||
Query.cpp
|
Query.cpp
|
||||||
ResultBase.cpp
|
ResultBase.cpp
|
||||||
StoreQueryResult.cpp
|
|
||||||
UseQueryResult.cpp
|
UseQueryResult.cpp
|
||||||
Row.cpp
|
Row.cpp
|
||||||
Value.cpp
|
Value.cpp
|
||||||
@ -15,8 +14,8 @@ add_library (mysqlxx
|
|||||||
target_include_directories (mysqlxx PUBLIC ..)
|
target_include_directories (mysqlxx PUBLIC ..)
|
||||||
|
|
||||||
if (USE_INTERNAL_MYSQL_LIBRARY)
|
if (USE_INTERNAL_MYSQL_LIBRARY)
|
||||||
target_include_directories (mysqlxx PUBLIC ${ClickHouse_SOURCE_DIR}/contrib/mariadb-connector-c/include)
|
target_include_directories (mysqlxx PUBLIC "${ClickHouse_SOURCE_DIR}/contrib/mariadb-connector-c/include")
|
||||||
target_include_directories (mysqlxx PUBLIC ${ClickHouse_BINARY_DIR}/contrib/mariadb-connector-c/include)
|
target_include_directories (mysqlxx PUBLIC "${ClickHouse_BINARY_DIR}/contrib/mariadb-connector-c/include")
|
||||||
else ()
|
else ()
|
||||||
set(PLATFORM_LIBRARIES ${CMAKE_DL_LIBS})
|
set(PLATFORM_LIBRARIES ${CMAKE_DL_LIBS})
|
||||||
|
|
||||||
|
@ -51,10 +51,11 @@ Connection::Connection(
|
|||||||
const char* ssl_key,
|
const char* ssl_key,
|
||||||
unsigned timeout,
|
unsigned timeout,
|
||||||
unsigned rw_timeout,
|
unsigned rw_timeout,
|
||||||
bool enable_local_infile)
|
bool enable_local_infile,
|
||||||
|
bool opt_reconnect)
|
||||||
: Connection()
|
: Connection()
|
||||||
{
|
{
|
||||||
connect(db, server, user, password, port, socket, ssl_ca, ssl_cert, ssl_key, timeout, rw_timeout, enable_local_infile);
|
connect(db, server, user, password, port, socket, ssl_ca, ssl_cert, ssl_key, timeout, rw_timeout, enable_local_infile, opt_reconnect);
|
||||||
}
|
}
|
||||||
|
|
||||||
Connection::Connection(const std::string & config_name)
|
Connection::Connection(const std::string & config_name)
|
||||||
@ -80,7 +81,8 @@ void Connection::connect(const char* db,
|
|||||||
const char * ssl_key,
|
const char * ssl_key,
|
||||||
unsigned timeout,
|
unsigned timeout,
|
||||||
unsigned rw_timeout,
|
unsigned rw_timeout,
|
||||||
bool enable_local_infile)
|
bool enable_local_infile,
|
||||||
|
bool opt_reconnect)
|
||||||
{
|
{
|
||||||
if (is_connected)
|
if (is_connected)
|
||||||
disconnect();
|
disconnect();
|
||||||
@ -104,9 +106,8 @@ void Connection::connect(const char* db,
|
|||||||
if (mysql_options(driver.get(), MYSQL_OPT_LOCAL_INFILE, &enable_local_infile_arg))
|
if (mysql_options(driver.get(), MYSQL_OPT_LOCAL_INFILE, &enable_local_infile_arg))
|
||||||
throw ConnectionFailed(errorMessage(driver.get()), mysql_errno(driver.get()));
|
throw ConnectionFailed(errorMessage(driver.get()), mysql_errno(driver.get()));
|
||||||
|
|
||||||
/// Enables auto-reconnect.
|
/// See C API Developer Guide: Automatic Reconnection Control
|
||||||
bool reconnect = true;
|
if (mysql_options(driver.get(), MYSQL_OPT_RECONNECT, reinterpret_cast<const char *>(&opt_reconnect)))
|
||||||
if (mysql_options(driver.get(), MYSQL_OPT_RECONNECT, reinterpret_cast<const char *>(&reconnect)))
|
|
||||||
throw ConnectionFailed(errorMessage(driver.get()), mysql_errno(driver.get()));
|
throw ConnectionFailed(errorMessage(driver.get()), mysql_errno(driver.get()));
|
||||||
|
|
||||||
/// Specifies particular ssl key and certificate if it needs
|
/// Specifies particular ssl key and certificate if it needs
|
||||||
@ -116,8 +117,8 @@ void Connection::connect(const char* db,
|
|||||||
if (!mysql_real_connect(driver.get(), server, user, password, db, port, ifNotEmpty(socket), driver->client_flag))
|
if (!mysql_real_connect(driver.get(), server, user, password, db, port, ifNotEmpty(socket), driver->client_flag))
|
||||||
throw ConnectionFailed(errorMessage(driver.get()), mysql_errno(driver.get()));
|
throw ConnectionFailed(errorMessage(driver.get()), mysql_errno(driver.get()));
|
||||||
|
|
||||||
/// Sets UTF-8 as default encoding.
|
/// Sets UTF-8 as default encoding. See https://mariadb.com/kb/en/mysql_set_character_set/
|
||||||
if (mysql_set_character_set(driver.get(), "UTF8"))
|
if (mysql_set_character_set(driver.get(), "utf8mb4"))
|
||||||
throw ConnectionFailed(errorMessage(driver.get()), mysql_errno(driver.get()));
|
throw ConnectionFailed(errorMessage(driver.get()), mysql_errno(driver.get()));
|
||||||
|
|
||||||
is_connected = true;
|
is_connected = true;
|
||||||
|
@ -14,6 +14,8 @@
|
|||||||
|
|
||||||
/// Disable LOAD DATA LOCAL INFILE because it is insecure
|
/// Disable LOAD DATA LOCAL INFILE because it is insecure
|
||||||
#define MYSQLXX_DEFAULT_ENABLE_LOCAL_INFILE false
|
#define MYSQLXX_DEFAULT_ENABLE_LOCAL_INFILE false
|
||||||
|
/// See https://dev.mysql.com/doc/c-api/5.7/en/c-api-auto-reconnect.html
|
||||||
|
#define MYSQLXX_DEFAULT_MYSQL_OPT_RECONNECT true
|
||||||
|
|
||||||
|
|
||||||
namespace mysqlxx
|
namespace mysqlxx
|
||||||
@ -39,7 +41,6 @@ private:
|
|||||||
/** MySQL connection.
|
/** MySQL connection.
|
||||||
* Usage:
|
* Usage:
|
||||||
* mysqlxx::Connection connection("Test", "127.0.0.1", "root", "qwerty", 3306);
|
* mysqlxx::Connection connection("Test", "127.0.0.1", "root", "qwerty", 3306);
|
||||||
* std::cout << connection.query("SELECT 'Hello, World!'").store().at(0).at(0).getString() << std::endl;
|
|
||||||
*
|
*
|
||||||
* Or with Poco library configuration:
|
* Or with Poco library configuration:
|
||||||
* mysqlxx::Connection connection("mysql_params");
|
* mysqlxx::Connection connection("mysql_params");
|
||||||
@ -77,7 +78,8 @@ public:
|
|||||||
const char * ssl_key = "",
|
const char * ssl_key = "",
|
||||||
unsigned timeout = MYSQLXX_DEFAULT_TIMEOUT,
|
unsigned timeout = MYSQLXX_DEFAULT_TIMEOUT,
|
||||||
unsigned rw_timeout = MYSQLXX_DEFAULT_RW_TIMEOUT,
|
unsigned rw_timeout = MYSQLXX_DEFAULT_RW_TIMEOUT,
|
||||||
bool enable_local_infile = MYSQLXX_DEFAULT_ENABLE_LOCAL_INFILE);
|
bool enable_local_infile = MYSQLXX_DEFAULT_ENABLE_LOCAL_INFILE,
|
||||||
|
bool opt_reconnect = MYSQLXX_DEFAULT_MYSQL_OPT_RECONNECT);
|
||||||
|
|
||||||
/// Creates connection. Can be used if Poco::Util::Application is using.
|
/// Creates connection. Can be used if Poco::Util::Application is using.
|
||||||
/// All settings will be got from config_name section of configuration.
|
/// All settings will be got from config_name section of configuration.
|
||||||
@ -97,7 +99,8 @@ public:
|
|||||||
const char* ssl_key,
|
const char* ssl_key,
|
||||||
unsigned timeout = MYSQLXX_DEFAULT_TIMEOUT,
|
unsigned timeout = MYSQLXX_DEFAULT_TIMEOUT,
|
||||||
unsigned rw_timeout = MYSQLXX_DEFAULT_RW_TIMEOUT,
|
unsigned rw_timeout = MYSQLXX_DEFAULT_RW_TIMEOUT,
|
||||||
bool enable_local_infile = MYSQLXX_DEFAULT_ENABLE_LOCAL_INFILE);
|
bool enable_local_infile = MYSQLXX_DEFAULT_ENABLE_LOCAL_INFILE,
|
||||||
|
bool opt_reconnect = MYSQLXX_DEFAULT_MYSQL_OPT_RECONNECT);
|
||||||
|
|
||||||
void connect(const std::string & config_name)
|
void connect(const std::string & config_name)
|
||||||
{
|
{
|
||||||
@ -113,6 +116,7 @@ public:
|
|||||||
std::string ssl_cert = cfg.getString(config_name + ".ssl_cert", "");
|
std::string ssl_cert = cfg.getString(config_name + ".ssl_cert", "");
|
||||||
std::string ssl_key = cfg.getString(config_name + ".ssl_key", "");
|
std::string ssl_key = cfg.getString(config_name + ".ssl_key", "");
|
||||||
bool enable_local_infile = cfg.getBool(config_name + ".enable_local_infile", MYSQLXX_DEFAULT_ENABLE_LOCAL_INFILE);
|
bool enable_local_infile = cfg.getBool(config_name + ".enable_local_infile", MYSQLXX_DEFAULT_ENABLE_LOCAL_INFILE);
|
||||||
|
bool opt_reconnect = cfg.getBool(config_name + ".opt_reconnect", MYSQLXX_DEFAULT_MYSQL_OPT_RECONNECT);
|
||||||
|
|
||||||
unsigned timeout =
|
unsigned timeout =
|
||||||
cfg.getInt(config_name + ".connect_timeout",
|
cfg.getInt(config_name + ".connect_timeout",
|
||||||
@ -136,7 +140,8 @@ public:
|
|||||||
ssl_key.c_str(),
|
ssl_key.c_str(),
|
||||||
timeout,
|
timeout,
|
||||||
rw_timeout,
|
rw_timeout,
|
||||||
enable_local_infile);
|
enable_local_infile,
|
||||||
|
opt_reconnect);
|
||||||
}
|
}
|
||||||
|
|
||||||
/// If MySQL connection was established.
|
/// If MySQL connection was established.
|
||||||
|
@ -26,6 +26,15 @@ struct ConnectionFailed : public Exception
|
|||||||
};
|
};
|
||||||
|
|
||||||
|
|
||||||
|
/// Connection to MySQL server was lost
|
||||||
|
struct ConnectionLost : public Exception
|
||||||
|
{
|
||||||
|
ConnectionLost(const std::string & msg, int code = 0) : Exception(msg, code) {}
|
||||||
|
const char * name() const throw() override { return "mysqlxx::ConnectionLost"; }
|
||||||
|
const char * className() const throw() override { return "mysqlxx::ConnectionLost"; }
|
||||||
|
};
|
||||||
|
|
||||||
|
|
||||||
/// Erroneous query.
|
/// Erroneous query.
|
||||||
struct BadQuery : public Exception
|
struct BadQuery : public Exception
|
||||||
{
|
{
|
||||||
|
@ -10,7 +10,6 @@
|
|||||||
|
|
||||||
#include <common/sleep.h>
|
#include <common/sleep.h>
|
||||||
|
|
||||||
#include <Poco/Util/Application.h>
|
|
||||||
#include <Poco/Util/LayeredConfiguration.h>
|
#include <Poco/Util/LayeredConfiguration.h>
|
||||||
|
|
||||||
|
|
||||||
@ -41,7 +40,9 @@ void Pool::Entry::decrementRefCount()
|
|||||||
Pool::Pool(const Poco::Util::AbstractConfiguration & cfg, const std::string & config_name,
|
Pool::Pool(const Poco::Util::AbstractConfiguration & cfg, const std::string & config_name,
|
||||||
unsigned default_connections_, unsigned max_connections_,
|
unsigned default_connections_, unsigned max_connections_,
|
||||||
const char * parent_config_name_)
|
const char * parent_config_name_)
|
||||||
: default_connections(default_connections_), max_connections(max_connections_)
|
: logger(Poco::Logger::get("mysqlxx::Pool"))
|
||||||
|
, default_connections(default_connections_)
|
||||||
|
, max_connections(max_connections_)
|
||||||
{
|
{
|
||||||
server = cfg.getString(config_name + ".host");
|
server = cfg.getString(config_name + ".host");
|
||||||
|
|
||||||
@ -78,6 +79,9 @@ Pool::Pool(const Poco::Util::AbstractConfiguration & cfg, const std::string & co
|
|||||||
|
|
||||||
enable_local_infile = cfg.getBool(config_name + ".enable_local_infile",
|
enable_local_infile = cfg.getBool(config_name + ".enable_local_infile",
|
||||||
cfg.getBool(parent_config_name + ".enable_local_infile", MYSQLXX_DEFAULT_ENABLE_LOCAL_INFILE));
|
cfg.getBool(parent_config_name + ".enable_local_infile", MYSQLXX_DEFAULT_ENABLE_LOCAL_INFILE));
|
||||||
|
|
||||||
|
opt_reconnect = cfg.getBool(config_name + ".opt_reconnect",
|
||||||
|
cfg.getBool(parent_config_name + ".opt_reconnect", MYSQLXX_DEFAULT_MYSQL_OPT_RECONNECT));
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
@ -96,6 +100,8 @@ Pool::Pool(const Poco::Util::AbstractConfiguration & cfg, const std::string & co
|
|||||||
|
|
||||||
enable_local_infile = cfg.getBool(
|
enable_local_infile = cfg.getBool(
|
||||||
config_name + ".enable_local_infile", MYSQLXX_DEFAULT_ENABLE_LOCAL_INFILE);
|
config_name + ".enable_local_infile", MYSQLXX_DEFAULT_ENABLE_LOCAL_INFILE);
|
||||||
|
|
||||||
|
opt_reconnect = cfg.getBool(config_name + ".opt_reconnect", MYSQLXX_DEFAULT_MYSQL_OPT_RECONNECT);
|
||||||
}
|
}
|
||||||
|
|
||||||
connect_timeout = cfg.getInt(config_name + ".connect_timeout",
|
connect_timeout = cfg.getInt(config_name + ".connect_timeout",
|
||||||
@ -125,20 +131,30 @@ Pool::Entry Pool::get()
|
|||||||
initialize();
|
initialize();
|
||||||
for (;;)
|
for (;;)
|
||||||
{
|
{
|
||||||
|
logger.trace("(%s): Iterating through existing MySQL connections", getDescription());
|
||||||
|
|
||||||
for (auto & connection : connections)
|
for (auto & connection : connections)
|
||||||
{
|
{
|
||||||
if (connection->ref_count == 0)
|
if (connection->ref_count == 0)
|
||||||
return Entry(connection, this);
|
return Entry(connection, this);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
logger.trace("(%s): Trying to allocate a new connection.", getDescription());
|
||||||
if (connections.size() < static_cast<size_t>(max_connections))
|
if (connections.size() < static_cast<size_t>(max_connections))
|
||||||
{
|
{
|
||||||
Connection * conn = allocConnection();
|
Connection * conn = allocConnection();
|
||||||
if (conn)
|
if (conn)
|
||||||
return Entry(conn, this);
|
return Entry(conn, this);
|
||||||
|
|
||||||
|
logger.trace("(%s): Unable to create a new connection: Allocation failed.", getDescription());
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
logger.trace("(%s): Unable to create a new connection: Max number of connections has been reached.", getDescription());
|
||||||
}
|
}
|
||||||
|
|
||||||
lock.unlock();
|
lock.unlock();
|
||||||
|
logger.trace("(%s): Sleeping for %d seconds.", getDescription(), MYSQLXX_POOL_SLEEP_ON_CONNECT_FAIL);
|
||||||
sleepForSeconds(MYSQLXX_POOL_SLEEP_ON_CONNECT_FAIL);
|
sleepForSeconds(MYSQLXX_POOL_SLEEP_ON_CONNECT_FAIL);
|
||||||
lock.lock();
|
lock.lock();
|
||||||
}
|
}
|
||||||
@ -158,12 +174,13 @@ Pool::Entry Pool::tryGet()
|
|||||||
/// Fixme: There is a race condition here b/c we do not synchronize with Pool::Entry's copy-assignment operator
|
/// Fixme: There is a race condition here b/c we do not synchronize with Pool::Entry's copy-assignment operator
|
||||||
if (connection_ptr->ref_count == 0)
|
if (connection_ptr->ref_count == 0)
|
||||||
{
|
{
|
||||||
Entry res(connection_ptr, this);
|
{
|
||||||
if (res.tryForceConnected()) /// Tries to reestablish connection as well
|
Entry res(connection_ptr, this);
|
||||||
return res;
|
if (res.tryForceConnected()) /// Tries to reestablish connection as well
|
||||||
|
return res;
|
||||||
|
}
|
||||||
|
|
||||||
auto & logger = Poco::Util::Application::instance().logger();
|
logger.debug("(%s): Idle connection to MySQL server cannot be recovered, dropping it.", getDescription());
|
||||||
logger.information("Idle connection to mysql server cannot be recovered, dropping it.");
|
|
||||||
|
|
||||||
/// This one is disconnected, cannot be reestablished and so needs to be disposed of.
|
/// This one is disconnected, cannot be reestablished and so needs to be disposed of.
|
||||||
connection_it = connections.erase(connection_it);
|
connection_it = connections.erase(connection_it);
|
||||||
@ -186,6 +203,8 @@ Pool::Entry Pool::tryGet()
|
|||||||
|
|
||||||
void Pool::removeConnection(Connection* connection)
|
void Pool::removeConnection(Connection* connection)
|
||||||
{
|
{
|
||||||
|
logger.trace("(%s): Removing connection.", getDescription());
|
||||||
|
|
||||||
std::lock_guard<std::mutex> lock(mutex);
|
std::lock_guard<std::mutex> lock(mutex);
|
||||||
if (connection)
|
if (connection)
|
||||||
{
|
{
|
||||||
@ -210,8 +229,6 @@ void Pool::Entry::forceConnected() const
|
|||||||
if (data == nullptr)
|
if (data == nullptr)
|
||||||
throw Poco::RuntimeException("Tried to access NULL database connection.");
|
throw Poco::RuntimeException("Tried to access NULL database connection.");
|
||||||
|
|
||||||
Poco::Util::Application & app = Poco::Util::Application::instance();
|
|
||||||
|
|
||||||
bool first = true;
|
bool first = true;
|
||||||
while (!tryForceConnected())
|
while (!tryForceConnected())
|
||||||
{
|
{
|
||||||
@ -220,7 +237,7 @@ void Pool::Entry::forceConnected() const
|
|||||||
else
|
else
|
||||||
sleepForSeconds(MYSQLXX_POOL_SLEEP_ON_CONNECT_FAIL);
|
sleepForSeconds(MYSQLXX_POOL_SLEEP_ON_CONNECT_FAIL);
|
||||||
|
|
||||||
app.logger().information("MYSQL: Reconnecting to " + pool->description);
|
pool->logger.debug("Entry: Reconnecting to MySQL server %s", pool->description);
|
||||||
data->conn.connect(
|
data->conn.connect(
|
||||||
pool->db.c_str(),
|
pool->db.c_str(),
|
||||||
pool->server.c_str(),
|
pool->server.c_str(),
|
||||||
@ -233,7 +250,8 @@ void Pool::Entry::forceConnected() const
|
|||||||
pool->ssl_key.c_str(),
|
pool->ssl_key.c_str(),
|
||||||
pool->connect_timeout,
|
pool->connect_timeout,
|
||||||
pool->rw_timeout,
|
pool->rw_timeout,
|
||||||
pool->enable_local_infile);
|
pool->enable_local_infile,
|
||||||
|
pool->opt_reconnect);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -242,18 +260,22 @@ bool Pool::Entry::tryForceConnected() const
|
|||||||
{
|
{
|
||||||
auto * const mysql_driver = data->conn.getDriver();
|
auto * const mysql_driver = data->conn.getDriver();
|
||||||
const auto prev_connection_id = mysql_thread_id(mysql_driver);
|
const auto prev_connection_id = mysql_thread_id(mysql_driver);
|
||||||
|
|
||||||
|
pool->logger.trace("Entry(connection %lu): sending PING to check if it is alive.", prev_connection_id);
|
||||||
if (data->conn.ping()) /// Attempts to reestablish lost connection
|
if (data->conn.ping()) /// Attempts to reestablish lost connection
|
||||||
{
|
{
|
||||||
const auto current_connection_id = mysql_thread_id(mysql_driver);
|
const auto current_connection_id = mysql_thread_id(mysql_driver);
|
||||||
if (prev_connection_id != current_connection_id)
|
if (prev_connection_id != current_connection_id)
|
||||||
{
|
{
|
||||||
auto & logger = Poco::Util::Application::instance().logger();
|
pool->logger.debug("Entry(connection %lu): Reconnected to MySQL server. Connection id changed: %lu -> %lu",
|
||||||
logger.information("Connection to mysql server has been reestablished. Connection id changed: %lu -> %lu",
|
current_connection_id, prev_connection_id, current_connection_id);
|
||||||
prev_connection_id, current_connection_id);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pool->logger.trace("Entry(connection %lu): PING ok.", current_connection_id);
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pool->logger.trace("Entry(connection %lu): PING failed.", prev_connection_id);
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -274,15 +296,13 @@ void Pool::initialize()
|
|||||||
|
|
||||||
Pool::Connection * Pool::allocConnection(bool dont_throw_if_failed_first_time)
|
Pool::Connection * Pool::allocConnection(bool dont_throw_if_failed_first_time)
|
||||||
{
|
{
|
||||||
Poco::Util::Application & app = Poco::Util::Application::instance();
|
std::unique_ptr<Connection> conn_ptr{new Connection};
|
||||||
|
|
||||||
std::unique_ptr<Connection> conn(new Connection);
|
|
||||||
|
|
||||||
try
|
try
|
||||||
{
|
{
|
||||||
app.logger().information("MYSQL: Connecting to " + description);
|
logger.debug("Connecting to %s", description);
|
||||||
|
|
||||||
conn->conn.connect(
|
conn_ptr->conn.connect(
|
||||||
db.c_str(),
|
db.c_str(),
|
||||||
server.c_str(),
|
server.c_str(),
|
||||||
user.c_str(),
|
user.c_str(),
|
||||||
@ -294,29 +314,29 @@ Pool::Connection * Pool::allocConnection(bool dont_throw_if_failed_first_time)
|
|||||||
ssl_key.c_str(),
|
ssl_key.c_str(),
|
||||||
connect_timeout,
|
connect_timeout,
|
||||||
rw_timeout,
|
rw_timeout,
|
||||||
enable_local_infile);
|
enable_local_infile,
|
||||||
|
opt_reconnect);
|
||||||
}
|
}
|
||||||
catch (mysqlxx::ConnectionFailed & e)
|
catch (mysqlxx::ConnectionFailed & e)
|
||||||
{
|
{
|
||||||
|
logger.error(e.what());
|
||||||
|
|
||||||
if ((!was_successful && !dont_throw_if_failed_first_time)
|
if ((!was_successful && !dont_throw_if_failed_first_time)
|
||||||
|| e.errnum() == ER_ACCESS_DENIED_ERROR
|
|| e.errnum() == ER_ACCESS_DENIED_ERROR
|
||||||
|| e.errnum() == ER_DBACCESS_DENIED_ERROR
|
|| e.errnum() == ER_DBACCESS_DENIED_ERROR
|
||||||
|| e.errnum() == ER_BAD_DB_ERROR)
|
|| e.errnum() == ER_BAD_DB_ERROR)
|
||||||
{
|
{
|
||||||
app.logger().error(e.what());
|
|
||||||
throw;
|
throw;
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
app.logger().error(e.what());
|
|
||||||
return nullptr;
|
return nullptr;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
connections.push_back(conn_ptr.get());
|
||||||
was_successful = true;
|
was_successful = true;
|
||||||
auto * connection = conn.release();
|
return conn_ptr.release();
|
||||||
connections.push_back(connection);
|
|
||||||
return connection;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
@ -6,6 +6,8 @@
|
|||||||
#include <atomic>
|
#include <atomic>
|
||||||
|
|
||||||
#include <Poco/Exception.h>
|
#include <Poco/Exception.h>
|
||||||
|
#include <Poco/Logger.h>
|
||||||
|
|
||||||
#include <mysqlxx/Connection.h>
|
#include <mysqlxx/Connection.h>
|
||||||
|
|
||||||
|
|
||||||
@ -157,27 +159,29 @@ public:
|
|||||||
*/
|
*/
|
||||||
Pool(const std::string & db_,
|
Pool(const std::string & db_,
|
||||||
const std::string & server_,
|
const std::string & server_,
|
||||||
const std::string & user_ = "",
|
const std::string & user_,
|
||||||
const std::string & password_ = "",
|
const std::string & password_,
|
||||||
unsigned port_ = 0,
|
unsigned port_,
|
||||||
const std::string & socket_ = "",
|
const std::string & socket_ = "",
|
||||||
unsigned connect_timeout_ = MYSQLXX_DEFAULT_TIMEOUT,
|
unsigned connect_timeout_ = MYSQLXX_DEFAULT_TIMEOUT,
|
||||||
unsigned rw_timeout_ = MYSQLXX_DEFAULT_RW_TIMEOUT,
|
unsigned rw_timeout_ = MYSQLXX_DEFAULT_RW_TIMEOUT,
|
||||||
unsigned default_connections_ = MYSQLXX_POOL_DEFAULT_START_CONNECTIONS,
|
unsigned default_connections_ = MYSQLXX_POOL_DEFAULT_START_CONNECTIONS,
|
||||||
unsigned max_connections_ = MYSQLXX_POOL_DEFAULT_MAX_CONNECTIONS,
|
unsigned max_connections_ = MYSQLXX_POOL_DEFAULT_MAX_CONNECTIONS,
|
||||||
unsigned enable_local_infile_ = MYSQLXX_DEFAULT_ENABLE_LOCAL_INFILE)
|
unsigned enable_local_infile_ = MYSQLXX_DEFAULT_ENABLE_LOCAL_INFILE,
|
||||||
: default_connections(default_connections_), max_connections(max_connections_),
|
bool opt_reconnect_ = MYSQLXX_DEFAULT_MYSQL_OPT_RECONNECT)
|
||||||
db(db_), server(server_), user(user_), password(password_), port(port_), socket(socket_),
|
: logger(Poco::Logger::get("mysqlxx::Pool")), default_connections(default_connections_),
|
||||||
connect_timeout(connect_timeout_), rw_timeout(rw_timeout_), enable_local_infile(enable_local_infile_) {}
|
max_connections(max_connections_), db(db_), server(server_), user(user_), password(password_), port(port_), socket(socket_),
|
||||||
|
connect_timeout(connect_timeout_), rw_timeout(rw_timeout_), enable_local_infile(enable_local_infile_),
|
||||||
|
opt_reconnect(opt_reconnect_) {}
|
||||||
|
|
||||||
Pool(const Pool & other)
|
Pool(const Pool & other)
|
||||||
: default_connections{other.default_connections},
|
: logger(other.logger), default_connections{other.default_connections},
|
||||||
max_connections{other.max_connections},
|
max_connections{other.max_connections},
|
||||||
db{other.db}, server{other.server},
|
db{other.db}, server{other.server},
|
||||||
user{other.user}, password{other.password},
|
user{other.user}, password{other.password},
|
||||||
port{other.port}, socket{other.socket},
|
port{other.port}, socket{other.socket},
|
||||||
connect_timeout{other.connect_timeout}, rw_timeout{other.rw_timeout},
|
connect_timeout{other.connect_timeout}, rw_timeout{other.rw_timeout},
|
||||||
enable_local_infile{other.enable_local_infile}
|
enable_local_infile{other.enable_local_infile}, opt_reconnect(other.opt_reconnect)
|
||||||
{}
|
{}
|
||||||
|
|
||||||
Pool & operator=(const Pool &) = delete;
|
Pool & operator=(const Pool &) = delete;
|
||||||
@ -201,6 +205,8 @@ public:
|
|||||||
void removeConnection(Connection * connection);
|
void removeConnection(Connection * connection);
|
||||||
|
|
||||||
protected:
|
protected:
|
||||||
|
Poco::Logger & logger;
|
||||||
|
|
||||||
/// Number of MySQL connections which are created at launch.
|
/// Number of MySQL connections which are created at launch.
|
||||||
unsigned default_connections;
|
unsigned default_connections;
|
||||||
/// Maximum possible number of connections
|
/// Maximum possible number of connections
|
||||||
@ -231,6 +237,7 @@ private:
|
|||||||
std::string ssl_cert;
|
std::string ssl_cert;
|
||||||
std::string ssl_key;
|
std::string ssl_key;
|
||||||
bool enable_local_infile;
|
bool enable_local_infile;
|
||||||
|
bool opt_reconnect;
|
||||||
|
|
||||||
/// True if connection was established at least once.
|
/// True if connection was established at least once.
|
||||||
bool was_successful{false};
|
bool was_successful{false};
|
||||||
|
@ -1,3 +1,7 @@
|
|||||||
|
#include <algorithm>
|
||||||
|
#include <ctime>
|
||||||
|
#include <random>
|
||||||
|
#include <thread>
|
||||||
#include <mysqlxx/PoolWithFailover.h>
|
#include <mysqlxx/PoolWithFailover.h>
|
||||||
|
|
||||||
|
|
||||||
@ -10,9 +14,12 @@ static bool startsWith(const std::string & s, const char * prefix)
|
|||||||
|
|
||||||
using namespace mysqlxx;
|
using namespace mysqlxx;
|
||||||
|
|
||||||
PoolWithFailover::PoolWithFailover(const Poco::Util::AbstractConfiguration & config_,
|
PoolWithFailover::PoolWithFailover(
|
||||||
const std::string & config_name_, const unsigned default_connections_,
|
const Poco::Util::AbstractConfiguration & config_,
|
||||||
const unsigned max_connections_, const size_t max_tries_)
|
const std::string & config_name_,
|
||||||
|
const unsigned default_connections_,
|
||||||
|
const unsigned max_connections_,
|
||||||
|
const size_t max_tries_)
|
||||||
: max_tries(max_tries_)
|
: max_tries(max_tries_)
|
||||||
{
|
{
|
||||||
shareable = config_.getBool(config_name_ + ".share_connection", false);
|
shareable = config_.getBool(config_name_ + ".share_connection", false);
|
||||||
@ -33,6 +40,19 @@ PoolWithFailover::PoolWithFailover(const Poco::Util::AbstractConfiguration & con
|
|||||||
std::make_shared<Pool>(config_, replica_name, default_connections_, max_connections_, config_name_.c_str()));
|
std::make_shared<Pool>(config_, replica_name, default_connections_, max_connections_, config_name_.c_str()));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// PoolWithFailover objects are stored in a cache inside PoolFactory.
|
||||||
|
/// This cache is reset by ExternalDictionariesLoader after every SYSTEM RELOAD DICTIONAR{Y|IES}
|
||||||
|
/// which triggers massive re-constructing of connection pools.
|
||||||
|
/// The state of PRNGs like std::mt19937 is considered to be quite heavy
|
||||||
|
/// thus here we attempt to optimize its construction.
|
||||||
|
static thread_local std::mt19937 rnd_generator(
|
||||||
|
std::hash<std::thread::id>{}(std::this_thread::get_id()) + std::clock());
|
||||||
|
for (auto & [_, replicas] : replicas_by_priority)
|
||||||
|
{
|
||||||
|
if (replicas.size() > 1)
|
||||||
|
std::shuffle(replicas.begin(), replicas.end(), rnd_generator);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
@ -41,16 +61,38 @@ PoolWithFailover::PoolWithFailover(const Poco::Util::AbstractConfiguration & con
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
PoolWithFailover::PoolWithFailover(const std::string & config_name_, const unsigned default_connections_,
|
|
||||||
const unsigned max_connections_, const size_t max_tries_)
|
PoolWithFailover::PoolWithFailover(
|
||||||
: PoolWithFailover{
|
const std::string & config_name_,
|
||||||
Poco::Util::Application::instance().config(), config_name_,
|
const unsigned default_connections_,
|
||||||
default_connections_, max_connections_, max_tries_}
|
const unsigned max_connections_,
|
||||||
|
const size_t max_tries_)
|
||||||
|
: PoolWithFailover{Poco::Util::Application::instance().config(),
|
||||||
|
config_name_, default_connections_, max_connections_, max_tries_}
|
||||||
{
|
{
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
PoolWithFailover::PoolWithFailover(
|
||||||
|
const std::string & database,
|
||||||
|
const RemoteDescription & addresses,
|
||||||
|
const std::string & user,
|
||||||
|
const std::string & password,
|
||||||
|
size_t max_tries_)
|
||||||
|
: max_tries(max_tries_)
|
||||||
|
, shareable(false)
|
||||||
|
{
|
||||||
|
/// Replicas have the same priority, but traversed replicas are moved to the end of the queue.
|
||||||
|
for (const auto & [host, port] : addresses)
|
||||||
|
{
|
||||||
|
replicas_by_priority[0].emplace_back(std::make_shared<Pool>(database, host, user, password, port));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
PoolWithFailover::PoolWithFailover(const PoolWithFailover & other)
|
PoolWithFailover::PoolWithFailover(const PoolWithFailover & other)
|
||||||
: max_tries{other.max_tries}, shareable{other.shareable}
|
: max_tries{other.max_tries}
|
||||||
|
, shareable{other.shareable}
|
||||||
{
|
{
|
||||||
if (shareable)
|
if (shareable)
|
||||||
{
|
{
|
||||||
|
@ -11,6 +11,8 @@
|
|||||||
namespace mysqlxx
|
namespace mysqlxx
|
||||||
{
|
{
|
||||||
/** MySQL connection pool with support of failover.
|
/** MySQL connection pool with support of failover.
|
||||||
|
*
|
||||||
|
* For dictionary source:
|
||||||
* Have information about replicas and their priorities.
|
* Have information about replicas and their priorities.
|
||||||
* Tries to connect to replica in an order of priority. When equal priority, choose replica with maximum time without connections.
|
* Tries to connect to replica in an order of priority. When equal priority, choose replica with maximum time without connections.
|
||||||
*
|
*
|
||||||
@ -68,42 +70,58 @@ namespace mysqlxx
|
|||||||
using PoolPtr = std::shared_ptr<Pool>;
|
using PoolPtr = std::shared_ptr<Pool>;
|
||||||
using Replicas = std::vector<PoolPtr>;
|
using Replicas = std::vector<PoolPtr>;
|
||||||
|
|
||||||
/// [priority][index] -> replica.
|
/// [priority][index] -> replica. Highest priority is 0.
|
||||||
using ReplicasByPriority = std::map<int, Replicas>;
|
using ReplicasByPriority = std::map<int, Replicas>;
|
||||||
|
|
||||||
ReplicasByPriority replicas_by_priority;
|
ReplicasByPriority replicas_by_priority;
|
||||||
|
|
||||||
/// Number of connection tries.
|
/// Number of connection tries.
|
||||||
size_t max_tries;
|
size_t max_tries;
|
||||||
/// Mutex for set of replicas.
|
/// Mutex for set of replicas.
|
||||||
std::mutex mutex;
|
std::mutex mutex;
|
||||||
|
|
||||||
/// Can the Pool be shared
|
/// Can the Pool be shared
|
||||||
bool shareable;
|
bool shareable;
|
||||||
|
|
||||||
public:
|
public:
|
||||||
using Entry = Pool::Entry;
|
using Entry = Pool::Entry;
|
||||||
|
using RemoteDescription = std::vector<std::pair<std::string, uint16_t>>;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* config_name Name of parameter in configuration file.
|
* * Mysql dictionary sourse related params:
|
||||||
|
* config_name Name of parameter in configuration file for dictionary source.
|
||||||
|
*
|
||||||
|
* * Mysql storage related parameters:
|
||||||
|
* replicas_description
|
||||||
|
*
|
||||||
|
* * Mutual parameters:
|
||||||
* default_connections Number of connection in pool to each replica at start.
|
* default_connections Number of connection in pool to each replica at start.
|
||||||
* max_connections Maximum number of connections in pool to each replica.
|
* max_connections Maximum number of connections in pool to each replica.
|
||||||
* max_tries_ Max number of connection tries.
|
* max_tries_ Max number of connection tries.
|
||||||
*/
|
*/
|
||||||
PoolWithFailover(const std::string & config_name_,
|
PoolWithFailover(
|
||||||
|
const std::string & config_name_,
|
||||||
unsigned default_connections_ = MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_START_CONNECTIONS,
|
unsigned default_connections_ = MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_START_CONNECTIONS,
|
||||||
unsigned max_connections_ = MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_MAX_CONNECTIONS,
|
unsigned max_connections_ = MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_MAX_CONNECTIONS,
|
||||||
size_t max_tries_ = MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_MAX_TRIES);
|
size_t max_tries_ = MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_MAX_TRIES);
|
||||||
|
|
||||||
PoolWithFailover(const Poco::Util::AbstractConfiguration & config_,
|
PoolWithFailover(
|
||||||
|
const Poco::Util::AbstractConfiguration & config_,
|
||||||
const std::string & config_name_,
|
const std::string & config_name_,
|
||||||
unsigned default_connections_ = MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_START_CONNECTIONS,
|
unsigned default_connections_ = MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_START_CONNECTIONS,
|
||||||
unsigned max_connections_ = MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_MAX_CONNECTIONS,
|
unsigned max_connections_ = MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_MAX_CONNECTIONS,
|
||||||
size_t max_tries_ = MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_MAX_TRIES);
|
size_t max_tries_ = MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_MAX_TRIES);
|
||||||
|
|
||||||
|
PoolWithFailover(
|
||||||
|
const std::string & database,
|
||||||
|
const RemoteDescription & addresses,
|
||||||
|
const std::string & user,
|
||||||
|
const std::string & password,
|
||||||
|
size_t max_tries_ = MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_MAX_TRIES);
|
||||||
|
|
||||||
PoolWithFailover(const PoolWithFailover & other);
|
PoolWithFailover(const PoolWithFailover & other);
|
||||||
|
|
||||||
/** Allocates a connection to use. */
|
/** Allocates a connection to use. */
|
||||||
Entry get();
|
Entry get();
|
||||||
};
|
};
|
||||||
|
|
||||||
|
using PoolWithFailoverPtr = std::shared_ptr<PoolWithFailover>;
|
||||||
}
|
}
|
||||||
|
@ -1,11 +1,16 @@
|
|||||||
#if __has_include(<mysql.h>)
|
#if __has_include(<mysql.h>)
|
||||||
|
#include <errmsg.h>
|
||||||
#include <mysql.h>
|
#include <mysql.h>
|
||||||
#else
|
#else
|
||||||
|
#include <mysql/errmsg.h>
|
||||||
#include <mysql/mysql.h>
|
#include <mysql/mysql.h>
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
#include <Poco/Logger.h>
|
||||||
|
|
||||||
#include <mysqlxx/Connection.h>
|
#include <mysqlxx/Connection.h>
|
||||||
#include <mysqlxx/Query.h>
|
#include <mysqlxx/Query.h>
|
||||||
|
#include <mysqlxx/Types.h>
|
||||||
|
|
||||||
|
|
||||||
namespace mysqlxx
|
namespace mysqlxx
|
||||||
@ -57,8 +62,24 @@ void Query::reset()
|
|||||||
void Query::executeImpl()
|
void Query::executeImpl()
|
||||||
{
|
{
|
||||||
std::string query_string = query_buf.str();
|
std::string query_string = query_buf.str();
|
||||||
if (mysql_real_query(conn->getDriver(), query_string.data(), query_string.size()))
|
|
||||||
throw BadQuery(errorMessage(conn->getDriver()), mysql_errno(conn->getDriver()));
|
MYSQL* mysql_driver = conn->getDriver();
|
||||||
|
|
||||||
|
auto & logger = Poco::Logger::get("mysqlxx::Query");
|
||||||
|
logger.trace("Running MySQL query using connection %lu", mysql_thread_id(mysql_driver));
|
||||||
|
if (mysql_real_query(mysql_driver, query_string.data(), query_string.size()))
|
||||||
|
{
|
||||||
|
const auto err_no = mysql_errno(mysql_driver);
|
||||||
|
switch (err_no)
|
||||||
|
{
|
||||||
|
case CR_SERVER_GONE_ERROR:
|
||||||
|
[[fallthrough]];
|
||||||
|
case CR_SERVER_LOST:
|
||||||
|
throw ConnectionLost(errorMessage(mysql_driver), err_no);
|
||||||
|
default:
|
||||||
|
throw BadQuery(errorMessage(mysql_driver), err_no);
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
UseQueryResult Query::use()
|
UseQueryResult Query::use()
|
||||||
@ -71,16 +92,6 @@ UseQueryResult Query::use()
|
|||||||
return UseQueryResult(res, conn, this);
|
return UseQueryResult(res, conn, this);
|
||||||
}
|
}
|
||||||
|
|
||||||
StoreQueryResult Query::store()
|
|
||||||
{
|
|
||||||
executeImpl();
|
|
||||||
MYSQL_RES * res = mysql_store_result(conn->getDriver());
|
|
||||||
if (!res)
|
|
||||||
checkError(conn->getDriver());
|
|
||||||
|
|
||||||
return StoreQueryResult(res, conn, this);
|
|
||||||
}
|
|
||||||
|
|
||||||
void Query::execute()
|
void Query::execute()
|
||||||
{
|
{
|
||||||
executeImpl();
|
executeImpl();
|
||||||
|
@ -3,7 +3,6 @@
|
|||||||
#include <sstream>
|
#include <sstream>
|
||||||
|
|
||||||
#include <mysqlxx/UseQueryResult.h>
|
#include <mysqlxx/UseQueryResult.h>
|
||||||
#include <mysqlxx/StoreQueryResult.h>
|
|
||||||
|
|
||||||
|
|
||||||
namespace mysqlxx
|
namespace mysqlxx
|
||||||
@ -46,11 +45,6 @@ public:
|
|||||||
*/
|
*/
|
||||||
UseQueryResult use();
|
UseQueryResult use();
|
||||||
|
|
||||||
/** Выполнить запрос с загрузкой на клиента всех строк.
|
|
||||||
* Требуется оперативка, чтобы вместить весь результат, зато к строкам можно обращаться в произвольном порядке.
|
|
||||||
*/
|
|
||||||
StoreQueryResult store();
|
|
||||||
|
|
||||||
/// Значение auto increment после последнего INSERT-а.
|
/// Значение auto increment после последнего INSERT-а.
|
||||||
UInt64 insertID();
|
UInt64 insertID();
|
||||||
|
|
||||||
|
@ -9,7 +9,7 @@ class Connection;
|
|||||||
class Query;
|
class Query;
|
||||||
|
|
||||||
|
|
||||||
/** Базовый класс для UseQueryResult и StoreQueryResult.
|
/** Базовый класс для UseQueryResult.
|
||||||
* Содержит общую часть реализации,
|
* Содержит общую часть реализации,
|
||||||
* Ссылается на Connection. Если уничтожить Connection, то пользоваться ResultBase и любым результатом нельзя.
|
* Ссылается на Connection. Если уничтожить Connection, то пользоваться ResultBase и любым результатом нельзя.
|
||||||
* Использовать объект можно только для результата одного запроса!
|
* Использовать объект можно только для результата одного запроса!
|
||||||
|
@ -35,7 +35,7 @@ public:
|
|||||||
{
|
{
|
||||||
}
|
}
|
||||||
|
|
||||||
/** Для того, чтобы создать Row, используйте соответствующие методы UseQueryResult или StoreQueryResult. */
|
/** Для того, чтобы создать Row, используйте соответствующие методы UseQueryResult. */
|
||||||
Row(MYSQL_ROW row_, ResultBase * res_, MYSQL_LENGTHS lengths_)
|
Row(MYSQL_ROW row_, ResultBase * res_, MYSQL_LENGTHS lengths_)
|
||||||
: row(row_), res(res_), lengths(lengths_)
|
: row(row_), res(res_), lengths(lengths_)
|
||||||
{
|
{
|
||||||
|
@ -1,30 +0,0 @@
|
|||||||
#if __has_include(<mysql.h>)
|
|
||||||
#include <mysql.h>
|
|
||||||
#else
|
|
||||||
#include <mysql/mysql.h>
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#include <mysqlxx/Connection.h>
|
|
||||||
#include <mysqlxx/StoreQueryResult.h>
|
|
||||||
|
|
||||||
|
|
||||||
namespace mysqlxx
|
|
||||||
{
|
|
||||||
|
|
||||||
StoreQueryResult::StoreQueryResult(MYSQL_RES * res_, Connection * conn_, const Query * query_) : ResultBase(res_, conn_, query_)
|
|
||||||
{
|
|
||||||
UInt64 rows = mysql_num_rows(res);
|
|
||||||
reserve(rows);
|
|
||||||
lengths.resize(rows * num_fields);
|
|
||||||
|
|
||||||
for (UInt64 i = 0; MYSQL_ROW row = mysql_fetch_row(res); ++i)
|
|
||||||
{
|
|
||||||
MYSQL_LENGTHS lengths_for_row = mysql_fetch_lengths(res);
|
|
||||||
memcpy(&lengths[i * num_fields], lengths_for_row, sizeof(lengths[0]) * num_fields);
|
|
||||||
|
|
||||||
push_back(Row(row, this, &lengths[i * num_fields]));
|
|
||||||
}
|
|
||||||
checkError(conn->getDriver());
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
@ -1,45 +0,0 @@
|
|||||||
#pragma once
|
|
||||||
|
|
||||||
#include <vector>
|
|
||||||
|
|
||||||
#include <mysqlxx/ResultBase.h>
|
|
||||||
#include <mysqlxx/Row.h>
|
|
||||||
|
|
||||||
|
|
||||||
namespace mysqlxx
|
|
||||||
{
|
|
||||||
|
|
||||||
class Connection;
|
|
||||||
|
|
||||||
|
|
||||||
/** Результат выполнения запроса, загруженный полностью на клиента.
|
|
||||||
* Это требует оперативку, чтобы вместить весь результат,
|
|
||||||
* но зато реализует произвольный доступ к строкам по индексу.
|
|
||||||
* Если размер результата большой - используйте лучше UseQueryResult.
|
|
||||||
* Объект содержит ссылку на Connection.
|
|
||||||
* Если уничтожить Connection, то объект становится некорректным и все строки результата - тоже.
|
|
||||||
* Если задать следующий запрос в соединении, то объект и все строки тоже становятся некорректными.
|
|
||||||
* Использовать объект можно только для результата одного запроса!
|
|
||||||
* (При попытке присвоить объекту результат следующего запроса - UB.)
|
|
||||||
*/
|
|
||||||
class StoreQueryResult : public std::vector<Row>, public ResultBase
|
|
||||||
{
|
|
||||||
public:
|
|
||||||
StoreQueryResult(MYSQL_RES * res_, Connection * conn_, const Query * query_);
|
|
||||||
|
|
||||||
size_t num_rows() const { return size(); }
|
|
||||||
|
|
||||||
private:
|
|
||||||
|
|
||||||
/** Не смотря на то, что весь результат выполнения запроса загружается на клиента,
|
|
||||||
* и все указатели MYSQL_ROW на отдельные строки различные,
|
|
||||||
* при этом функция mysql_fetch_lengths() возвращает длины
|
|
||||||
* для текущей строки по одному и тому же адресу.
|
|
||||||
* То есть, чтобы можно было пользоваться несколькими Row одновременно,
|
|
||||||
* необходимо заранее куда-то сложить все длины.
|
|
||||||
*/
|
|
||||||
using Lengths = std::vector<MYSQL_LENGTH>;
|
|
||||||
Lengths lengths;
|
|
||||||
};
|
|
||||||
|
|
||||||
}
|
|
@ -12,8 +12,7 @@ class Connection;
|
|||||||
|
|
||||||
/** Результат выполнения запроса, предназначенный для чтения строк, одна за другой.
|
/** Результат выполнения запроса, предназначенный для чтения строк, одна за другой.
|
||||||
* В памяти при этом хранится только одна, текущая строка.
|
* В памяти при этом хранится только одна, текущая строка.
|
||||||
* В отличие от StoreQueryResult, произвольный доступ к строкам невозможен,
|
* При чтении следующей строки, предыдущая становится некорректной.
|
||||||
* а также, при чтении следующей строки, предыдущая становится некорректной.
|
|
||||||
* Вы обязаны прочитать все строки из результата
|
* Вы обязаны прочитать все строки из результата
|
||||||
* (вызывать функцию fetch(), пока она не вернёт значение, преобразующееся к false),
|
* (вызывать функцию fetch(), пока она не вернёт значение, преобразующееся к false),
|
||||||
* иначе при следующем запросе будет выкинуто исключение с текстом "Commands out of sync".
|
* иначе при следующем запросе будет выкинуто исключение с текстом "Commands out of sync".
|
||||||
|
@ -25,7 +25,7 @@ class ResultBase;
|
|||||||
|
|
||||||
/** Represents a single value read from MySQL.
|
/** Represents a single value read from MySQL.
|
||||||
* It doesn't owns the value. It's just a wrapper of a pair (const char *, size_t).
|
* It doesn't owns the value. It's just a wrapper of a pair (const char *, size_t).
|
||||||
* If the UseQueryResult/StoreQueryResult or Connection is destroyed,
|
* If the UseQueryResult or Connection is destroyed,
|
||||||
* or you have read the next Row while using UseQueryResult, then the object is invalidated.
|
* or you have read the next Row while using UseQueryResult, then the object is invalidated.
|
||||||
* Allows to transform (parse) the value to various data types:
|
* Allows to transform (parse) the value to various data types:
|
||||||
* - with getUInt(), getString(), ... (recommended);
|
* - with getUInt(), getString(), ... (recommended);
|
||||||
|
@ -1,5 +1,2 @@
|
|||||||
add_executable (mysqlxx_test mysqlxx_test.cpp)
|
|
||||||
target_link_libraries (mysqlxx_test PRIVATE mysqlxx)
|
|
||||||
|
|
||||||
add_executable (mysqlxx_pool_test mysqlxx_pool_test.cpp)
|
add_executable (mysqlxx_pool_test mysqlxx_pool_test.cpp)
|
||||||
target_link_libraries (mysqlxx_pool_test PRIVATE mysqlxx)
|
target_link_libraries (mysqlxx_pool_test PRIVATE mysqlxx)
|
||||||
|
@ -1,21 +0,0 @@
|
|||||||
<?xml version = '1.0' encoding = 'utf-8'?>
|
|
||||||
<yandex>
|
|
||||||
<mysql_goals>
|
|
||||||
<port>3306</port>
|
|
||||||
<user>root</user>
|
|
||||||
<db>Metrica</db>
|
|
||||||
<password>qwerty</password>
|
|
||||||
<replica>
|
|
||||||
<host>example02t</host>
|
|
||||||
<priority>0</priority>
|
|
||||||
</replica>
|
|
||||||
<replica>
|
|
||||||
<host>example02t</host>
|
|
||||||
<port>3306</port>
|
|
||||||
<user>root</user>
|
|
||||||
<password>qwerty</password>
|
|
||||||
<db>Metrica</db>
|
|
||||||
<priority>1</priority>
|
|
||||||
</replica>
|
|
||||||
</mysql_goals>
|
|
||||||
</yandex>
|
|
@ -1,216 +0,0 @@
|
|||||||
#include <iostream>
|
|
||||||
#include <mysqlxx/mysqlxx.h>
|
|
||||||
|
|
||||||
|
|
||||||
int main(int, char **)
|
|
||||||
{
|
|
||||||
try
|
|
||||||
{
|
|
||||||
mysqlxx::Connection connection("test", "127.0.0.1", "root", "qwerty", 3306);
|
|
||||||
std::cerr << "Connected." << std::endl;
|
|
||||||
|
|
||||||
{
|
|
||||||
mysqlxx::Query query = connection.query();
|
|
||||||
query << "SELECT 1 x, '2010-01-01 01:01:01' d";
|
|
||||||
mysqlxx::UseQueryResult result = query.use();
|
|
||||||
std::cerr << "use() called." << std::endl;
|
|
||||||
|
|
||||||
while (mysqlxx::Row row = result.fetch())
|
|
||||||
{
|
|
||||||
std::cerr << "Fetched row." << std::endl;
|
|
||||||
std::cerr << row[0] << ", " << row["x"] << std::endl;
|
|
||||||
std::cerr << row[1] << ", " << row["d"]
|
|
||||||
<< ", " << row[1].getDate()
|
|
||||||
<< ", " << row[1].getDateTime()
|
|
||||||
<< ", " << row[1].getDate()
|
|
||||||
<< ", " << row[1].getDateTime()
|
|
||||||
<< std::endl
|
|
||||||
<< row[1].getDate() << ", " << row[1].getDateTime() << std::endl
|
|
||||||
<< row[1].getDate() << ", " << row[1].getDateTime() << std::endl
|
|
||||||
<< row[1].getDate() << ", " << row[1].getDateTime() << std::endl
|
|
||||||
<< row[1].getDate() << ", " << row[1].getDateTime() << std::endl
|
|
||||||
;
|
|
||||||
|
|
||||||
time_t t1 = row[0];
|
|
||||||
time_t t2 = row[1];
|
|
||||||
std::cerr << t1 << ", " << LocalDateTime(t1) << std::endl;
|
|
||||||
std::cerr << t2 << ", " << LocalDateTime(t2) << std::endl;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
{
|
|
||||||
mysqlxx::Query query = connection.query();
|
|
||||||
query << "SELECT 1234567890 abc, 12345.67890 def UNION ALL SELECT 9876543210, 98765.43210";
|
|
||||||
mysqlxx::StoreQueryResult result = query.store();
|
|
||||||
|
|
||||||
std::cerr << result.at(0)["abc"].getUInt() << ", " << result.at(0)["def"].getDouble() << std::endl
|
|
||||||
<< result.at(1)["abc"].getUInt() << ", " << result.at(1)["def"].getDouble() << std::endl;
|
|
||||||
}
|
|
||||||
|
|
||||||
{
|
|
||||||
mysqlxx::UseQueryResult result = connection.query("SELECT 'abc\\\\def' x").use();
|
|
||||||
mysqlxx::Row row = result.fetch();
|
|
||||||
std::cerr << row << std::endl;
|
|
||||||
std::cerr << row << std::endl;
|
|
||||||
}
|
|
||||||
|
|
||||||
{
|
|
||||||
mysqlxx::Query query = connection.query("SEL");
|
|
||||||
query << "ECT 1";
|
|
||||||
|
|
||||||
std::cerr << query.store().at(0).at(0) << std::endl;
|
|
||||||
}
|
|
||||||
|
|
||||||
{
|
|
||||||
/// Копирование Query
|
|
||||||
mysqlxx::Query query = connection.query("SELECT 'Ok' x");
|
|
||||||
using Queries = std::vector<mysqlxx::Query>;
|
|
||||||
Queries queries;
|
|
||||||
queries.push_back(query);
|
|
||||||
|
|
||||||
for (auto & q : queries)
|
|
||||||
{
|
|
||||||
std::cerr << q.str() << std::endl;
|
|
||||||
std::cerr << q.store().at(0) << std::endl;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
{
|
|
||||||
/// Копирование Query
|
|
||||||
mysqlxx::Query query1 = connection.query("SELECT");
|
|
||||||
mysqlxx::Query query2 = query1;
|
|
||||||
query2 << " 1";
|
|
||||||
|
|
||||||
std::cerr << query1.str() << ", " << query2.str() << std::endl;
|
|
||||||
}
|
|
||||||
|
|
||||||
{
|
|
||||||
/// Копирование Query
|
|
||||||
using Queries = std::list<mysqlxx::Query>;
|
|
||||||
Queries queries;
|
|
||||||
queries.push_back(connection.query("SELECT"));
|
|
||||||
mysqlxx::Query & qref = queries.back();
|
|
||||||
qref << " 1";
|
|
||||||
|
|
||||||
for (auto & query : queries)
|
|
||||||
{
|
|
||||||
std::cerr << query.str() << std::endl;
|
|
||||||
std::cerr << query.store().at(0) << std::endl;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
{
|
|
||||||
/// Транзакции
|
|
||||||
connection.query("DROP TABLE IF EXISTS tmp").execute();
|
|
||||||
connection.query("CREATE TABLE tmp (x INT, PRIMARY KEY (x)) ENGINE = InnoDB").execute();
|
|
||||||
|
|
||||||
mysqlxx::Transaction trans(connection);
|
|
||||||
connection.query("INSERT INTO tmp VALUES (1)").execute();
|
|
||||||
|
|
||||||
std::cerr << connection.query("SELECT * FROM tmp").store().size() << std::endl;
|
|
||||||
|
|
||||||
trans.rollback();
|
|
||||||
|
|
||||||
std::cerr << connection.query("SELECT * FROM tmp").store().size() << std::endl;
|
|
||||||
}
|
|
||||||
|
|
||||||
{
|
|
||||||
/// Транзакции
|
|
||||||
connection.query("DROP TABLE IF EXISTS tmp").execute();
|
|
||||||
connection.query("CREATE TABLE tmp (x INT, PRIMARY KEY (x)) ENGINE = InnoDB").execute();
|
|
||||||
|
|
||||||
{
|
|
||||||
mysqlxx::Transaction trans(connection);
|
|
||||||
connection.query("INSERT INTO tmp VALUES (1)").execute();
|
|
||||||
std::cerr << connection.query("SELECT * FROM tmp").store().size() << std::endl;
|
|
||||||
}
|
|
||||||
|
|
||||||
std::cerr << connection.query("SELECT * FROM tmp").store().size() << std::endl;
|
|
||||||
}
|
|
||||||
|
|
||||||
{
|
|
||||||
/// Транзакции
|
|
||||||
mysqlxx::Connection connection2("test", "127.0.0.1", "root", "qwerty", 3306);
|
|
||||||
connection2.query("DROP TABLE IF EXISTS tmp").execute();
|
|
||||||
connection2.query("CREATE TABLE tmp (x INT, PRIMARY KEY (x)) ENGINE = InnoDB").execute();
|
|
||||||
|
|
||||||
mysqlxx::Transaction trans(connection2);
|
|
||||||
connection2.query("INSERT INTO tmp VALUES (1)").execute();
|
|
||||||
std::cerr << connection2.query("SELECT * FROM tmp").store().size() << std::endl;
|
|
||||||
}
|
|
||||||
std::cerr << connection.query("SELECT * FROM tmp").store().size() << std::endl;
|
|
||||||
|
|
||||||
{
|
|
||||||
/// NULL
|
|
||||||
mysqlxx::Null<int> x = mysqlxx::null;
|
|
||||||
std::cerr << (x == mysqlxx::null ? "Ok" : "Fail") << std::endl;
|
|
||||||
std::cerr << (x == 0 ? "Fail" : "Ok") << std::endl;
|
|
||||||
std::cerr << (x.isNull() ? "Ok" : "Fail") << std::endl;
|
|
||||||
x = 1;
|
|
||||||
std::cerr << (x == mysqlxx::null ? "Fail" : "Ok") << std::endl;
|
|
||||||
std::cerr << (x == 0 ? "Fail" : "Ok") << std::endl;
|
|
||||||
std::cerr << (x == 1 ? "Ok" : "Fail") << std::endl;
|
|
||||||
std::cerr << (x.isNull() ? "Fail" : "Ok") << std::endl;
|
|
||||||
}
|
|
||||||
|
|
||||||
{
|
|
||||||
/// Исключения при попытке достать значение не того типа
|
|
||||||
try
|
|
||||||
{
|
|
||||||
connection.query("SELECT -1").store().at(0).at(0).getUInt();
|
|
||||||
std::cerr << "Fail" << std::endl;
|
|
||||||
}
|
|
||||||
catch (const mysqlxx::Exception & e)
|
|
||||||
{
|
|
||||||
std::cerr << "Ok, " << e.message() << std::endl;
|
|
||||||
}
|
|
||||||
|
|
||||||
try
|
|
||||||
{
|
|
||||||
connection.query("SELECT 'xxx'").store().at(0).at(0).getInt();
|
|
||||||
std::cerr << "Fail" << std::endl;
|
|
||||||
}
|
|
||||||
catch (const mysqlxx::Exception & e)
|
|
||||||
{
|
|
||||||
std::cerr << "Ok, " << e.message() << std::endl;
|
|
||||||
}
|
|
||||||
|
|
||||||
try
|
|
||||||
{
|
|
||||||
connection.query("SELECT NULL").store().at(0).at(0).getString();
|
|
||||||
std::cerr << "Fail" << std::endl;
|
|
||||||
}
|
|
||||||
catch (const mysqlxx::Exception & e)
|
|
||||||
{
|
|
||||||
std::cerr << "Ok, " << e.message() << std::endl;
|
|
||||||
}
|
|
||||||
|
|
||||||
try
|
|
||||||
{
|
|
||||||
connection.query("SELECT 123").store().at(0).at(0).getDate();
|
|
||||||
std::cerr << "Fail" << std::endl;
|
|
||||||
}
|
|
||||||
catch (const mysqlxx::Exception & e)
|
|
||||||
{
|
|
||||||
std::cerr << "Ok, " << e.message() << std::endl;
|
|
||||||
}
|
|
||||||
|
|
||||||
try
|
|
||||||
{
|
|
||||||
connection.query("SELECT '2011-01-01'").store().at(0).at(0).getDateTime();
|
|
||||||
std::cerr << "Fail" << std::endl;
|
|
||||||
}
|
|
||||||
catch (const mysqlxx::Exception & e)
|
|
||||||
{
|
|
||||||
std::cerr << "Ok, " << e.message() << std::endl;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
catch (const mysqlxx::Exception & e)
|
|
||||||
{
|
|
||||||
std::cerr << e.code() << ", " << e.message() << std::endl;
|
|
||||||
throw;
|
|
||||||
}
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
@ -4,5 +4,5 @@
|
|||||||
add_library(readpassphrase readpassphrase.c)
|
add_library(readpassphrase readpassphrase.c)
|
||||||
|
|
||||||
set_target_properties(readpassphrase PROPERTIES LINKER_LANGUAGE C)
|
set_target_properties(readpassphrase PROPERTIES LINKER_LANGUAGE C)
|
||||||
target_compile_options(readpassphrase PRIVATE -Wno-unused-result -Wno-reserved-id-macro)
|
target_compile_options(readpassphrase PRIVATE -Wno-unused-result -Wno-reserved-id-macro -Wno-disabled-macro-expansion)
|
||||||
target_include_directories(readpassphrase PUBLIC .)
|
target_include_directories(readpassphrase PUBLIC .)
|
||||||
|
@ -94,7 +94,7 @@ restart:
|
|||||||
if (input != STDIN_FILENO && tcgetattr(input, &oterm) == 0) {
|
if (input != STDIN_FILENO && tcgetattr(input, &oterm) == 0) {
|
||||||
memcpy(&term, &oterm, sizeof(term));
|
memcpy(&term, &oterm, sizeof(term));
|
||||||
if (!(flags & RPP_ECHO_ON))
|
if (!(flags & RPP_ECHO_ON))
|
||||||
term.c_lflag &= ~(ECHO | ECHONL);
|
term.c_lflag &= ~((unsigned int) (ECHO | ECHONL));
|
||||||
#ifdef VSTATUS
|
#ifdef VSTATUS
|
||||||
if (term.c_cc[VSTATUS] != _POSIX_VDISABLE)
|
if (term.c_cc[VSTATUS] != _POSIX_VDISABLE)
|
||||||
term.c_cc[VSTATUS] = _POSIX_VDISABLE;
|
term.c_cc[VSTATUS] = _POSIX_VDISABLE;
|
||||||
|
@ -16,6 +16,10 @@ if (ENABLE_CLANG_TIDY)
|
|||||||
|
|
||||||
set (USE_CLANG_TIDY ON)
|
set (USE_CLANG_TIDY ON)
|
||||||
|
|
||||||
|
# clang-tidy requires assertions to guide the analysis
|
||||||
|
# Note that NDEBUG is set implicitly by CMake for non-debug builds
|
||||||
|
set(COMPILER_FLAGS "${COMPILER_FLAGS} -UNDEBUG")
|
||||||
|
|
||||||
# The variable CMAKE_CXX_CLANG_TIDY will be set inside src and base directories with non third-party code.
|
# The variable CMAKE_CXX_CLANG_TIDY will be set inside src and base directories with non third-party code.
|
||||||
# set (CMAKE_CXX_CLANG_TIDY "${CLANG_TIDY_PATH}")
|
# set (CMAKE_CXX_CLANG_TIDY "${CLANG_TIDY_PATH}")
|
||||||
elseif (FAIL_ON_UNSUPPORTED_OPTIONS_COMBINATION)
|
elseif (FAIL_ON_UNSUPPORTED_OPTIONS_COMBINATION)
|
||||||
|
@ -1,7 +1,7 @@
|
|||||||
if (CMAKE_SYSTEM_PROCESSOR MATCHES "amd64|x86_64")
|
if (CMAKE_SYSTEM_PROCESSOR MATCHES "amd64|x86_64")
|
||||||
set (ARCH_AMD64 1)
|
set (ARCH_AMD64 1)
|
||||||
endif ()
|
endif ()
|
||||||
if (CMAKE_SYSTEM_PROCESSOR MATCHES "^(aarch64.*|AARCH64.*)")
|
if (CMAKE_SYSTEM_PROCESSOR MATCHES "^(aarch64.*|AARCH64.*|arm64.*|ARM64.*)")
|
||||||
set (ARCH_AARCH64 1)
|
set (ARCH_AARCH64 1)
|
||||||
endif ()
|
endif ()
|
||||||
if (ARCH_AARCH64 OR CMAKE_SYSTEM_PROCESSOR MATCHES "arm")
|
if (ARCH_AARCH64 OR CMAKE_SYSTEM_PROCESSOR MATCHES "arm")
|
||||||
|
@ -1,9 +1,9 @@
|
|||||||
# This strings autochanged from release_lib.sh:
|
# This strings autochanged from release_lib.sh:
|
||||||
SET(VERSION_REVISION 54447)
|
SET(VERSION_REVISION 54451)
|
||||||
SET(VERSION_MAJOR 21)
|
SET(VERSION_MAJOR 21)
|
||||||
SET(VERSION_MINOR 2)
|
SET(VERSION_MINOR 6)
|
||||||
SET(VERSION_PATCH 1)
|
SET(VERSION_PATCH 1)
|
||||||
SET(VERSION_GITHASH 53d0c9fa7255aa1dc48991d19f4246ff71cc2fd7)
|
SET(VERSION_GITHASH 96fced4c3cf432fb0b401d2ab01f0c56e5f74a96)
|
||||||
SET(VERSION_DESCRIBE v21.2.1.1-prestable)
|
SET(VERSION_DESCRIBE v21.6.1.1-prestable)
|
||||||
SET(VERSION_STRING 21.2.1.1)
|
SET(VERSION_STRING 21.6.1.1)
|
||||||
# end of autochange
|
# end of autochange
|
||||||
|
@ -1,11 +1,14 @@
|
|||||||
set (DEFAULT_LIBS "-nodefaultlibs")
|
set (DEFAULT_LIBS "-nodefaultlibs")
|
||||||
|
|
||||||
if (NOT COMPILER_CLANG)
|
|
||||||
message (FATAL_ERROR "Darwin build is supported only for Clang")
|
|
||||||
endif ()
|
|
||||||
|
|
||||||
set (DEFAULT_LIBS "${DEFAULT_LIBS} ${COVERAGE_OPTION} -lc -lm -lpthread -ldl")
|
set (DEFAULT_LIBS "${DEFAULT_LIBS} ${COVERAGE_OPTION} -lc -lm -lpthread -ldl")
|
||||||
|
|
||||||
|
if (COMPILER_GCC)
|
||||||
|
set (DEFAULT_LIBS "${DEFAULT_LIBS} -lgcc_eh")
|
||||||
|
if (ARCH_AARCH64)
|
||||||
|
set (DEFAULT_LIBS "${DEFAULT_LIBS} -lgcc")
|
||||||
|
endif ()
|
||||||
|
endif ()
|
||||||
|
|
||||||
message(STATUS "Default libraries: ${DEFAULT_LIBS}")
|
message(STATUS "Default libraries: ${DEFAULT_LIBS}")
|
||||||
|
|
||||||
set(CMAKE_CXX_STANDARD_LIBRARIES ${DEFAULT_LIBS})
|
set(CMAKE_CXX_STANDARD_LIBRARIES ${DEFAULT_LIBS})
|
||||||
|
14
cmake/darwin/toolchain-aarch64.cmake
Normal file
14
cmake/darwin/toolchain-aarch64.cmake
Normal file
@ -0,0 +1,14 @@
|
|||||||
|
set (CMAKE_SYSTEM_NAME "Darwin")
|
||||||
|
set (CMAKE_SYSTEM_PROCESSOR "aarch64")
|
||||||
|
set (CMAKE_C_COMPILER_TARGET "aarch64-apple-darwin")
|
||||||
|
set (CMAKE_CXX_COMPILER_TARGET "aarch64-apple-darwin")
|
||||||
|
set (CMAKE_ASM_COMPILER_TARGET "aarch64-apple-darwin")
|
||||||
|
set (CMAKE_OSX_SYSROOT "${CMAKE_CURRENT_LIST_DIR}/../toolchain/darwin-aarch64")
|
||||||
|
|
||||||
|
set (CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY) # disable linkage check - it doesn't work in CMake
|
||||||
|
|
||||||
|
set (HAS_PRE_1970_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
|
||||||
|
set (HAS_PRE_1970_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)
|
||||||
|
|
||||||
|
set (HAS_POST_2038_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
|
||||||
|
set (HAS_POST_2038_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)
|
@ -1,3 +1,8 @@
|
|||||||
|
if (OS_DARWIN AND COMPILER_GCC)
|
||||||
|
# AMQP-CPP requires libuv which cannot be built with GCC in macOS due to a bug: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=93082
|
||||||
|
set (ENABLE_AMQPCPP OFF CACHE INTERNAL "")
|
||||||
|
endif()
|
||||||
|
|
||||||
option(ENABLE_AMQPCPP "Enalbe AMQP-CPP" ${ENABLE_LIBRARIES})
|
option(ENABLE_AMQPCPP "Enalbe AMQP-CPP" ${ENABLE_LIBRARIES})
|
||||||
|
|
||||||
if (NOT ENABLE_AMQPCPP)
|
if (NOT ENABLE_AMQPCPP)
|
||||||
|
@ -1,4 +1,8 @@
|
|||||||
option (ENABLE_BASE64 "Enable base64" ${ENABLE_LIBRARIES})
|
if(ARCH_AMD64 OR ARCH_ARM)
|
||||||
|
option (ENABLE_BASE64 "Enable base64" ${ENABLE_LIBRARIES})
|
||||||
|
elseif(ENABLE_BASE64)
|
||||||
|
message (${RECONFIGURE_MESSAGE_LEVEL} "base64 library is only supported on x86_64 and aarch64")
|
||||||
|
endif()
|
||||||
|
|
||||||
if (NOT ENABLE_BASE64)
|
if (NOT ENABLE_BASE64)
|
||||||
return()
|
return()
|
||||||
|
@ -1,3 +1,8 @@
|
|||||||
|
if (OS_DARWIN AND COMPILER_GCC)
|
||||||
|
# Cassandra requires libuv which cannot be built with GCC in macOS due to a bug: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=93082
|
||||||
|
set (ENABLE_CASSANDRA OFF CACHE INTERNAL "")
|
||||||
|
endif()
|
||||||
|
|
||||||
option(ENABLE_CASSANDRA "Enable Cassandra" ${ENABLE_LIBRARIES})
|
option(ENABLE_CASSANDRA "Enable Cassandra" ${ENABLE_LIBRARIES})
|
||||||
|
|
||||||
if (NOT ENABLE_CASSANDRA)
|
if (NOT ENABLE_CASSANDRA)
|
||||||
|
@ -32,27 +32,27 @@ if (CCACHE_FOUND AND NOT COMPILER_MATCHES_CCACHE)
|
|||||||
if (CCACHE_VERSION VERSION_GREATER "3.2.0" OR NOT CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
|
if (CCACHE_VERSION VERSION_GREATER "3.2.0" OR NOT CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
|
||||||
message(STATUS "Using ${CCACHE_FOUND} ${CCACHE_VERSION}")
|
message(STATUS "Using ${CCACHE_FOUND} ${CCACHE_VERSION}")
|
||||||
|
|
||||||
# debian (debhlpers) set SOURCE_DATE_EPOCH environment variable, that is
|
set (CMAKE_CXX_COMPILER_LAUNCHER ${CCACHE_FOUND} ${CMAKE_CXX_COMPILER_LAUNCHER})
|
||||||
|
set (CMAKE_C_COMPILER_LAUNCHER ${CCACHE_FOUND} ${CMAKE_C_COMPILER_LAUNCHER})
|
||||||
|
|
||||||
|
set_property (GLOBAL PROPERTY RULE_LAUNCH_LINK ${CCACHE_FOUND})
|
||||||
|
|
||||||
|
# debian (debhelpers) set SOURCE_DATE_EPOCH environment variable, that is
|
||||||
# filled from the debian/changelog or current time.
|
# filled from the debian/changelog or current time.
|
||||||
#
|
#
|
||||||
# - 4.0+ ccache always includes this environment variable into the hash
|
# - 4.0+ ccache always includes this environment variable into the hash
|
||||||
# of the manifest, which do not allow to use previous cache,
|
# of the manifest, which do not allow to use previous cache,
|
||||||
# - 4.2+ ccache ignores SOURCE_DATE_EPOCH under time_macros sloppiness.
|
# - 4.2+ ccache ignores SOURCE_DATE_EPOCH for every file w/o __DATE__/__TIME__
|
||||||
#
|
#
|
||||||
# So for:
|
# So for:
|
||||||
# - 4.2+ time_macros sloppiness is used,
|
# - 4.2+ does not require any sloppiness
|
||||||
# - 4.0+ will ignore SOURCE_DATE_EPOCH environment variable.
|
# - 4.0+ will ignore SOURCE_DATE_EPOCH environment variable.
|
||||||
if (CCACHE_VERSION VERSION_GREATER_EQUAL "4.2")
|
if (CCACHE_VERSION VERSION_GREATER_EQUAL "4.2")
|
||||||
message(STATUS "Use time_macros sloppiness for ccache")
|
message(STATUS "ccache is 4.2+ no quirks for SOURCE_DATE_EPOCH required")
|
||||||
set_property (GLOBAL PROPERTY RULE_LAUNCH_COMPILE "${CCACHE_FOUND} --set-config=sloppiness=time_macros")
|
|
||||||
set_property (GLOBAL PROPERTY RULE_LAUNCH_LINK "${CCACHE_FOUND} --set-config=sloppiness=time_macros")
|
|
||||||
elseif (CCACHE_VERSION VERSION_GREATER_EQUAL "4.0")
|
elseif (CCACHE_VERSION VERSION_GREATER_EQUAL "4.0")
|
||||||
message(STATUS "Ignore SOURCE_DATE_EPOCH for ccache")
|
message(STATUS "Ignore SOURCE_DATE_EPOCH for ccache")
|
||||||
set_property (GLOBAL PROPERTY RULE_LAUNCH_COMPILE "env -u SOURCE_DATE_EPOCH ${CCACHE_FOUND}")
|
set_property (GLOBAL PROPERTY RULE_LAUNCH_COMPILE "env -u SOURCE_DATE_EPOCH ${CCACHE_FOUND}")
|
||||||
set_property (GLOBAL PROPERTY RULE_LAUNCH_LINK "env -u SOURCE_DATE_EPOCH ${CCACHE_FOUND}")
|
set_property (GLOBAL PROPERTY RULE_LAUNCH_LINK "env -u SOURCE_DATE_EPOCH ${CCACHE_FOUND}")
|
||||||
else()
|
|
||||||
set_property (GLOBAL PROPERTY RULE_LAUNCH_COMPILE ${CCACHE_FOUND})
|
|
||||||
set_property (GLOBAL PROPERTY RULE_LAUNCH_LINK ${CCACHE_FOUND})
|
|
||||||
endif()
|
endif()
|
||||||
else ()
|
else ()
|
||||||
message(${RECONFIGURE_MESSAGE_LEVEL} "Not using ${CCACHE_FOUND} ${CCACHE_VERSION} bug: https://bugzilla.samba.org/show_bug.cgi?id=8118")
|
message(${RECONFIGURE_MESSAGE_LEVEL} "Not using ${CCACHE_FOUND} ${CCACHE_VERSION} bug: https://bugzilla.samba.org/show_bug.cgi?id=8118")
|
||||||
|
29
cmake/find/datasketches.cmake
Normal file
29
cmake/find/datasketches.cmake
Normal file
@ -0,0 +1,29 @@
|
|||||||
|
option (ENABLE_DATASKETCHES "Enable DataSketches" ${ENABLE_LIBRARIES})
|
||||||
|
|
||||||
|
if (ENABLE_DATASKETCHES)
|
||||||
|
|
||||||
|
option (USE_INTERNAL_DATASKETCHES_LIBRARY "Set to FALSE to use system DataSketches library instead of bundled" ${NOT_UNBUNDLED})
|
||||||
|
|
||||||
|
if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/datasketches-cpp/theta/CMakeLists.txt")
|
||||||
|
if (USE_INTERNAL_DATASKETCHES_LIBRARY)
|
||||||
|
message(WARNING "submodule contrib/datasketches-cpp is missing. to fix try run: \n git submodule update --init --recursive")
|
||||||
|
endif()
|
||||||
|
set(MISSING_INTERNAL_DATASKETCHES_LIBRARY 1)
|
||||||
|
set(USE_INTERNAL_DATASKETCHES_LIBRARY 0)
|
||||||
|
endif()
|
||||||
|
|
||||||
|
if (USE_INTERNAL_DATASKETCHES_LIBRARY)
|
||||||
|
set(DATASKETCHES_LIBRARY theta)
|
||||||
|
set(DATASKETCHES_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/datasketches-cpp/common/include" "${ClickHouse_SOURCE_DIR}/contrib/datasketches-cpp/theta/include")
|
||||||
|
elseif (NOT MISSING_INTERNAL_DATASKETCHES_LIBRARY)
|
||||||
|
find_library(DATASKETCHES_LIBRARY theta)
|
||||||
|
find_path(DATASKETCHES_INCLUDE_DIR NAMES theta_sketch.hpp PATHS ${DATASKETCHES_INCLUDE_PATHS})
|
||||||
|
endif()
|
||||||
|
|
||||||
|
if (DATASKETCHES_LIBRARY AND DATASKETCHES_INCLUDE_DIR)
|
||||||
|
set(USE_DATASKETCHES 1)
|
||||||
|
endif()
|
||||||
|
|
||||||
|
endif()
|
||||||
|
|
||||||
|
message (STATUS "Using datasketches=${USE_DATASKETCHES}: ${DATASKETCHES_INCLUDE_DIR} : ${DATASKETCHES_LIBRARY}")
|
@ -1,7 +1,7 @@
|
|||||||
if(NOT ARCH_ARM AND NOT OS_FREEBSD AND NOT OS_DARWIN)
|
if(ARCH_AMD64 AND NOT OS_FREEBSD AND NOT OS_DARWIN)
|
||||||
option(ENABLE_FASTOPS "Enable fast vectorized mathematical functions library by Mikhail Parakhin" ${ENABLE_LIBRARIES})
|
option(ENABLE_FASTOPS "Enable fast vectorized mathematical functions library by Mikhail Parakhin" ${ENABLE_LIBRARIES})
|
||||||
elseif(ENABLE_FASTOPS)
|
elseif(ENABLE_FASTOPS)
|
||||||
message (${RECONFIGURE_MESSAGE_LEVEL} "Fastops library is not supported on ARM, FreeBSD and Darwin")
|
message (${RECONFIGURE_MESSAGE_LEVEL} "Fastops library is supported on x86_64 only, and not FreeBSD or Darwin")
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
if(NOT ENABLE_FASTOPS)
|
if(NOT ENABLE_FASTOPS)
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
if(NOT ARCH_ARM AND NOT OS_FREEBSD AND NOT APPLE AND USE_PROTOBUF)
|
if(NOT ARCH_ARM AND NOT OS_FREEBSD AND NOT APPLE AND USE_PROTOBUF AND NOT ARCH_PPC64LE)
|
||||||
option(ENABLE_HDFS "Enable HDFS" ${ENABLE_LIBRARIES})
|
option(ENABLE_HDFS "Enable HDFS" ${ENABLE_LIBRARIES})
|
||||||
elseif(ENABLE_HDFS OR USE_INTERNAL_HDFS3_LIBRARY)
|
elseif(ENABLE_HDFS OR USE_INTERNAL_HDFS3_LIBRARY)
|
||||||
message (${RECONFIGURE_MESSAGE_LEVEL} "Cannot use HDFS3 with current configuration")
|
message (${RECONFIGURE_MESSAGE_LEVEL} "Cannot use HDFS3 with current configuration")
|
||||||
|
@ -5,8 +5,8 @@ if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/krb5/README")
|
|||||||
set (ENABLE_KRB5 0)
|
set (ENABLE_KRB5 0)
|
||||||
endif ()
|
endif ()
|
||||||
|
|
||||||
if (NOT CMAKE_SYSTEM_NAME MATCHES "Linux")
|
if (NOT CMAKE_SYSTEM_NAME MATCHES "Linux" AND NOT (CMAKE_SYSTEM_NAME MATCHES "Darwin" AND NOT CMAKE_CROSSCOMPILING))
|
||||||
message (WARNING "krb5 disabled in non-Linux environments")
|
message (WARNING "krb5 disabled in non-Linux and non-native-Darwin environments")
|
||||||
set (ENABLE_KRB5 0)
|
set (ENABLE_KRB5 0)
|
||||||
endif ()
|
endif ()
|
||||||
|
|
||||||
|
@ -62,8 +62,10 @@ if (NOT OPENLDAP_FOUND AND NOT MISSING_INTERNAL_LDAP_LIBRARY)
|
|||||||
if (
|
if (
|
||||||
( "${_system_name}" STREQUAL "linux" AND "${_system_processor}" STREQUAL "x86_64" ) OR
|
( "${_system_name}" STREQUAL "linux" AND "${_system_processor}" STREQUAL "x86_64" ) OR
|
||||||
( "${_system_name}" STREQUAL "linux" AND "${_system_processor}" STREQUAL "aarch64" ) OR
|
( "${_system_name}" STREQUAL "linux" AND "${_system_processor}" STREQUAL "aarch64" ) OR
|
||||||
|
( "${_system_name}" STREQUAL "linux" AND "${_system_processor}" STREQUAL "ppc64le" ) OR
|
||||||
( "${_system_name}" STREQUAL "freebsd" AND "${_system_processor}" STREQUAL "x86_64" ) OR
|
( "${_system_name}" STREQUAL "freebsd" AND "${_system_processor}" STREQUAL "x86_64" ) OR
|
||||||
( "${_system_name}" STREQUAL "darwin" AND "${_system_processor}" STREQUAL "x86_64" )
|
( "${_system_name}" STREQUAL "darwin" AND "${_system_processor}" STREQUAL "x86_64" ) OR
|
||||||
|
( "${_system_name}" STREQUAL "darwin" AND "${_system_processor}" STREQUAL "aarch64" )
|
||||||
)
|
)
|
||||||
set (_ldap_supported_platform TRUE)
|
set (_ldap_supported_platform TRUE)
|
||||||
endif ()
|
endif ()
|
||||||
|
16
cmake/find/nanodbc.cmake
Normal file
16
cmake/find/nanodbc.cmake
Normal file
@ -0,0 +1,16 @@
|
|||||||
|
if (NOT ENABLE_ODBC)
|
||||||
|
return ()
|
||||||
|
endif ()
|
||||||
|
|
||||||
|
if (NOT USE_INTERNAL_NANODBC_LIBRARY)
|
||||||
|
message (FATAL_ERROR "Only the bundled nanodbc library can be used")
|
||||||
|
endif ()
|
||||||
|
|
||||||
|
if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/nanodbc/CMakeLists.txt")
|
||||||
|
message (FATAL_ERROR "submodule contrib/nanodbc is missing. to fix try run: \n git submodule update --init --recursive")
|
||||||
|
endif()
|
||||||
|
|
||||||
|
set (NANODBC_LIBRARY nanodbc)
|
||||||
|
set (NANODBC_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/nanodbc/nanodbc")
|
||||||
|
|
||||||
|
message (STATUS "Using nanodbc: ${NANODBC_INCLUDE_DIR} : ${NANODBC_LIBRARY}")
|
@ -20,5 +20,5 @@ if (NOT OS_FREEBSD)
|
|||||||
message (STATUS "Using NuRaft=${USE_NURAFT}: ${NURAFT_INCLUDE_DIR} : ${NURAFT_LIBRARY}")
|
message (STATUS "Using NuRaft=${USE_NURAFT}: ${NURAFT_INCLUDE_DIR} : ${NURAFT_LIBRARY}")
|
||||||
else()
|
else()
|
||||||
set (USE_NURAFT 0)
|
set (USE_NURAFT 0)
|
||||||
message (STATUS "Using internal NuRaft library on FreeBSD is not supported")
|
message (STATUS "Using internal NuRaft library on FreeBSD and Darwin is not supported")
|
||||||
endif()
|
endif()
|
||||||
|
@ -50,4 +50,6 @@ if (NOT EXTERNAL_ODBC_LIBRARY_FOUND)
|
|||||||
set (USE_INTERNAL_ODBC_LIBRARY 1)
|
set (USE_INTERNAL_ODBC_LIBRARY 1)
|
||||||
endif ()
|
endif ()
|
||||||
|
|
||||||
|
set (USE_INTERNAL_NANODBC_LIBRARY 1)
|
||||||
|
|
||||||
message (STATUS "Using unixodbc")
|
message (STATUS "Using unixodbc")
|
||||||
|
@ -1,3 +1,7 @@
|
|||||||
|
if (OS_DARWIN AND ARCH_AARCH64)
|
||||||
|
set (ENABLE_ROCKSDB OFF CACHE INTERNAL "")
|
||||||
|
endif()
|
||||||
|
|
||||||
option(ENABLE_ROCKSDB "Enable ROCKSDB" ${ENABLE_LIBRARIES})
|
option(ENABLE_ROCKSDB "Enable ROCKSDB" ${ENABLE_LIBRARIES})
|
||||||
|
|
||||||
if (NOT ENABLE_ROCKSDB)
|
if (NOT ENABLE_ROCKSDB)
|
||||||
|
@ -1,7 +1,7 @@
|
|||||||
if(NOT OS_FREEBSD AND NOT APPLE AND NOT ARCH_ARM)
|
if(NOT OS_FREEBSD AND NOT APPLE)
|
||||||
option(ENABLE_S3 "Enable S3" ${ENABLE_LIBRARIES})
|
option(ENABLE_S3 "Enable S3" ${ENABLE_LIBRARIES})
|
||||||
elseif(ENABLE_S3 OR USE_INTERNAL_AWS_S3_LIBRARY)
|
elseif(ENABLE_S3 OR USE_INTERNAL_AWS_S3_LIBRARY)
|
||||||
message (${RECONFIGURE_MESSAGE_LEVEL} "Can't use S3 on ARM, Apple or FreeBSD")
|
message (${RECONFIGURE_MESSAGE_LEVEL} "Can't use S3 on Apple or FreeBSD")
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
if(NOT ENABLE_S3)
|
if(NOT ENABLE_S3)
|
||||||
|
27
cmake/find/xz.cmake
Normal file
27
cmake/find/xz.cmake
Normal file
@ -0,0 +1,27 @@
|
|||||||
|
option (USE_INTERNAL_XZ_LIBRARY "Set to OFF to use system xz (lzma) library instead of bundled" ${NOT_UNBUNDLED})
|
||||||
|
|
||||||
|
if(NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/xz/src/liblzma/api/lzma.h")
|
||||||
|
if(USE_INTERNAL_XZ_LIBRARY)
|
||||||
|
message(WARNING "submodule contrib/xz is missing. to fix try run: \n git submodule update --init --recursive")
|
||||||
|
message (${RECONFIGURE_MESSAGE_LEVEL} "Can't find internal xz (lzma) library")
|
||||||
|
set(USE_INTERNAL_XZ_LIBRARY 0)
|
||||||
|
endif()
|
||||||
|
set(MISSING_INTERNAL_XZ_LIBRARY 1)
|
||||||
|
endif()
|
||||||
|
|
||||||
|
if (NOT USE_INTERNAL_XZ_LIBRARY)
|
||||||
|
find_library (XZ_LIBRARY lzma)
|
||||||
|
find_path (XZ_INCLUDE_DIR NAMES lzma.h PATHS ${XZ_INCLUDE_PATHS})
|
||||||
|
if (NOT XZ_LIBRARY OR NOT XZ_INCLUDE_DIR)
|
||||||
|
message (${RECONFIGURE_MESSAGE_LEVEL} "Can't find system xz (lzma) library")
|
||||||
|
endif ()
|
||||||
|
endif ()
|
||||||
|
|
||||||
|
if (XZ_LIBRARY AND XZ_INCLUDE_DIR)
|
||||||
|
elseif (NOT MISSING_INTERNAL_XZ_LIBRARY)
|
||||||
|
set (USE_INTERNAL_XZ_LIBRARY 1)
|
||||||
|
set (XZ_LIBRARY liblzma)
|
||||||
|
set (XZ_INCLUDE_DIR ${ClickHouse_SOURCE_DIR}/contrib/xz/src/liblzma/api)
|
||||||
|
endif ()
|
||||||
|
|
||||||
|
message (STATUS "Using xz (lzma): ${XZ_INCLUDE_DIR} : ${XZ_LIBRARY}")
|
@ -6,7 +6,7 @@ set (DEFAULT_LIBS "-nodefaultlibs")
|
|||||||
# We need builtins from Clang's RT even without libcxx - for ubsan+int128.
|
# We need builtins from Clang's RT even without libcxx - for ubsan+int128.
|
||||||
# See https://bugs.llvm.org/show_bug.cgi?id=16404
|
# See https://bugs.llvm.org/show_bug.cgi?id=16404
|
||||||
if (COMPILER_CLANG AND NOT (CMAKE_CROSSCOMPILING AND ARCH_AARCH64))
|
if (COMPILER_CLANG AND NOT (CMAKE_CROSSCOMPILING AND ARCH_AARCH64))
|
||||||
execute_process (COMMAND ${CMAKE_CXX_COMPILER} --print-file-name=libclang_rt.builtins-${CMAKE_SYSTEM_PROCESSOR}.a OUTPUT_VARIABLE BUILTINS_LIBRARY OUTPUT_STRIP_TRAILING_WHITESPACE)
|
execute_process (COMMAND ${CMAKE_CXX_COMPILER} --print-libgcc-file-name --rtlib=compiler-rt OUTPUT_VARIABLE BUILTINS_LIBRARY OUTPUT_STRIP_TRAILING_WHITESPACE)
|
||||||
else ()
|
else ()
|
||||||
set (BUILTINS_LIBRARY "-lgcc")
|
set (BUILTINS_LIBRARY "-lgcc")
|
||||||
endif ()
|
endif ()
|
||||||
|
@ -75,14 +75,14 @@ if (OS_LINUX AND NOT LINKER_NAME)
|
|||||||
endif ()
|
endif ()
|
||||||
|
|
||||||
if (LINKER_NAME)
|
if (LINKER_NAME)
|
||||||
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fuse-ld=${LINKER_NAME}")
|
if (COMPILER_CLANG AND (CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 12.0.0 OR CMAKE_CXX_COMPILER_VERSION VERSION_EQUAL 12.0.0))
|
||||||
set (CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -fuse-ld=${LINKER_NAME}")
|
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} --ld-path=${LINKER_NAME}")
|
||||||
|
set (CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} --ld-path=${LINKER_NAME}")
|
||||||
|
else ()
|
||||||
|
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fuse-ld=${LINKER_NAME}")
|
||||||
|
set (CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -fuse-ld=${LINKER_NAME}")
|
||||||
|
endif ()
|
||||||
|
|
||||||
message(STATUS "Using custom linker by name: ${LINKER_NAME}")
|
message(STATUS "Using custom linker by name: ${LINKER_NAME}")
|
||||||
endif ()
|
endif ()
|
||||||
|
|
||||||
if (ARCH_PPC64LE)
|
|
||||||
if (COMPILER_CLANG OR (COMPILER_GCC AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 8))
|
|
||||||
message(FATAL_ERROR "Only gcc-8 or higher is supported for powerpc architecture")
|
|
||||||
endif ()
|
|
||||||
endif ()
|
|
||||||
|
@ -11,11 +11,6 @@ if (NOT MSVC)
|
|||||||
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wextra")
|
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wextra")
|
||||||
endif ()
|
endif ()
|
||||||
|
|
||||||
if (USE_DEBUG_HELPERS)
|
|
||||||
set (INCLUDE_DEBUG_HELPERS "-I${ClickHouse_SOURCE_DIR}/base -include ${ClickHouse_SOURCE_DIR}/src/Core/iostream_debug_helpers.h")
|
|
||||||
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${INCLUDE_DEBUG_HELPERS}")
|
|
||||||
endif ()
|
|
||||||
|
|
||||||
# Add some warnings that are not available even with -Wall -Wextra -Wpedantic.
|
# Add some warnings that are not available even with -Wall -Wextra -Wpedantic.
|
||||||
# Intended for exploration of new compiler warnings that may be found useful.
|
# Intended for exploration of new compiler warnings that may be found useful.
|
||||||
# Applies to clang only
|
# Applies to clang only
|
||||||
@ -176,6 +171,7 @@ elseif (COMPILER_GCC)
|
|||||||
add_cxx_compile_options(-Wtrampolines)
|
add_cxx_compile_options(-Wtrampolines)
|
||||||
# Obvious
|
# Obvious
|
||||||
add_cxx_compile_options(-Wunused)
|
add_cxx_compile_options(-Wunused)
|
||||||
|
add_cxx_compile_options(-Wundef)
|
||||||
# Warn if vector operation is not implemented via SIMD capabilities of the architecture
|
# Warn if vector operation is not implemented via SIMD capabilities of the architecture
|
||||||
add_cxx_compile_options(-Wvector-operation-performance)
|
add_cxx_compile_options(-Wvector-operation-performance)
|
||||||
# XXX: libstdc++ has some of these for 3way compare
|
# XXX: libstdc++ has some of these for 3way compare
|
||||||
|
46
contrib/CMakeLists.txt
vendored
46
contrib/CMakeLists.txt
vendored
@ -1,4 +1,3 @@
|
|||||||
# Third-party libraries may have substandard code.
|
|
||||||
|
|
||||||
# Put all targets defined here and in added subfolders under "contrib/" folder in GUI-based IDEs by default.
|
# Put all targets defined here and in added subfolders under "contrib/" folder in GUI-based IDEs by default.
|
||||||
# Some of third-party projects may override CMAKE_FOLDER or FOLDER property of their targets, so they will
|
# Some of third-party projects may override CMAKE_FOLDER or FOLDER property of their targets, so they will
|
||||||
@ -11,8 +10,10 @@ else ()
|
|||||||
endif ()
|
endif ()
|
||||||
unset (_current_dir_name)
|
unset (_current_dir_name)
|
||||||
|
|
||||||
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -w")
|
# Third-party libraries may have substandard code.
|
||||||
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -w")
|
# Also remove a possible source of nondeterminism.
|
||||||
|
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -w -D__DATE__= -D__TIME__= -D__TIMESTAMP__=")
|
||||||
|
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -w -D__DATE__= -D__TIME__= -D__TIMESTAMP__=")
|
||||||
|
|
||||||
if (WITH_COVERAGE)
|
if (WITH_COVERAGE)
|
||||||
set (WITHOUT_COVERAGE_LIST ${WITHOUT_COVERAGE})
|
set (WITHOUT_COVERAGE_LIST ${WITHOUT_COVERAGE})
|
||||||
@ -32,12 +33,12 @@ endif()
|
|||||||
|
|
||||||
set_property(DIRECTORY PROPERTY EXCLUDE_FROM_ALL 1)
|
set_property(DIRECTORY PROPERTY EXCLUDE_FROM_ALL 1)
|
||||||
|
|
||||||
|
add_subdirectory (abseil-cpp-cmake)
|
||||||
add_subdirectory (antlr4-runtime-cmake)
|
add_subdirectory (antlr4-runtime-cmake)
|
||||||
add_subdirectory (boost-cmake)
|
add_subdirectory (boost-cmake)
|
||||||
add_subdirectory (cctz-cmake)
|
add_subdirectory (cctz-cmake)
|
||||||
add_subdirectory (consistent-hashing)
|
add_subdirectory (consistent-hashing)
|
||||||
add_subdirectory (dragonbox-cmake)
|
add_subdirectory (dragonbox-cmake)
|
||||||
add_subdirectory (FastMemcpy)
|
|
||||||
add_subdirectory (hyperscan-cmake)
|
add_subdirectory (hyperscan-cmake)
|
||||||
add_subdirectory (jemalloc-cmake)
|
add_subdirectory (jemalloc-cmake)
|
||||||
add_subdirectory (libcpuid-cmake)
|
add_subdirectory (libcpuid-cmake)
|
||||||
@ -47,7 +48,11 @@ add_subdirectory (lz4-cmake)
|
|||||||
add_subdirectory (murmurhash)
|
add_subdirectory (murmurhash)
|
||||||
add_subdirectory (replxx-cmake)
|
add_subdirectory (replxx-cmake)
|
||||||
add_subdirectory (unixodbc-cmake)
|
add_subdirectory (unixodbc-cmake)
|
||||||
add_subdirectory (xz)
|
add_subdirectory (nanodbc-cmake)
|
||||||
|
|
||||||
|
if (USE_INTERNAL_XZ_LIBRARY)
|
||||||
|
add_subdirectory (xz)
|
||||||
|
endif()
|
||||||
|
|
||||||
add_subdirectory (poco-cmake)
|
add_subdirectory (poco-cmake)
|
||||||
add_subdirectory (croaring-cmake)
|
add_subdirectory (croaring-cmake)
|
||||||
@ -93,14 +98,8 @@ if (USE_INTERNAL_ZLIB_LIBRARY)
|
|||||||
add_subdirectory (${INTERNAL_ZLIB_NAME})
|
add_subdirectory (${INTERNAL_ZLIB_NAME})
|
||||||
# We should use same defines when including zlib.h as used when zlib compiled
|
# We should use same defines when including zlib.h as used when zlib compiled
|
||||||
target_compile_definitions (zlib PUBLIC ZLIB_COMPAT WITH_GZFILEOP)
|
target_compile_definitions (zlib PUBLIC ZLIB_COMPAT WITH_GZFILEOP)
|
||||||
if (TARGET zlibstatic)
|
|
||||||
target_compile_definitions (zlibstatic PUBLIC ZLIB_COMPAT WITH_GZFILEOP)
|
|
||||||
endif ()
|
|
||||||
if (ARCH_AMD64 OR ARCH_AARCH64)
|
if (ARCH_AMD64 OR ARCH_AARCH64)
|
||||||
target_compile_definitions (zlib PUBLIC X86_64 UNALIGNED_OK)
|
target_compile_definitions (zlib PUBLIC X86_64 UNALIGNED_OK)
|
||||||
if (TARGET zlibstatic)
|
|
||||||
target_compile_definitions (zlibstatic PUBLIC X86_64 UNALIGNED_OK)
|
|
||||||
endif ()
|
|
||||||
endif ()
|
endif ()
|
||||||
endif ()
|
endif ()
|
||||||
|
|
||||||
@ -215,15 +214,17 @@ if (USE_EMBEDDED_COMPILER AND USE_INTERNAL_LLVM_LIBRARY)
|
|||||||
set (LLVM_ENABLE_RTTI 1 CACHE INTERNAL "")
|
set (LLVM_ENABLE_RTTI 1 CACHE INTERNAL "")
|
||||||
set (LLVM_ENABLE_PIC 0 CACHE INTERNAL "")
|
set (LLVM_ENABLE_PIC 0 CACHE INTERNAL "")
|
||||||
set (LLVM_TARGETS_TO_BUILD "X86;AArch64" CACHE STRING "")
|
set (LLVM_TARGETS_TO_BUILD "X86;AArch64" CACHE STRING "")
|
||||||
# Yes it is set globally, but this is not enough, since llvm will add -std=c++11 after default
|
|
||||||
# And c++2a cannot be used, due to ambiguous operator !=
|
# Need to use C++17 since the compilation is not possible with C++20 currently, due to ambiguous operator != etc.
|
||||||
if (COMPILER_GCC OR COMPILER_CLANG)
|
# LLVM project will set its default value for the -std=... but our global setting from CMake will override it.
|
||||||
set (_CXX_STANDARD "gnu++17")
|
set (CMAKE_CXX_STANDARD_bak ${CMAKE_CXX_STANDARD})
|
||||||
else()
|
set (CMAKE_CXX_STANDARD 17)
|
||||||
set (_CXX_STANDARD "c++17")
|
|
||||||
endif()
|
|
||||||
set (LLVM_CXX_STD ${_CXX_STANDARD} CACHE STRING "" FORCE)
|
|
||||||
add_subdirectory (llvm/llvm)
|
add_subdirectory (llvm/llvm)
|
||||||
|
|
||||||
|
set (CMAKE_CXX_STANDARD ${CMAKE_CXX_STANDARD_bak})
|
||||||
|
unset (CMAKE_CXX_STANDARD_bak)
|
||||||
|
|
||||||
target_include_directories(LLVMSupport SYSTEM BEFORE PRIVATE ${ZLIB_INCLUDE_DIR})
|
target_include_directories(LLVMSupport SYSTEM BEFORE PRIVATE ${ZLIB_INCLUDE_DIR})
|
||||||
endif ()
|
endif ()
|
||||||
|
|
||||||
@ -280,7 +281,14 @@ if (USE_AMQPCPP)
|
|||||||
add_subdirectory (amqpcpp-cmake)
|
add_subdirectory (amqpcpp-cmake)
|
||||||
endif()
|
endif()
|
||||||
if (USE_CASSANDRA)
|
if (USE_CASSANDRA)
|
||||||
|
# Need to use C++17 since the compilation is not possible with C++20 currently.
|
||||||
|
set (CMAKE_CXX_STANDARD_bak ${CMAKE_CXX_STANDARD})
|
||||||
|
set (CMAKE_CXX_STANDARD 17)
|
||||||
|
|
||||||
add_subdirectory (cassandra)
|
add_subdirectory (cassandra)
|
||||||
|
|
||||||
|
set (CMAKE_CXX_STANDARD ${CMAKE_CXX_STANDARD_bak})
|
||||||
|
unset (CMAKE_CXX_STANDARD_bak)
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
# Should go before:
|
# Should go before:
|
||||||
|
@ -1,28 +0,0 @@
|
|||||||
option (ENABLE_FASTMEMCPY "Enable FastMemcpy library (only internal)" ${ENABLE_LIBRARIES})
|
|
||||||
|
|
||||||
if (NOT OS_LINUX OR ARCH_AARCH64)
|
|
||||||
set (ENABLE_FASTMEMCPY OFF)
|
|
||||||
endif ()
|
|
||||||
|
|
||||||
if (ENABLE_FASTMEMCPY)
|
|
||||||
set (LIBRARY_DIR ${ClickHouse_SOURCE_DIR}/contrib/FastMemcpy)
|
|
||||||
|
|
||||||
set (SRCS
|
|
||||||
${LIBRARY_DIR}/FastMemcpy.c
|
|
||||||
|
|
||||||
memcpy_wrapper.c
|
|
||||||
)
|
|
||||||
|
|
||||||
add_library (FastMemcpy ${SRCS})
|
|
||||||
target_include_directories (FastMemcpy PUBLIC ${LIBRARY_DIR})
|
|
||||||
|
|
||||||
target_compile_definitions(FastMemcpy PUBLIC USE_FASTMEMCPY=1)
|
|
||||||
|
|
||||||
message (STATUS "Using FastMemcpy")
|
|
||||||
else ()
|
|
||||||
add_library (FastMemcpy INTERFACE)
|
|
||||||
|
|
||||||
target_compile_definitions(FastMemcpy INTERFACE USE_FASTMEMCPY=0)
|
|
||||||
|
|
||||||
message (STATUS "Not using FastMemcpy")
|
|
||||||
endif ()
|
|
@ -1,220 +0,0 @@
|
|||||||
//=====================================================================
|
|
||||||
//
|
|
||||||
// FastMemcpy.c - skywind3000@163.com, 2015
|
|
||||||
//
|
|
||||||
// feature:
|
|
||||||
// 50% speed up in avg. vs standard memcpy (tested in vc2012/gcc4.9)
|
|
||||||
//
|
|
||||||
//=====================================================================
|
|
||||||
#include <stdio.h>
|
|
||||||
#include <stdlib.h>
|
|
||||||
#include <string.h>
|
|
||||||
#include <time.h>
|
|
||||||
|
|
||||||
#if (defined(_WIN32) || defined(WIN32))
|
|
||||||
#include <windows.h>
|
|
||||||
#include <mmsystem.h>
|
|
||||||
#ifdef _MSC_VER
|
|
||||||
#pragma comment(lib, "winmm.lib")
|
|
||||||
#endif
|
|
||||||
#elif defined(__unix)
|
|
||||||
#include <sys/time.h>
|
|
||||||
#include <unistd.h>
|
|
||||||
#else
|
|
||||||
#error it can only be compiled under windows or unix
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#include "FastMemcpy.h"
|
|
||||||
|
|
||||||
unsigned int gettime()
|
|
||||||
{
|
|
||||||
#if (defined(_WIN32) || defined(WIN32))
|
|
||||||
return timeGetTime();
|
|
||||||
#else
|
|
||||||
static struct timezone tz={ 0,0 };
|
|
||||||
struct timeval time;
|
|
||||||
gettimeofday(&time,&tz);
|
|
||||||
return (time.tv_sec * 1000 + time.tv_usec / 1000);
|
|
||||||
#endif
|
|
||||||
}
|
|
||||||
|
|
||||||
void sleepms(unsigned int millisec)
|
|
||||||
{
|
|
||||||
#if defined(_WIN32) || defined(WIN32)
|
|
||||||
Sleep(millisec);
|
|
||||||
#else
|
|
||||||
usleep(millisec * 1000);
|
|
||||||
#endif
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
void benchmark(int dstalign, int srcalign, size_t size, int times)
|
|
||||||
{
|
|
||||||
char *DATA1 = (char*)malloc(size + 64);
|
|
||||||
char *DATA2 = (char*)malloc(size + 64);
|
|
||||||
size_t LINEAR1 = ((size_t)DATA1);
|
|
||||||
size_t LINEAR2 = ((size_t)DATA2);
|
|
||||||
char *ALIGN1 = (char*)(((64 - (LINEAR1 & 63)) & 63) + LINEAR1);
|
|
||||||
char *ALIGN2 = (char*)(((64 - (LINEAR2 & 63)) & 63) + LINEAR2);
|
|
||||||
char *dst = (dstalign)? ALIGN1 : (ALIGN1 + 1);
|
|
||||||
char *src = (srcalign)? ALIGN2 : (ALIGN2 + 3);
|
|
||||||
unsigned int t1, t2;
|
|
||||||
int k;
|
|
||||||
|
|
||||||
sleepms(100);
|
|
||||||
t1 = gettime();
|
|
||||||
for (k = times; k > 0; k--) {
|
|
||||||
memcpy(dst, src, size);
|
|
||||||
}
|
|
||||||
t1 = gettime() - t1;
|
|
||||||
sleepms(100);
|
|
||||||
t2 = gettime();
|
|
||||||
for (k = times; k > 0; k--) {
|
|
||||||
memcpy_fast(dst, src, size);
|
|
||||||
}
|
|
||||||
t2 = gettime() - t2;
|
|
||||||
|
|
||||||
free(DATA1);
|
|
||||||
free(DATA2);
|
|
||||||
|
|
||||||
printf("result(dst %s, src %s): memcpy_fast=%dms memcpy=%d ms\n",
|
|
||||||
dstalign? "aligned" : "unalign",
|
|
||||||
srcalign? "aligned" : "unalign", (int)t2, (int)t1);
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
void bench(int copysize, int times)
|
|
||||||
{
|
|
||||||
printf("benchmark(size=%d bytes, times=%d):\n", copysize, times);
|
|
||||||
benchmark(1, 1, copysize, times);
|
|
||||||
benchmark(1, 0, copysize, times);
|
|
||||||
benchmark(0, 1, copysize, times);
|
|
||||||
benchmark(0, 0, copysize, times);
|
|
||||||
printf("\n");
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
void random_bench(int maxsize, int times)
|
|
||||||
{
|
|
||||||
static char A[11 * 1024 * 1024 + 2];
|
|
||||||
static char B[11 * 1024 * 1024 + 2];
|
|
||||||
static int random_offsets[0x10000];
|
|
||||||
static int random_sizes[0x8000];
|
|
||||||
unsigned int i, p1, p2;
|
|
||||||
unsigned int t1, t2;
|
|
||||||
for (i = 0; i < 0x10000; i++) { // generate random offsets
|
|
||||||
random_offsets[i] = rand() % (10 * 1024 * 1024 + 1);
|
|
||||||
}
|
|
||||||
for (i = 0; i < 0x8000; i++) { // generate random sizes
|
|
||||||
random_sizes[i] = 1 + rand() % maxsize;
|
|
||||||
}
|
|
||||||
sleepms(100);
|
|
||||||
t1 = gettime();
|
|
||||||
for (p1 = 0, p2 = 0, i = 0; i < times; i++) {
|
|
||||||
int offset1 = random_offsets[(p1++) & 0xffff];
|
|
||||||
int offset2 = random_offsets[(p1++) & 0xffff];
|
|
||||||
int size = random_sizes[(p2++) & 0x7fff];
|
|
||||||
memcpy(A + offset1, B + offset2, size);
|
|
||||||
}
|
|
||||||
t1 = gettime() - t1;
|
|
||||||
sleepms(100);
|
|
||||||
t2 = gettime();
|
|
||||||
for (p1 = 0, p2 = 0, i = 0; i < times; i++) {
|
|
||||||
int offset1 = random_offsets[(p1++) & 0xffff];
|
|
||||||
int offset2 = random_offsets[(p1++) & 0xffff];
|
|
||||||
int size = random_sizes[(p2++) & 0x7fff];
|
|
||||||
memcpy_fast(A + offset1, B + offset2, size);
|
|
||||||
}
|
|
||||||
t2 = gettime() - t2;
|
|
||||||
printf("benchmark random access:\n");
|
|
||||||
printf("memcpy_fast=%dms memcpy=%dms\n\n", (int)t2, (int)t1);
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
#ifdef _MSC_VER
|
|
||||||
#pragma comment(lib, "winmm.lib")
|
|
||||||
#endif
|
|
||||||
|
|
||||||
int main(void)
|
|
||||||
{
|
|
||||||
bench(32, 0x1000000);
|
|
||||||
bench(64, 0x1000000);
|
|
||||||
bench(512, 0x800000);
|
|
||||||
bench(1024, 0x400000);
|
|
||||||
bench(4096, 0x80000);
|
|
||||||
bench(8192, 0x40000);
|
|
||||||
bench(1024 * 1024 * 1, 0x800);
|
|
||||||
bench(1024 * 1024 * 4, 0x200);
|
|
||||||
bench(1024 * 1024 * 8, 0x100);
|
|
||||||
|
|
||||||
random_bench(2048, 8000000);
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
/*
|
|
||||||
benchmark(size=32 bytes, times=16777216):
|
|
||||||
result(dst aligned, src aligned): memcpy_fast=78ms memcpy=260 ms
|
|
||||||
result(dst aligned, src unalign): memcpy_fast=78ms memcpy=250 ms
|
|
||||||
result(dst unalign, src aligned): memcpy_fast=78ms memcpy=266 ms
|
|
||||||
result(dst unalign, src unalign): memcpy_fast=78ms memcpy=234 ms
|
|
||||||
|
|
||||||
benchmark(size=64 bytes, times=16777216):
|
|
||||||
result(dst aligned, src aligned): memcpy_fast=109ms memcpy=281 ms
|
|
||||||
result(dst aligned, src unalign): memcpy_fast=109ms memcpy=328 ms
|
|
||||||
result(dst unalign, src aligned): memcpy_fast=109ms memcpy=343 ms
|
|
||||||
result(dst unalign, src unalign): memcpy_fast=93ms memcpy=344 ms
|
|
||||||
|
|
||||||
benchmark(size=512 bytes, times=8388608):
|
|
||||||
result(dst aligned, src aligned): memcpy_fast=125ms memcpy=218 ms
|
|
||||||
result(dst aligned, src unalign): memcpy_fast=156ms memcpy=484 ms
|
|
||||||
result(dst unalign, src aligned): memcpy_fast=172ms memcpy=546 ms
|
|
||||||
result(dst unalign, src unalign): memcpy_fast=172ms memcpy=515 ms
|
|
||||||
|
|
||||||
benchmark(size=1024 bytes, times=4194304):
|
|
||||||
result(dst aligned, src aligned): memcpy_fast=109ms memcpy=172 ms
|
|
||||||
result(dst aligned, src unalign): memcpy_fast=187ms memcpy=453 ms
|
|
||||||
result(dst unalign, src aligned): memcpy_fast=172ms memcpy=437 ms
|
|
||||||
result(dst unalign, src unalign): memcpy_fast=156ms memcpy=452 ms
|
|
||||||
|
|
||||||
benchmark(size=4096 bytes, times=524288):
|
|
||||||
result(dst aligned, src aligned): memcpy_fast=62ms memcpy=78 ms
|
|
||||||
result(dst aligned, src unalign): memcpy_fast=109ms memcpy=202 ms
|
|
||||||
result(dst unalign, src aligned): memcpy_fast=94ms memcpy=203 ms
|
|
||||||
result(dst unalign, src unalign): memcpy_fast=110ms memcpy=218 ms
|
|
||||||
|
|
||||||
benchmark(size=8192 bytes, times=262144):
|
|
||||||
result(dst aligned, src aligned): memcpy_fast=62ms memcpy=78 ms
|
|
||||||
result(dst aligned, src unalign): memcpy_fast=78ms memcpy=202 ms
|
|
||||||
result(dst unalign, src aligned): memcpy_fast=78ms memcpy=203 ms
|
|
||||||
result(dst unalign, src unalign): memcpy_fast=94ms memcpy=203 ms
|
|
||||||
|
|
||||||
benchmark(size=1048576 bytes, times=2048):
|
|
||||||
result(dst aligned, src aligned): memcpy_fast=203ms memcpy=191 ms
|
|
||||||
result(dst aligned, src unalign): memcpy_fast=219ms memcpy=281 ms
|
|
||||||
result(dst unalign, src aligned): memcpy_fast=218ms memcpy=328 ms
|
|
||||||
result(dst unalign, src unalign): memcpy_fast=218ms memcpy=312 ms
|
|
||||||
|
|
||||||
benchmark(size=4194304 bytes, times=512):
|
|
||||||
result(dst aligned, src aligned): memcpy_fast=312ms memcpy=406 ms
|
|
||||||
result(dst aligned, src unalign): memcpy_fast=296ms memcpy=421 ms
|
|
||||||
result(dst unalign, src aligned): memcpy_fast=312ms memcpy=468 ms
|
|
||||||
result(dst unalign, src unalign): memcpy_fast=297ms memcpy=452 ms
|
|
||||||
|
|
||||||
benchmark(size=8388608 bytes, times=256):
|
|
||||||
result(dst aligned, src aligned): memcpy_fast=281ms memcpy=452 ms
|
|
||||||
result(dst aligned, src unalign): memcpy_fast=280ms memcpy=468 ms
|
|
||||||
result(dst unalign, src aligned): memcpy_fast=298ms memcpy=514 ms
|
|
||||||
result(dst unalign, src unalign): memcpy_fast=344ms memcpy=472 ms
|
|
||||||
|
|
||||||
benchmark random access:
|
|
||||||
memcpy_fast=515ms memcpy=1014ms
|
|
||||||
|
|
||||||
*/
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
@ -1,694 +0,0 @@
|
|||||||
//=====================================================================
|
|
||||||
//
|
|
||||||
// FastMemcpy.c - skywind3000@163.com, 2015
|
|
||||||
//
|
|
||||||
// feature:
|
|
||||||
// 50% speed up in avg. vs standard memcpy (tested in vc2012/gcc5.1)
|
|
||||||
//
|
|
||||||
//=====================================================================
|
|
||||||
#ifndef __FAST_MEMCPY_H__
|
|
||||||
#define __FAST_MEMCPY_H__
|
|
||||||
|
|
||||||
#include <stddef.h>
|
|
||||||
#include <stdint.h>
|
|
||||||
#include <emmintrin.h>
|
|
||||||
|
|
||||||
|
|
||||||
//---------------------------------------------------------------------
|
|
||||||
// force inline for compilers
|
|
||||||
//---------------------------------------------------------------------
|
|
||||||
#ifndef INLINE
|
|
||||||
#ifdef __GNUC__
|
|
||||||
#if (__GNUC__ > 3) || ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 1))
|
|
||||||
#define INLINE __inline__ __attribute__((always_inline))
|
|
||||||
#else
|
|
||||||
#define INLINE __inline__
|
|
||||||
#endif
|
|
||||||
#elif defined(_MSC_VER)
|
|
||||||
#define INLINE __forceinline
|
|
||||||
#elif (defined(__BORLANDC__) || defined(__WATCOMC__))
|
|
||||||
#define INLINE __inline
|
|
||||||
#else
|
|
||||||
#define INLINE
|
|
||||||
#endif
|
|
||||||
#endif
|
|
||||||
|
|
||||||
typedef __attribute__((__aligned__(1))) uint16_t uint16_unaligned_t;
|
|
||||||
typedef __attribute__((__aligned__(1))) uint32_t uint32_unaligned_t;
|
|
||||||
typedef __attribute__((__aligned__(1))) uint64_t uint64_unaligned_t;
|
|
||||||
|
|
||||||
//---------------------------------------------------------------------
|
|
||||||
// fast copy for different sizes
|
|
||||||
//---------------------------------------------------------------------
|
|
||||||
static INLINE void memcpy_sse2_16(void *dst, const void *src) {
|
|
||||||
__m128i m0 = _mm_loadu_si128(((const __m128i*)src) + 0);
|
|
||||||
_mm_storeu_si128(((__m128i*)dst) + 0, m0);
|
|
||||||
}
|
|
||||||
|
|
||||||
static INLINE void memcpy_sse2_32(void *dst, const void *src) {
|
|
||||||
__m128i m0 = _mm_loadu_si128(((const __m128i*)src) + 0);
|
|
||||||
__m128i m1 = _mm_loadu_si128(((const __m128i*)src) + 1);
|
|
||||||
_mm_storeu_si128(((__m128i*)dst) + 0, m0);
|
|
||||||
_mm_storeu_si128(((__m128i*)dst) + 1, m1);
|
|
||||||
}
|
|
||||||
|
|
||||||
static INLINE void memcpy_sse2_64(void *dst, const void *src) {
|
|
||||||
__m128i m0 = _mm_loadu_si128(((const __m128i*)src) + 0);
|
|
||||||
__m128i m1 = _mm_loadu_si128(((const __m128i*)src) + 1);
|
|
||||||
__m128i m2 = _mm_loadu_si128(((const __m128i*)src) + 2);
|
|
||||||
__m128i m3 = _mm_loadu_si128(((const __m128i*)src) + 3);
|
|
||||||
_mm_storeu_si128(((__m128i*)dst) + 0, m0);
|
|
||||||
_mm_storeu_si128(((__m128i*)dst) + 1, m1);
|
|
||||||
_mm_storeu_si128(((__m128i*)dst) + 2, m2);
|
|
||||||
_mm_storeu_si128(((__m128i*)dst) + 3, m3);
|
|
||||||
}
|
|
||||||
|
|
||||||
static INLINE void memcpy_sse2_128(void *dst, const void *src) {
|
|
||||||
__m128i m0 = _mm_loadu_si128(((const __m128i*)src) + 0);
|
|
||||||
__m128i m1 = _mm_loadu_si128(((const __m128i*)src) + 1);
|
|
||||||
__m128i m2 = _mm_loadu_si128(((const __m128i*)src) + 2);
|
|
||||||
__m128i m3 = _mm_loadu_si128(((const __m128i*)src) + 3);
|
|
||||||
__m128i m4 = _mm_loadu_si128(((const __m128i*)src) + 4);
|
|
||||||
__m128i m5 = _mm_loadu_si128(((const __m128i*)src) + 5);
|
|
||||||
__m128i m6 = _mm_loadu_si128(((const __m128i*)src) + 6);
|
|
||||||
__m128i m7 = _mm_loadu_si128(((const __m128i*)src) + 7);
|
|
||||||
_mm_storeu_si128(((__m128i*)dst) + 0, m0);
|
|
||||||
_mm_storeu_si128(((__m128i*)dst) + 1, m1);
|
|
||||||
_mm_storeu_si128(((__m128i*)dst) + 2, m2);
|
|
||||||
_mm_storeu_si128(((__m128i*)dst) + 3, m3);
|
|
||||||
_mm_storeu_si128(((__m128i*)dst) + 4, m4);
|
|
||||||
_mm_storeu_si128(((__m128i*)dst) + 5, m5);
|
|
||||||
_mm_storeu_si128(((__m128i*)dst) + 6, m6);
|
|
||||||
_mm_storeu_si128(((__m128i*)dst) + 7, m7);
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
//---------------------------------------------------------------------
|
|
||||||
// tiny memory copy with jump table optimized
|
|
||||||
//---------------------------------------------------------------------
|
|
||||||
/// Attribute is used to avoid an error with undefined behaviour sanitizer
|
|
||||||
/// ../contrib/FastMemcpy/FastMemcpy.h:91:56: runtime error: applying zero offset to null pointer
|
|
||||||
/// Found by 01307_orc_output_format.sh, cause - ORCBlockInputFormat and external ORC library.
|
|
||||||
__attribute__((__no_sanitize__("undefined"))) static INLINE void *memcpy_tiny(void *dst, const void *src, size_t size) {
|
|
||||||
unsigned char *dd = ((unsigned char*)dst) + size;
|
|
||||||
const unsigned char *ss = ((const unsigned char*)src) + size;
|
|
||||||
|
|
||||||
switch (size) {
|
|
||||||
case 64:
|
|
||||||
memcpy_sse2_64(dd - 64, ss - 64);
|
|
||||||
case 0:
|
|
||||||
break;
|
|
||||||
|
|
||||||
case 65:
|
|
||||||
memcpy_sse2_64(dd - 65, ss - 65);
|
|
||||||
case 1:
|
|
||||||
dd[-1] = ss[-1];
|
|
||||||
break;
|
|
||||||
|
|
||||||
case 66:
|
|
||||||
memcpy_sse2_64(dd - 66, ss - 66);
|
|
||||||
case 2:
|
|
||||||
*((uint16_unaligned_t*)(dd - 2)) = *((uint16_unaligned_t*)(ss - 2));
|
|
||||||
break;
|
|
||||||
|
|
||||||
case 67:
|
|
||||||
memcpy_sse2_64(dd - 67, ss - 67);
|
|
||||||
case 3:
|
|
||||||
*((uint16_unaligned_t*)(dd - 3)) = *((uint16_unaligned_t*)(ss - 3));
|
|
||||||
dd[-1] = ss[-1];
|
|
||||||
break;
|
|
||||||
|
|
||||||
case 68:
|
|
||||||
memcpy_sse2_64(dd - 68, ss - 68);
|
|
||||||
case 4:
|
|
||||||
*((uint32_unaligned_t*)(dd - 4)) = *((uint32_unaligned_t*)(ss - 4));
|
|
||||||
break;
|
|
||||||
|
|
||||||
case 69:
|
|
||||||
memcpy_sse2_64(dd - 69, ss - 69);
|
|
||||||
case 5:
|
|
||||||
*((uint32_unaligned_t*)(dd - 5)) = *((uint32_unaligned_t*)(ss - 5));
|
|
||||||
dd[-1] = ss[-1];
|
|
||||||
break;
|
|
||||||
|
|
||||||
case 70:
|
|
||||||
memcpy_sse2_64(dd - 70, ss - 70);
|
|
||||||
case 6:
|
|
||||||
*((uint32_unaligned_t*)(dd - 6)) = *((uint32_unaligned_t*)(ss - 6));
|
|
||||||
*((uint16_unaligned_t*)(dd - 2)) = *((uint16_unaligned_t*)(ss - 2));
|
|
||||||
break;
|
|
||||||
|
|
||||||
case 71:
|
|
||||||
memcpy_sse2_64(dd - 71, ss - 71);
|
|
||||||
case 7:
|
|
||||||
*((uint32_unaligned_t*)(dd - 7)) = *((uint32_unaligned_t*)(ss - 7));
|
|
||||||
*((uint32_unaligned_t*)(dd - 4)) = *((uint32_unaligned_t*)(ss - 4));
|
|
||||||
break;
|
|
||||||
|
|
||||||
case 72:
|
|
||||||
memcpy_sse2_64(dd - 72, ss - 72);
|
|
||||||
case 8:
|
|
||||||
*((uint64_unaligned_t*)(dd - 8)) = *((uint64_unaligned_t*)(ss - 8));
|
|
||||||
break;
|
|
||||||
|
|
||||||
case 73:
|
|
||||||
memcpy_sse2_64(dd - 73, ss - 73);
|
|
||||||
case 9:
|
|
||||||
*((uint64_unaligned_t*)(dd - 9)) = *((uint64_unaligned_t*)(ss - 9));
|
|
||||||
dd[-1] = ss[-1];
|
|
||||||
break;
|
|
||||||
|
|
||||||
case 74:
|
|
||||||
memcpy_sse2_64(dd - 74, ss - 74);
|
|
||||||
case 10:
|
|
||||||
*((uint64_unaligned_t*)(dd - 10)) = *((uint64_unaligned_t*)(ss - 10));
|
|
||||||
*((uint16_unaligned_t*)(dd - 2)) = *((uint16_unaligned_t*)(ss - 2));
|
|
||||||
break;
|
|
||||||
|
|
||||||
case 75:
|
|
||||||
memcpy_sse2_64(dd - 75, ss - 75);
|
|
||||||
case 11:
|
|
||||||
*((uint64_unaligned_t*)(dd - 11)) = *((uint64_unaligned_t*)(ss - 11));
|
|
||||||
*((uint32_unaligned_t*)(dd - 4)) = *((uint32_unaligned_t*)(ss - 4));
|
|
||||||
break;
|
|
||||||
|
|
||||||
case 76:
|
|
||||||
memcpy_sse2_64(dd - 76, ss - 76);
|
|
||||||
case 12:
|
|
||||||
*((uint64_unaligned_t*)(dd - 12)) = *((uint64_unaligned_t*)(ss - 12));
|
|
||||||
*((uint32_unaligned_t*)(dd - 4)) = *((uint32_unaligned_t*)(ss - 4));
|
|
||||||
break;
|
|
||||||
|
|
||||||
case 77:
|
|
||||||
memcpy_sse2_64(dd - 77, ss - 77);
|
|
||||||
case 13:
|
|
||||||
*((uint64_unaligned_t*)(dd - 13)) = *((uint64_unaligned_t*)(ss - 13));
|
|
||||||
*((uint32_unaligned_t*)(dd - 5)) = *((uint32_unaligned_t*)(ss - 5));
|
|
||||||
dd[-1] = ss[-1];
|
|
||||||
break;
|
|
||||||
|
|
||||||
case 78:
|
|
||||||
memcpy_sse2_64(dd - 78, ss - 78);
|
|
||||||
case 14:
|
|
||||||
*((uint64_unaligned_t*)(dd - 14)) = *((uint64_unaligned_t*)(ss - 14));
|
|
||||||
*((uint64_unaligned_t*)(dd - 8)) = *((uint64_unaligned_t*)(ss - 8));
|
|
||||||
break;
|
|
||||||
|
|
||||||
case 79:
|
|
||||||
memcpy_sse2_64(dd - 79, ss - 79);
|
|
||||||
case 15:
|
|
||||||
*((uint64_unaligned_t*)(dd - 15)) = *((uint64_unaligned_t*)(ss - 15));
|
|
||||||
*((uint64_unaligned_t*)(dd - 8)) = *((uint64_unaligned_t*)(ss - 8));
|
|
||||||
break;
|
|
||||||
|
|
||||||
case 80:
|
|
||||||
memcpy_sse2_64(dd - 80, ss - 80);
|
|
||||||
case 16:
|
|
||||||
memcpy_sse2_16(dd - 16, ss - 16);
|
|
||||||
break;
|
|
||||||
|
|
||||||
case 81:
|
|
||||||
memcpy_sse2_64(dd - 81, ss - 81);
|
|
||||||
case 17:
|
|
||||||
memcpy_sse2_16(dd - 17, ss - 17);
|
|
||||||
dd[-1] = ss[-1];
|
|
||||||
break;
|
|
||||||
|
|
||||||
case 82:
|
|
||||||
memcpy_sse2_64(dd - 82, ss - 82);
|
|
||||||
case 18:
|
|
||||||
memcpy_sse2_16(dd - 18, ss - 18);
|
|
||||||
*((uint16_unaligned_t*)(dd - 2)) = *((uint16_unaligned_t*)(ss - 2));
|
|
||||||
break;
|
|
||||||
|
|
||||||
case 83:
|
|
||||||
memcpy_sse2_64(dd - 83, ss - 83);
|
|
||||||
case 19:
|
|
||||||
memcpy_sse2_16(dd - 19, ss - 19);
|
|
||||||
*((uint16_unaligned_t*)(dd - 3)) = *((uint16_unaligned_t*)(ss - 3));
|
|
||||||
dd[-1] = ss[-1];
|
|
||||||
break;
|
|
||||||
|
|
||||||
case 84:
|
|
||||||
memcpy_sse2_64(dd - 84, ss - 84);
|
|
||||||
case 20:
|
|
||||||
memcpy_sse2_16(dd - 20, ss - 20);
|
|
||||||
*((uint32_unaligned_t*)(dd - 4)) = *((uint32_unaligned_t*)(ss - 4));
|
|
||||||
break;
|
|
||||||
|
|
||||||
case 85:
|
|
||||||
memcpy_sse2_64(dd - 85, ss - 85);
|
|
||||||
case 21:
|
|
||||||
memcpy_sse2_16(dd - 21, ss - 21);
|
|
||||||
*((uint32_unaligned_t*)(dd - 5)) = *((uint32_unaligned_t*)(ss - 5));
|
|
||||||
dd[-1] = ss[-1];
|
|
||||||
break;
|
|
||||||
|
|
||||||
case 86:
|
|
||||||
memcpy_sse2_64(dd - 86, ss - 86);
|
|
||||||
case 22:
|
|
||||||
memcpy_sse2_16(dd - 22, ss - 22);
|
|
||||||
*((uint32_unaligned_t*)(dd - 6)) = *((uint32_unaligned_t*)(ss - 6));
|
|
||||||
*((uint16_unaligned_t*)(dd - 2)) = *((uint16_unaligned_t*)(ss - 2));
|
|
||||||
break;
|
|
||||||
|
|
||||||
case 87:
|
|
||||||
memcpy_sse2_64(dd - 87, ss - 87);
|
|
||||||
case 23:
|
|
||||||
memcpy_sse2_16(dd - 23, ss - 23);
|
|
||||||
*((uint32_unaligned_t*)(dd - 7)) = *((uint32_unaligned_t*)(ss - 7));
|
|
||||||
*((uint32_unaligned_t*)(dd - 4)) = *((uint32_unaligned_t*)(ss - 4));
|
|
||||||
break;
|
|
||||||
|
|
||||||
case 88:
|
|
||||||
memcpy_sse2_64(dd - 88, ss - 88);
|
|
||||||
case 24:
|
|
||||||
memcpy_sse2_16(dd - 24, ss - 24);
|
|
||||||
memcpy_sse2_16(dd - 16, ss - 16);
|
|
||||||
break;
|
|
||||||
|
|
||||||
case 89:
|
|
||||||
memcpy_sse2_64(dd - 89, ss - 89);
|
|
||||||
case 25:
|
|
||||||
memcpy_sse2_16(dd - 25, ss - 25);
|
|
||||||
memcpy_sse2_16(dd - 16, ss - 16);
|
|
||||||
break;
|
|
||||||
|
|
||||||
case 90:
|
|
||||||
memcpy_sse2_64(dd - 90, ss - 90);
|
|
||||||
case 26:
|
|
||||||
memcpy_sse2_16(dd - 26, ss - 26);
|
|
||||||
memcpy_sse2_16(dd - 16, ss - 16);
|
|
||||||
break;
|
|
||||||
|
|
||||||
case 91:
|
|
||||||
memcpy_sse2_64(dd - 91, ss - 91);
|
|
||||||
case 27:
|
|
||||||
memcpy_sse2_16(dd - 27, ss - 27);
|
|
||||||
memcpy_sse2_16(dd - 16, ss - 16);
|
|
||||||
break;
|
|
||||||
|
|
||||||
case 92:
|
|
||||||
memcpy_sse2_64(dd - 92, ss - 92);
|
|
||||||
case 28:
|
|
||||||
memcpy_sse2_16(dd - 28, ss - 28);
|
|
||||||
memcpy_sse2_16(dd - 16, ss - 16);
|
|
||||||
break;
|
|
||||||
|
|
||||||
case 93:
|
|
||||||
memcpy_sse2_64(dd - 93, ss - 93);
|
|
||||||
case 29:
|
|
||||||
memcpy_sse2_16(dd - 29, ss - 29);
|
|
||||||
memcpy_sse2_16(dd - 16, ss - 16);
|
|
||||||
break;
|
|
||||||
|
|
||||||
case 94:
|
|
||||||
memcpy_sse2_64(dd - 94, ss - 94);
|
|
||||||
case 30:
|
|
||||||
memcpy_sse2_16(dd - 30, ss - 30);
|
|
||||||
memcpy_sse2_16(dd - 16, ss - 16);
|
|
||||||
break;
|
|
||||||
|
|
||||||
case 95:
|
|
||||||
memcpy_sse2_64(dd - 95, ss - 95);
|
|
||||||
case 31:
|
|
||||||
memcpy_sse2_16(dd - 31, ss - 31);
|
|
||||||
memcpy_sse2_16(dd - 16, ss - 16);
|
|
||||||
break;
|
|
||||||
|
|
||||||
case 96:
|
|
||||||
memcpy_sse2_64(dd - 96, ss - 96);
|
|
||||||
case 32:
|
|
||||||
memcpy_sse2_32(dd - 32, ss - 32);
|
|
||||||
break;
|
|
||||||
|
|
||||||
case 97:
|
|
||||||
memcpy_sse2_64(dd - 97, ss - 97);
|
|
||||||
case 33:
|
|
||||||
memcpy_sse2_32(dd - 33, ss - 33);
|
|
||||||
dd[-1] = ss[-1];
|
|
||||||
break;
|
|
||||||
|
|
||||||
case 98:
|
|
||||||
memcpy_sse2_64(dd - 98, ss - 98);
|
|
||||||
case 34:
|
|
||||||
memcpy_sse2_32(dd - 34, ss - 34);
|
|
||||||
*((uint16_unaligned_t*)(dd - 2)) = *((uint16_unaligned_t*)(ss - 2));
|
|
||||||
break;
|
|
||||||
|
|
||||||
case 99:
|
|
||||||
memcpy_sse2_64(dd - 99, ss - 99);
|
|
||||||
case 35:
|
|
||||||
memcpy_sse2_32(dd - 35, ss - 35);
|
|
||||||
*((uint16_unaligned_t*)(dd - 3)) = *((uint16_unaligned_t*)(ss - 3));
|
|
||||||
dd[-1] = ss[-1];
|
|
||||||
break;
|
|
||||||
|
|
||||||
case 100:
|
|
||||||
memcpy_sse2_64(dd - 100, ss - 100);
|
|
||||||
case 36:
|
|
||||||
memcpy_sse2_32(dd - 36, ss - 36);
|
|
||||||
*((uint32_unaligned_t*)(dd - 4)) = *((uint32_unaligned_t*)(ss - 4));
|
|
||||||
break;
|
|
||||||
|
|
||||||
case 101:
|
|
||||||
memcpy_sse2_64(dd - 101, ss - 101);
|
|
||||||
case 37:
|
|
||||||
memcpy_sse2_32(dd - 37, ss - 37);
|
|
||||||
*((uint32_unaligned_t*)(dd - 5)) = *((uint32_unaligned_t*)(ss - 5));
|
|
||||||
dd[-1] = ss[-1];
|
|
||||||
break;
|
|
||||||
|
|
||||||
case 102:
|
|
||||||
memcpy_sse2_64(dd - 102, ss - 102);
|
|
||||||
case 38:
|
|
||||||
memcpy_sse2_32(dd - 38, ss - 38);
|
|
||||||
*((uint32_unaligned_t*)(dd - 6)) = *((uint32_unaligned_t*)(ss - 6));
|
|
||||||
*((uint16_unaligned_t*)(dd - 2)) = *((uint16_unaligned_t*)(ss - 2));
|
|
||||||
break;
|
|
||||||
|
|
||||||
case 103:
|
|
||||||
memcpy_sse2_64(dd - 103, ss - 103);
|
|
||||||
case 39:
|
|
||||||
memcpy_sse2_32(dd - 39, ss - 39);
|
|
||||||
*((uint32_unaligned_t*)(dd - 7)) = *((uint32_unaligned_t*)(ss - 7));
|
|
||||||
*((uint32_unaligned_t*)(dd - 4)) = *((uint32_unaligned_t*)(ss - 4));
|
|
||||||
break;
|
|
||||||
|
|
||||||
case 104:
|
|
||||||
memcpy_sse2_64(dd - 104, ss - 104);
|
|
||||||
case 40:
|
|
||||||
memcpy_sse2_32(dd - 40, ss - 40);
|
|
||||||
*((uint64_unaligned_t*)(dd - 8)) = *((uint64_unaligned_t*)(ss - 8));
|
|
||||||
break;
|
|
||||||
|
|
||||||
case 105:
|
|
||||||
memcpy_sse2_64(dd - 105, ss - 105);
|
|
||||||
case 41:
|
|
||||||
memcpy_sse2_32(dd - 41, ss - 41);
|
|
||||||
*((uint64_unaligned_t*)(dd - 9)) = *((uint64_unaligned_t*)(ss - 9));
|
|
||||||
dd[-1] = ss[-1];
|
|
||||||
break;
|
|
||||||
|
|
||||||
case 106:
|
|
||||||
memcpy_sse2_64(dd - 106, ss - 106);
|
|
||||||
case 42:
|
|
||||||
memcpy_sse2_32(dd - 42, ss - 42);
|
|
||||||
*((uint64_unaligned_t*)(dd - 10)) = *((uint64_unaligned_t*)(ss - 10));
|
|
||||||
*((uint16_unaligned_t*)(dd - 2)) = *((uint16_unaligned_t*)(ss - 2));
|
|
||||||
break;
|
|
||||||
|
|
||||||
case 107:
|
|
||||||
memcpy_sse2_64(dd - 107, ss - 107);
|
|
||||||
case 43:
|
|
||||||
memcpy_sse2_32(dd - 43, ss - 43);
|
|
||||||
*((uint64_unaligned_t*)(dd - 11)) = *((uint64_unaligned_t*)(ss - 11));
|
|
||||||
*((uint32_unaligned_t*)(dd - 4)) = *((uint32_unaligned_t*)(ss - 4));
|
|
||||||
break;
|
|
||||||
|
|
||||||
case 108:
|
|
||||||
memcpy_sse2_64(dd - 108, ss - 108);
|
|
||||||
case 44:
|
|
||||||
memcpy_sse2_32(dd - 44, ss - 44);
|
|
||||||
*((uint64_unaligned_t*)(dd - 12)) = *((uint64_unaligned_t*)(ss - 12));
|
|
||||||
*((uint32_unaligned_t*)(dd - 4)) = *((uint32_unaligned_t*)(ss - 4));
|
|
||||||
break;
|
|
||||||
|
|
||||||
case 109:
|
|
||||||
memcpy_sse2_64(dd - 109, ss - 109);
|
|
||||||
case 45:
|
|
||||||
memcpy_sse2_32(dd - 45, ss - 45);
|
|
||||||
*((uint64_unaligned_t*)(dd - 13)) = *((uint64_unaligned_t*)(ss - 13));
|
|
||||||
*((uint32_unaligned_t*)(dd - 5)) = *((uint32_unaligned_t*)(ss - 5));
|
|
||||||
dd[-1] = ss[-1];
|
|
||||||
break;
|
|
||||||
|
|
||||||
case 110:
|
|
||||||
memcpy_sse2_64(dd - 110, ss - 110);
|
|
||||||
case 46:
|
|
||||||
memcpy_sse2_32(dd - 46, ss - 46);
|
|
||||||
*((uint64_unaligned_t*)(dd - 14)) = *((uint64_unaligned_t*)(ss - 14));
|
|
||||||
*((uint64_unaligned_t*)(dd - 8)) = *((uint64_unaligned_t*)(ss - 8));
|
|
||||||
break;
|
|
||||||
|
|
||||||
case 111:
|
|
||||||
memcpy_sse2_64(dd - 111, ss - 111);
|
|
||||||
case 47:
|
|
||||||
memcpy_sse2_32(dd - 47, ss - 47);
|
|
||||||
*((uint64_unaligned_t*)(dd - 15)) = *((uint64_unaligned_t*)(ss - 15));
|
|
||||||
*((uint64_unaligned_t*)(dd - 8)) = *((uint64_unaligned_t*)(ss - 8));
|
|
||||||
break;
|
|
||||||
|
|
||||||
case 112:
|
|
||||||
memcpy_sse2_64(dd - 112, ss - 112);
|
|
||||||
case 48:
|
|
||||||
memcpy_sse2_32(dd - 48, ss - 48);
|
|
||||||
memcpy_sse2_16(dd - 16, ss - 16);
|
|
||||||
break;
|
|
||||||
|
|
||||||
case 113:
|
|
||||||
memcpy_sse2_64(dd - 113, ss - 113);
|
|
||||||
case 49:
|
|
||||||
memcpy_sse2_32(dd - 49, ss - 49);
|
|
||||||
memcpy_sse2_16(dd - 17, ss - 17);
|
|
||||||
dd[-1] = ss[-1];
|
|
||||||
break;
|
|
||||||
|
|
||||||
case 114:
|
|
||||||
memcpy_sse2_64(dd - 114, ss - 114);
|
|
||||||
case 50:
|
|
||||||
memcpy_sse2_32(dd - 50, ss - 50);
|
|
||||||
memcpy_sse2_16(dd - 18, ss - 18);
|
|
||||||
*((uint16_unaligned_t*)(dd - 2)) = *((uint16_unaligned_t*)(ss - 2));
|
|
||||||
break;
|
|
||||||
|
|
||||||
case 115:
|
|
||||||
memcpy_sse2_64(dd - 115, ss - 115);
|
|
||||||
case 51:
|
|
||||||
memcpy_sse2_32(dd - 51, ss - 51);
|
|
||||||
memcpy_sse2_16(dd - 19, ss - 19);
|
|
||||||
*((uint16_unaligned_t*)(dd - 3)) = *((uint16_unaligned_t*)(ss - 3));
|
|
||||||
dd[-1] = ss[-1];
|
|
||||||
break;
|
|
||||||
|
|
||||||
case 116:
|
|
||||||
memcpy_sse2_64(dd - 116, ss - 116);
|
|
||||||
case 52:
|
|
||||||
memcpy_sse2_32(dd - 52, ss - 52);
|
|
||||||
memcpy_sse2_16(dd - 20, ss - 20);
|
|
||||||
*((uint32_unaligned_t*)(dd - 4)) = *((uint32_unaligned_t*)(ss - 4));
|
|
||||||
break;
|
|
||||||
|
|
||||||
case 117:
|
|
||||||
memcpy_sse2_64(dd - 117, ss - 117);
|
|
||||||
case 53:
|
|
||||||
memcpy_sse2_32(dd - 53, ss - 53);
|
|
||||||
memcpy_sse2_16(dd - 21, ss - 21);
|
|
||||||
*((uint32_unaligned_t*)(dd - 5)) = *((uint32_unaligned_t*)(ss - 5));
|
|
||||||
dd[-1] = ss[-1];
|
|
||||||
break;
|
|
||||||
|
|
||||||
case 118:
|
|
||||||
memcpy_sse2_64(dd - 118, ss - 118);
|
|
||||||
case 54:
|
|
||||||
memcpy_sse2_32(dd - 54, ss - 54);
|
|
||||||
memcpy_sse2_16(dd - 22, ss - 22);
|
|
||||||
*((uint32_unaligned_t*)(dd - 6)) = *((uint32_unaligned_t*)(ss - 6));
|
|
||||||
*((uint16_unaligned_t*)(dd - 2)) = *((uint16_unaligned_t*)(ss - 2));
|
|
||||||
break;
|
|
||||||
|
|
||||||
case 119:
|
|
||||||
memcpy_sse2_64(dd - 119, ss - 119);
|
|
||||||
case 55:
|
|
||||||
memcpy_sse2_32(dd - 55, ss - 55);
|
|
||||||
memcpy_sse2_16(dd - 23, ss - 23);
|
|
||||||
*((uint32_unaligned_t*)(dd - 7)) = *((uint32_unaligned_t*)(ss - 7));
|
|
||||||
*((uint32_unaligned_t*)(dd - 4)) = *((uint32_unaligned_t*)(ss - 4));
|
|
||||||
break;
|
|
||||||
|
|
||||||
case 120:
|
|
||||||
memcpy_sse2_64(dd - 120, ss - 120);
|
|
||||||
case 56:
|
|
||||||
memcpy_sse2_32(dd - 56, ss - 56);
|
|
||||||
memcpy_sse2_16(dd - 24, ss - 24);
|
|
||||||
memcpy_sse2_16(dd - 16, ss - 16);
|
|
||||||
break;
|
|
||||||
|
|
||||||
case 121:
|
|
||||||
memcpy_sse2_64(dd - 121, ss - 121);
|
|
||||||
case 57:
|
|
||||||
memcpy_sse2_32(dd - 57, ss - 57);
|
|
||||||
memcpy_sse2_16(dd - 25, ss - 25);
|
|
||||||
memcpy_sse2_16(dd - 16, ss - 16);
|
|
||||||
break;
|
|
||||||
|
|
||||||
case 122:
|
|
||||||
memcpy_sse2_64(dd - 122, ss - 122);
|
|
||||||
case 58:
|
|
||||||
memcpy_sse2_32(dd - 58, ss - 58);
|
|
||||||
memcpy_sse2_16(dd - 26, ss - 26);
|
|
||||||
memcpy_sse2_16(dd - 16, ss - 16);
|
|
||||||
break;
|
|
||||||
|
|
||||||
case 123:
|
|
||||||
memcpy_sse2_64(dd - 123, ss - 123);
|
|
||||||
case 59:
|
|
||||||
memcpy_sse2_32(dd - 59, ss - 59);
|
|
||||||
memcpy_sse2_16(dd - 27, ss - 27);
|
|
||||||
memcpy_sse2_16(dd - 16, ss - 16);
|
|
||||||
break;
|
|
||||||
|
|
||||||
case 124:
|
|
||||||
memcpy_sse2_64(dd - 124, ss - 124);
|
|
||||||
case 60:
|
|
||||||
memcpy_sse2_32(dd - 60, ss - 60);
|
|
||||||
memcpy_sse2_16(dd - 28, ss - 28);
|
|
||||||
memcpy_sse2_16(dd - 16, ss - 16);
|
|
||||||
break;
|
|
||||||
|
|
||||||
case 125:
|
|
||||||
memcpy_sse2_64(dd - 125, ss - 125);
|
|
||||||
case 61:
|
|
||||||
memcpy_sse2_32(dd - 61, ss - 61);
|
|
||||||
memcpy_sse2_16(dd - 29, ss - 29);
|
|
||||||
memcpy_sse2_16(dd - 16, ss - 16);
|
|
||||||
break;
|
|
||||||
|
|
||||||
case 126:
|
|
||||||
memcpy_sse2_64(dd - 126, ss - 126);
|
|
||||||
case 62:
|
|
||||||
memcpy_sse2_32(dd - 62, ss - 62);
|
|
||||||
memcpy_sse2_16(dd - 30, ss - 30);
|
|
||||||
memcpy_sse2_16(dd - 16, ss - 16);
|
|
||||||
break;
|
|
||||||
|
|
||||||
case 127:
|
|
||||||
memcpy_sse2_64(dd - 127, ss - 127);
|
|
||||||
case 63:
|
|
||||||
memcpy_sse2_32(dd - 63, ss - 63);
|
|
||||||
memcpy_sse2_16(dd - 31, ss - 31);
|
|
||||||
memcpy_sse2_16(dd - 16, ss - 16);
|
|
||||||
break;
|
|
||||||
|
|
||||||
case 128:
|
|
||||||
memcpy_sse2_128(dd - 128, ss - 128);
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
|
|
||||||
return dst;
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
//---------------------------------------------------------------------
|
|
||||||
// main routine
|
|
||||||
//---------------------------------------------------------------------
|
|
||||||
static void* memcpy_fast(void *destination, const void *source, size_t size)
{
    unsigned char *d = (unsigned char*)destination;
    const unsigned char *s = (const unsigned char*)source;
    static size_t cachesize = 0x200000; // L2-cache size

    // Small block: the jump-table copy handles everything up to 128 bytes.
    if (size <= 128) {
        return memcpy_tiny(d, s, size);
    }

    // Align the destination to a 16-byte boundary by first storing one
    // unaligned 16-byte head; size > 128 guarantees the head fits.
    size_t head = (16 - (((size_t)d) & 15)) & 15;
    if (head > 0) {
        __m128i first = _mm_loadu_si128((const __m128i*)s);
        _mm_storeu_si128((__m128i*)d, first);
        d += head;
        s += head;
        size -= head;
    }

    if (size <= cachesize) {
        // Medium copy: regular (cacheable) aligned stores, 128 bytes per turn.
        while (size >= 128) {
            __m128i r0 = _mm_loadu_si128(((const __m128i*)s) + 0);
            __m128i r1 = _mm_loadu_si128(((const __m128i*)s) + 1);
            __m128i r2 = _mm_loadu_si128(((const __m128i*)s) + 2);
            __m128i r3 = _mm_loadu_si128(((const __m128i*)s) + 3);
            __m128i r4 = _mm_loadu_si128(((const __m128i*)s) + 4);
            __m128i r5 = _mm_loadu_si128(((const __m128i*)s) + 5);
            __m128i r6 = _mm_loadu_si128(((const __m128i*)s) + 6);
            __m128i r7 = _mm_loadu_si128(((const __m128i*)s) + 7);
            _mm_prefetch((const char*)(s + 256), _MM_HINT_NTA);
            s += 128;
            _mm_store_si128(((__m128i*)d) + 0, r0);
            _mm_store_si128(((__m128i*)d) + 1, r1);
            _mm_store_si128(((__m128i*)d) + 2, r2);
            _mm_store_si128(((__m128i*)d) + 3, r3);
            _mm_store_si128(((__m128i*)d) + 4, r4);
            _mm_store_si128(((__m128i*)d) + 5, r5);
            _mm_store_si128(((__m128i*)d) + 6, r6);
            _mm_store_si128(((__m128i*)d) + 7, r7);
            d += 128;
            size -= 128;
        }
    }
    else {
        // Big copy: bypass the cache with non-temporal (streaming) stores.
        _mm_prefetch((const char*)s, _MM_HINT_NTA);

        if ((((size_t)s) & 15) == 0) {
            // Source is 16-byte aligned: use aligned loads.
            while (size >= 128) {
                __m128i r0 = _mm_load_si128(((const __m128i*)s) + 0);
                __m128i r1 = _mm_load_si128(((const __m128i*)s) + 1);
                __m128i r2 = _mm_load_si128(((const __m128i*)s) + 2);
                __m128i r3 = _mm_load_si128(((const __m128i*)s) + 3);
                __m128i r4 = _mm_load_si128(((const __m128i*)s) + 4);
                __m128i r5 = _mm_load_si128(((const __m128i*)s) + 5);
                __m128i r6 = _mm_load_si128(((const __m128i*)s) + 6);
                __m128i r7 = _mm_load_si128(((const __m128i*)s) + 7);
                _mm_prefetch((const char*)(s + 256), _MM_HINT_NTA);
                s += 128;
                _mm_stream_si128(((__m128i*)d) + 0, r0);
                _mm_stream_si128(((__m128i*)d) + 1, r1);
                _mm_stream_si128(((__m128i*)d) + 2, r2);
                _mm_stream_si128(((__m128i*)d) + 3, r3);
                _mm_stream_si128(((__m128i*)d) + 4, r4);
                _mm_stream_si128(((__m128i*)d) + 5, r5);
                _mm_stream_si128(((__m128i*)d) + 6, r6);
                _mm_stream_si128(((__m128i*)d) + 7, r7);
                d += 128;
                size -= 128;
            }
        }
        else {
            // Source is unaligned: fall back to unaligned loads.
            while (size >= 128) {
                __m128i r0 = _mm_loadu_si128(((const __m128i*)s) + 0);
                __m128i r1 = _mm_loadu_si128(((const __m128i*)s) + 1);
                __m128i r2 = _mm_loadu_si128(((const __m128i*)s) + 2);
                __m128i r3 = _mm_loadu_si128(((const __m128i*)s) + 3);
                __m128i r4 = _mm_loadu_si128(((const __m128i*)s) + 4);
                __m128i r5 = _mm_loadu_si128(((const __m128i*)s) + 5);
                __m128i r6 = _mm_loadu_si128(((const __m128i*)s) + 6);
                __m128i r7 = _mm_loadu_si128(((const __m128i*)s) + 7);
                _mm_prefetch((const char*)(s + 256), _MM_HINT_NTA);
                s += 128;
                _mm_stream_si128(((__m128i*)d) + 0, r0);
                _mm_stream_si128(((__m128i*)d) + 1, r1);
                _mm_stream_si128(((__m128i*)d) + 2, r2);
                _mm_stream_si128(((__m128i*)d) + 3, r3);
                _mm_stream_si128(((__m128i*)d) + 4, r4);
                _mm_stream_si128(((__m128i*)d) + 5, r5);
                _mm_stream_si128(((__m128i*)d) + 6, r6);
                _mm_stream_si128(((__m128i*)d) + 7, r7);
                d += 128;
                size -= 128;
            }
        }
        // Make the non-temporal stores globally visible before returning.
        _mm_sfence();
    }

    // Remaining tail (< 128 bytes) goes through the jump table.
    memcpy_tiny(d, s, size);

    return destination;
}
|
|
||||||
|
|
||||||
|
|
||||||
#endif
|
|
@ -1,171 +0,0 @@
|
|||||||
//=====================================================================
|
|
||||||
//
|
|
||||||
// FastMemcpy.c - skywind3000@163.com, 2015
|
|
||||||
//
|
|
||||||
// feature:
|
|
||||||
// 50% speed up in avg. vs standard memcpy (tested in vc2012/gcc4.9)
|
|
||||||
//
|
|
||||||
//=====================================================================
|
|
||||||
#include <stdio.h>
|
|
||||||
#include <stdlib.h>
|
|
||||||
#include <string.h>
|
|
||||||
#include <time.h>
|
|
||||||
#include <assert.h>
|
|
||||||
|
|
||||||
#if (defined(_WIN32) || defined(WIN32))
|
|
||||||
#include <windows.h>
|
|
||||||
#include <mmsystem.h>
|
|
||||||
#ifdef _MSC_VER
|
|
||||||
#pragma comment(lib, "winmm.lib")
|
|
||||||
#endif
|
|
||||||
#elif defined(__unix)
|
|
||||||
#include <sys/time.h>
|
|
||||||
#include <unistd.h>
|
|
||||||
#else
|
|
||||||
#error it can only be compiled under windows or unix
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#include "FastMemcpy_Avx.h"
|
|
||||||
|
|
||||||
|
|
||||||
unsigned int gettime()
|
|
||||||
{
|
|
||||||
#if (defined(_WIN32) || defined(WIN32))
|
|
||||||
return timeGetTime();
|
|
||||||
#else
|
|
||||||
static struct timezone tz={ 0,0 };
|
|
||||||
struct timeval time;
|
|
||||||
gettimeofday(&time,&tz);
|
|
||||||
return (time.tv_sec * 1000 + time.tv_usec / 1000);
|
|
||||||
#endif
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Block the calling thread for (at least) `millisec` milliseconds. */
void sleepms(unsigned int millisec)
{
#if defined(_WIN32) || defined(WIN32)
    Sleep(millisec);
#else
    usleep(1000 * millisec); /* usleep takes microseconds */
#endif
}
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
/* Time `times` iterations of libc memcpy vs memcpy_fast over `size` bytes.
 *
 * dstalign/srcalign: non-zero = 64-byte aligned buffer, zero = deliberately
 * misaligned (dst + 1, src + 3). Prints both timings in milliseconds.
 */
void benchmark(int dstalign, int srcalign, size_t size, int times)
{
    char *DATA1 = (char*)malloc(size + 64);
    char *DATA2 = (char*)malloc(size + 64);
    /* Fix: the original dereferenced the buffers without checking that
     * malloc succeeded. free(NULL) is a no-op, so both can be freed. */
    if (DATA1 == NULL || DATA2 == NULL) {
        free(DATA1);
        free(DATA2);
        printf("benchmark: out of memory\n");
        return;
    }
    size_t LINEAR1 = ((size_t)DATA1);
    size_t LINEAR2 = ((size_t)DATA2);
    /* Round each base pointer up to the next 64-byte boundary. */
    char *ALIGN1 = (char*)(((64 - (LINEAR1 & 63)) & 63) + LINEAR1);
    char *ALIGN2 = (char*)(((64 - (LINEAR2 & 63)) & 63) + LINEAR2);
    char *dst = (dstalign)? ALIGN1 : (ALIGN1 + 1);
    char *src = (srcalign)? ALIGN2 : (ALIGN2 + 3);
    unsigned int t1, t2;
    int k;

    sleepms(100); /* settle the machine before each timed section */
    t1 = gettime();
    for (k = times; k > 0; k--) {
        memcpy(dst, src, size);
    }
    t1 = gettime() - t1;
    sleepms(100);
    t2 = gettime();
    for (k = times; k > 0; k--) {
        memcpy_fast(dst, src, size);
    }
    t2 = gettime() - t2;

    free(DATA1);
    free(DATA2);

    printf("result(dst %s, src %s): memcpy_fast=%dms memcpy=%d ms\n",
        dstalign? "aligned" : "unalign",
        srcalign? "aligned" : "unalign", (int)t2, (int)t1);
}
|
|
||||||
|
|
||||||
|
|
||||||
/* Run all four dst/src alignment combinations for one copy size. */
void bench(int copysize, int times)
{
    printf("benchmark(size=%d bytes, times=%d):\n", copysize, times);
    int da, sa;
    /* Same order as before: (1,1), (1,0), (0,1), (0,0). */
    for (da = 1; da >= 0; da--) {
        for (sa = 1; sa >= 0; sa--) {
            benchmark(da, sa, copysize, times);
        }
    }
    printf("\n");
}
|
|
||||||
|
|
||||||
|
|
||||||
/* Benchmark memcpy vs memcpy_fast over pseudo-random offsets and sizes
 * to approximate real access patterns.
 *
 * maxsize: upper bound (inclusive) for each random copy size, in bytes.
 * times:   number of copies per timed loop.
 */
void random_bench(int maxsize, int times)
{
    static char A[11 * 1024 * 1024 + 2];
    static char B[11 * 1024 * 1024 + 2];
    static int random_offsets[0x10000];
    static int random_sizes[0x8000];
    unsigned int i, p1, p2;
    unsigned int t1, t2;
    /* Fix: maxsize <= 0 made `rand() % maxsize` divide by zero (UB), and
     * maxsize > 1 MiB could run past the ends of A/B since offsets reach
     * 10 MiB into the 11 MiB buffers. times <= 0 also made the unsigned
     * loop comparison below effectively unbounded. */
    if (times <= 0 || maxsize <= 0 || maxsize > 1024 * 1024) {
        printf("random_bench: invalid arguments\n");
        return;
    }
    for (i = 0; i < 0x10000; i++) { // generate random offsets
        random_offsets[i] = rand() % (10 * 1024 * 1024 + 1);
    }
    for (i = 0; i < 0x8000; i++) { // generate random sizes
        random_sizes[i] = 1 + rand() % maxsize;
    }
    sleepms(100);
    t1 = gettime();
    for (p1 = 0, p2 = 0, i = 0; i < (unsigned int)times; i++) {
        int offset1 = random_offsets[(p1++) & 0xffff];
        int offset2 = random_offsets[(p1++) & 0xffff];
        int size = random_sizes[(p2++) & 0x7fff];
        memcpy(A + offset1, B + offset2, size);
    }
    t1 = gettime() - t1;
    sleepms(100);
    t2 = gettime();
    for (p1 = 0, p2 = 0, i = 0; i < (unsigned int)times; i++) {
        int offset1 = random_offsets[(p1++) & 0xffff];
        int offset2 = random_offsets[(p1++) & 0xffff];
        int size = random_sizes[(p2++) & 0x7fff];
        memcpy_fast(A + offset1, B + offset2, size);
    }
    t2 = gettime() - t2;
    printf("benchmark random access:\n");
    printf("memcpy_fast=%dms memcpy=%dms\n\n", (int)t2, (int)t1);
}
|
|
||||||
|
|
||||||
|
|
||||||
#ifdef _MSC_VER
|
|
||||||
#pragma comment(lib, "winmm.lib")
|
|
||||||
#endif
|
|
||||||
|
|
||||||
/* Drive the fixed-size benchmarks (tiny to 8 MiB blocks), then the
 * random-access benchmark. Same sizes and iteration counts as before. */
int main(void)
{
    static const struct { int size; int times; } runs[] = {
        { 32,              0x1000000 },
        { 64,              0x1000000 },
        { 512,             0x800000  },
        { 1024,            0x400000  },
        { 4096,            0x80000   },
        { 8192,            0x40000   },
        { 1024 * 1024 * 1, 0x800     },
        { 1024 * 1024 * 4, 0x200     },
        { 1024 * 1024 * 8, 0x100     },
    };
    size_t i;
    for (i = 0; i < sizeof(runs) / sizeof(runs[0]); i++) {
        bench(runs[i].size, runs[i].times);
    }

    random_bench(2048, 8000000);

    return 0;
}
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
/*
|
|
||||||
|
|
||||||
*/
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
@ -1,492 +0,0 @@
|
|||||||
//=====================================================================
|
|
||||||
//
|
|
||||||
// FastMemcpy.c - skywind3000@163.com, 2015
|
|
||||||
//
|
|
||||||
// feature:
|
|
||||||
// 50% speed up in avg. vs standard memcpy (tested in vc2012/gcc5.1)
|
|
||||||
//
|
|
||||||
//=====================================================================
|
|
||||||
#ifndef __FAST_MEMCPY_H__
|
|
||||||
#define __FAST_MEMCPY_H__
|
|
||||||
|
|
||||||
#include <stddef.h>
|
|
||||||
#include <stdint.h>
|
|
||||||
#include <immintrin.h>
|
|
||||||
|
|
||||||
|
|
||||||
//---------------------------------------------------------------------
|
|
||||||
// force inline for compilers
|
|
||||||
//---------------------------------------------------------------------
|
|
||||||
#ifndef INLINE
|
|
||||||
#ifdef __GNUC__
|
|
||||||
#if (__GNUC__ > 3) || ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 1))
|
|
||||||
#define INLINE __inline__ __attribute__((always_inline))
|
|
||||||
#else
|
|
||||||
#define INLINE __inline__
|
|
||||||
#endif
|
|
||||||
#elif defined(_MSC_VER)
|
|
||||||
#define INLINE __forceinline
|
|
||||||
#elif (defined(__BORLANDC__) || defined(__WATCOMC__))
|
|
||||||
#define INLINE __inline
|
|
||||||
#else
|
|
||||||
#define INLINE
|
|
||||||
#endif
|
|
||||||
#endif
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
//---------------------------------------------------------------------
|
|
||||||
// fast copy for different sizes
|
|
||||||
//---------------------------------------------------------------------
|
|
||||||
/* Copy exactly 16 bytes with one unaligned 128-bit load/store pair.
 * (The original carried a dead `#else` scalar branch behind `#if 1`;
 * only this SSE path was ever compiled.) */
static INLINE void memcpy_avx_16(void *dst, const void *src) {
    _mm_storeu_si128((__m128i*)dst, _mm_loadu_si128((const __m128i*)src));
}
|
|
||||||
|
|
||||||
/* Copy exactly 32 bytes with one unaligned 256-bit load/store pair. */
static INLINE void memcpy_avx_32(void *dst, const void *src) {
    __m256i chunk = _mm256_loadu_si256((const __m256i*)src);
    _mm256_storeu_si256((__m256i*)dst, chunk);
}
|
|
||||||
|
|
||||||
/* Copy exactly 64 bytes: both halves are loaded before either store,
 * matching the original load-all-then-store-all ordering. */
static INLINE void memcpy_avx_64(void *dst, const void *src) {
    const __m256i *in = (const __m256i*)src;
    __m256i *out = (__m256i*)dst;
    __m256i lo = _mm256_loadu_si256(in + 0);
    __m256i hi = _mm256_loadu_si256(in + 1);
    _mm256_storeu_si256(out + 0, lo);
    _mm256_storeu_si256(out + 1, hi);
}
|
|
||||||
|
|
||||||
/* Copy exactly 128 bytes as four 256-bit chunks. All loads are issued
 * before any store, preserving the original's ordering. */
static INLINE void memcpy_avx_128(void *dst, const void *src) {
    __m256i chunk[4];
    int i;
    for (i = 0; i < 4; i++) {
        chunk[i] = _mm256_loadu_si256(((const __m256i*)src) + i);
    }
    for (i = 0; i < 4; i++) {
        _mm256_storeu_si256(((__m256i*)dst) + i, chunk[i]);
    }
}
|
|
||||||
|
|
||||||
/* Copy exactly 256 bytes as eight 256-bit chunks. All loads are issued
 * before any store, preserving the original's ordering. */
static INLINE void memcpy_avx_256(void *dst, const void *src) {
    __m256i chunk[8];
    int i;
    for (i = 0; i < 8; i++) {
        chunk[i] = _mm256_loadu_si256(((const __m256i*)src) + i);
    }
    for (i = 0; i < 8; i++) {
        _mm256_storeu_si256(((__m256i*)dst) + i, chunk[i]);
    }
}
|
|
||||||
|
|
||||||
|
|
||||||
//---------------------------------------------------------------------
|
|
||||||
// tiny memory copy with jump table optimized
|
|
||||||
//---------------------------------------------------------------------
|
|
||||||
static INLINE void *memcpy_tiny(void *dst, const void *src, size_t size) {
|
|
||||||
unsigned char *dd = ((unsigned char*)dst) + size;
|
|
||||||
const unsigned char *ss = ((const unsigned char*)src) + size;
|
|
||||||
|
|
||||||
switch (size) {
|
|
||||||
case 128: memcpy_avx_128(dd - 128, ss - 128);
|
|
||||||
case 0: break;
|
|
||||||
case 129: memcpy_avx_128(dd - 129, ss - 129);
|
|
||||||
case 1: dd[-1] = ss[-1]; break;
|
|
||||||
case 130: memcpy_avx_128(dd - 130, ss - 130);
|
|
||||||
case 2: *((uint16_t*)(dd - 2)) = *((uint16_t*)(ss - 2)); break;
|
|
||||||
case 131: memcpy_avx_128(dd - 131, ss - 131);
|
|
||||||
case 3: *((uint16_t*)(dd - 3)) = *((uint16_t*)(ss - 3)); dd[-1] = ss[-1]; break;
|
|
||||||
case 132: memcpy_avx_128(dd - 132, ss - 132);
|
|
||||||
case 4: *((uint32_t*)(dd - 4)) = *((uint32_t*)(ss - 4)); break;
|
|
||||||
case 133: memcpy_avx_128(dd - 133, ss - 133);
|
|
||||||
case 5: *((uint32_t*)(dd - 5)) = *((uint32_t*)(ss - 5)); dd[-1] = ss[-1]; break;
|
|
||||||
case 134: memcpy_avx_128(dd - 134, ss - 134);
|
|
||||||
case 6: *((uint32_t*)(dd - 6)) = *((uint32_t*)(ss - 6)); *((uint16_t*)(dd - 2)) = *((uint16_t*)(ss - 2)); break;
|
|
||||||
case 135: memcpy_avx_128(dd - 135, ss - 135);
|
|
||||||
case 7: *((uint32_t*)(dd - 7)) = *((uint32_t*)(ss - 7)); *((uint32_t*)(dd - 4)) = *((uint32_t*)(ss - 4)); break;
|
|
||||||
case 136: memcpy_avx_128(dd - 136, ss - 136);
|
|
||||||
case 8: *((uint64_t*)(dd - 8)) = *((uint64_t*)(ss - 8)); break;
|
|
||||||
case 137: memcpy_avx_128(dd - 137, ss - 137);
|
|
||||||
case 9: *((uint64_t*)(dd - 9)) = *((uint64_t*)(ss - 9)); dd[-1] = ss[-1]; break;
|
|
||||||
case 138: memcpy_avx_128(dd - 138, ss - 138);
|
|
||||||
case 10: *((uint64_t*)(dd - 10)) = *((uint64_t*)(ss - 10)); *((uint16_t*)(dd - 2)) = *((uint16_t*)(ss - 2)); break;
|
|
||||||
case 139: memcpy_avx_128(dd - 139, ss - 139);
|
|
||||||
case 11: *((uint64_t*)(dd - 11)) = *((uint64_t*)(ss - 11)); *((uint32_t*)(dd - 4)) = *((uint32_t*)(ss - 4)); break;
|
|
||||||
case 140: memcpy_avx_128(dd - 140, ss - 140);
|
|
||||||
case 12: *((uint64_t*)(dd - 12)) = *((uint64_t*)(ss - 12)); *((uint32_t*)(dd - 4)) = *((uint32_t*)(ss - 4)); break;
|
|
||||||
case 141: memcpy_avx_128(dd - 141, ss - 141);
|
|
||||||
case 13: *((uint64_t*)(dd - 13)) = *((uint64_t*)(ss - 13)); *((uint64_t*)(dd - 8)) = *((uint64_t*)(ss - 8)); break;
|
|
||||||
case 142: memcpy_avx_128(dd - 142, ss - 142);
|
|
||||||
case 14: *((uint64_t*)(dd - 14)) = *((uint64_t*)(ss - 14)); *((uint64_t*)(dd - 8)) = *((uint64_t*)(ss - 8)); break;
|
|
||||||
case 143: memcpy_avx_128(dd - 143, ss - 143);
|
|
||||||
case 15: *((uint64_t*)(dd - 15)) = *((uint64_t*)(ss - 15)); *((uint64_t*)(dd - 8)) = *((uint64_t*)(ss - 8)); break;
|
|
||||||
case 144: memcpy_avx_128(dd - 144, ss - 144);
|
|
||||||
case 16: memcpy_avx_16(dd - 16, ss - 16); break;
|
|
||||||
case 145: memcpy_avx_128(dd - 145, ss - 145);
|
|
||||||
case 17: memcpy_avx_16(dd - 17, ss - 17); dd[-1] = ss[-1]; break;
|
|
||||||
case 146: memcpy_avx_128(dd - 146, ss - 146);
|
|
||||||
case 18: memcpy_avx_16(dd - 18, ss - 18); *((uint16_t*)(dd - 2)) = *((uint16_t*)(ss - 2)); break;
|
|
||||||
case 147: memcpy_avx_128(dd - 147, ss - 147);
|
|
||||||
case 19: memcpy_avx_16(dd - 19, ss - 19); *((uint32_t*)(dd - 4)) = *((uint32_t*)(ss - 4)); break;
|
|
||||||
case 148: memcpy_avx_128(dd - 148, ss - 148);
|
|
||||||
case 20: memcpy_avx_16(dd - 20, ss - 20); *((uint32_t*)(dd - 4)) = *((uint32_t*)(ss - 4)); break;
|
|
||||||
case 149: memcpy_avx_128(dd - 149, ss - 149);
|
|
||||||
case 21: memcpy_avx_16(dd - 21, ss - 21); *((uint64_t*)(dd - 8)) = *((uint64_t*)(ss - 8)); break;
|
|
||||||
case 150: memcpy_avx_128(dd - 150, ss - 150);
|
|
||||||
case 22: memcpy_avx_16(dd - 22, ss - 22); *((uint64_t*)(dd - 8)) = *((uint64_t*)(ss - 8)); break;
|
|
||||||
case 151: memcpy_avx_128(dd - 151, ss - 151);
|
|
||||||
case 23: memcpy_avx_16(dd - 23, ss - 23); *((uint64_t*)(dd - 8)) = *((uint64_t*)(ss - 8)); break;
|
|
||||||
case 152: memcpy_avx_128(dd - 152, ss - 152);
|
|
||||||
case 24: memcpy_avx_16(dd - 24, ss - 24); *((uint64_t*)(dd - 8)) = *((uint64_t*)(ss - 8)); break;
|
|
||||||
case 153: memcpy_avx_128(dd - 153, ss - 153);
|
|
||||||
case 25: memcpy_avx_16(dd - 25, ss - 25); memcpy_avx_16(dd - 16, ss - 16); break;
|
|
||||||
case 154: memcpy_avx_128(dd - 154, ss - 154);
|
|
||||||
case 26: memcpy_avx_16(dd - 26, ss - 26); memcpy_avx_16(dd - 16, ss - 16); break;
|
|
||||||
case 155: memcpy_avx_128(dd - 155, ss - 155);
|
|
||||||
case 27: memcpy_avx_16(dd - 27, ss - 27); memcpy_avx_16(dd - 16, ss - 16); break;
|
|
||||||
case 156: memcpy_avx_128(dd - 156, ss - 156);
|
|
||||||
case 28: memcpy_avx_16(dd - 28, ss - 28); memcpy_avx_16(dd - 16, ss - 16); break;
|
|
||||||
case 157: memcpy_avx_128(dd - 157, ss - 157);
|
|
||||||
case 29: memcpy_avx_16(dd - 29, ss - 29); memcpy_avx_16(dd - 16, ss - 16); break;
|
|
||||||
case 158: memcpy_avx_128(dd - 158, ss - 158);
|
|
||||||
case 30: memcpy_avx_16(dd - 30, ss - 30); memcpy_avx_16(dd - 16, ss - 16); break;
|
|
||||||
case 159: memcpy_avx_128(dd - 159, ss - 159);
|
|
||||||
case 31: memcpy_avx_16(dd - 31, ss - 31); memcpy_avx_16(dd - 16, ss - 16); break;
|
|
||||||
case 160: memcpy_avx_128(dd - 160, ss - 160);
|
|
||||||
case 32: memcpy_avx_32(dd - 32, ss - 32); break;
|
|
||||||
case 161: memcpy_avx_128(dd - 161, ss - 161);
|
|
||||||
case 33: memcpy_avx_32(dd - 33, ss - 33); dd[-1] = ss[-1]; break;
|
|
||||||
case 162: memcpy_avx_128(dd - 162, ss - 162);
|
|
||||||
case 34: memcpy_avx_32(dd - 34, ss - 34); *((uint16_t*)(dd - 2)) = *((uint16_t*)(ss - 2)); break;
|
|
||||||
case 163: memcpy_avx_128(dd - 163, ss - 163);
|
|
||||||
case 35: memcpy_avx_32(dd - 35, ss - 35); *((uint32_t*)(dd - 4)) = *((uint32_t*)(ss - 4)); break;
|
|
||||||
case 164: memcpy_avx_128(dd - 164, ss - 164);
|
|
||||||
case 36: memcpy_avx_32(dd - 36, ss - 36); *((uint32_t*)(dd - 4)) = *((uint32_t*)(ss - 4)); break;
|
|
||||||
case 165: memcpy_avx_128(dd - 165, ss - 165);
|
|
||||||
case 37: memcpy_avx_32(dd - 37, ss - 37); *((uint64_t*)(dd - 8)) = *((uint64_t*)(ss - 8)); break;
|
|
||||||
case 166: memcpy_avx_128(dd - 166, ss - 166);
|
|
||||||
case 38: memcpy_avx_32(dd - 38, ss - 38); *((uint64_t*)(dd - 8)) = *((uint64_t*)(ss - 8)); break;
|
|
||||||
case 167: memcpy_avx_128(dd - 167, ss - 167);
|
|
||||||
case 39: memcpy_avx_32(dd - 39, ss - 39); *((uint64_t*)(dd - 8)) = *((uint64_t*)(ss - 8)); break;
|
|
||||||
case 168: memcpy_avx_128(dd - 168, ss - 168);
|
|
||||||
case 40: memcpy_avx_32(dd - 40, ss - 40); *((uint64_t*)(dd - 8)) = *((uint64_t*)(ss - 8)); break;
|
|
||||||
case 169: memcpy_avx_128(dd - 169, ss - 169);
|
|
||||||
case 41: memcpy_avx_32(dd - 41, ss - 41); memcpy_avx_16(dd - 16, ss - 16); break;
|
|
||||||
case 170: memcpy_avx_128(dd - 170, ss - 170);
|
|
||||||
case 42: memcpy_avx_32(dd - 42, ss - 42); memcpy_avx_16(dd - 16, ss - 16); break;
|
|
||||||
case 171: memcpy_avx_128(dd - 171, ss - 171);
|
|
||||||
case 43: memcpy_avx_32(dd - 43, ss - 43); memcpy_avx_16(dd - 16, ss - 16); break;
|
|
||||||
case 172: memcpy_avx_128(dd - 172, ss - 172);
|
|
||||||
case 44: memcpy_avx_32(dd - 44, ss - 44); memcpy_avx_16(dd - 16, ss - 16); break;
|
|
||||||
case 173: memcpy_avx_128(dd - 173, ss - 173);
|
|
||||||
case 45: memcpy_avx_32(dd - 45, ss - 45); memcpy_avx_16(dd - 16, ss - 16); break;
|
|
||||||
case 174: memcpy_avx_128(dd - 174, ss - 174);
|
|
||||||
case 46: memcpy_avx_32(dd - 46, ss - 46); memcpy_avx_16(dd - 16, ss - 16); break;
|
|
||||||
case 175: memcpy_avx_128(dd - 175, ss - 175);
|
|
||||||
case 47: memcpy_avx_32(dd - 47, ss - 47); memcpy_avx_16(dd - 16, ss - 16); break;
|
|
||||||
case 176: memcpy_avx_128(dd - 176, ss - 176);
|
|
||||||
case 48: memcpy_avx_32(dd - 48, ss - 48); memcpy_avx_16(dd - 16, ss - 16); break;
|
|
||||||
case 177: memcpy_avx_128(dd - 177, ss - 177);
|
|
||||||
case 49: memcpy_avx_32(dd - 49, ss - 49); memcpy_avx_32(dd - 32, ss - 32); break;
|
|
||||||
case 178: memcpy_avx_128(dd - 178, ss - 178);
|
|
||||||
case 50: memcpy_avx_32(dd - 50, ss - 50); memcpy_avx_32(dd - 32, ss - 32); break;
|
|
||||||
case 179: memcpy_avx_128(dd - 179, ss - 179);
|
|
||||||
case 51: memcpy_avx_32(dd - 51, ss - 51); memcpy_avx_32(dd - 32, ss - 32); break;
|
|
||||||
case 180: memcpy_avx_128(dd - 180, ss - 180);
|
|
||||||
case 52: memcpy_avx_32(dd - 52, ss - 52); memcpy_avx_32(dd - 32, ss - 32); break;
|
|
||||||
case 181: memcpy_avx_128(dd - 181, ss - 181);
|
|
||||||
case 53: memcpy_avx_32(dd - 53, ss - 53); memcpy_avx_32(dd - 32, ss - 32); break;
|
|
||||||
case 182: memcpy_avx_128(dd - 182, ss - 182);
|
|
||||||
case 54: memcpy_avx_32(dd - 54, ss - 54); memcpy_avx_32(dd - 32, ss - 32); break;
|
|
||||||
case 183: memcpy_avx_128(dd - 183, ss - 183);
|
|
||||||
case 55: memcpy_avx_32(dd - 55, ss - 55); memcpy_avx_32(dd - 32, ss - 32); break;
|
|
||||||
case 184: memcpy_avx_128(dd - 184, ss - 184);
|
|
||||||
case 56: memcpy_avx_32(dd - 56, ss - 56); memcpy_avx_32(dd - 32, ss - 32); break;
|
|
||||||
case 185: memcpy_avx_128(dd - 185, ss - 185);
|
|
||||||
case 57: memcpy_avx_32(dd - 57, ss - 57); memcpy_avx_32(dd - 32, ss - 32); break;
|
|
||||||
case 186: memcpy_avx_128(dd - 186, ss - 186);
|
|
||||||
case 58: memcpy_avx_32(dd - 58, ss - 58); memcpy_avx_32(dd - 32, ss - 32); break;
|
|
||||||
case 187: memcpy_avx_128(dd - 187, ss - 187);
|
|
||||||
case 59: memcpy_avx_32(dd - 59, ss - 59); memcpy_avx_32(dd - 32, ss - 32); break;
|
|
||||||
case 188: memcpy_avx_128(dd - 188, ss - 188);
|
|
||||||
case 60: memcpy_avx_32(dd - 60, ss - 60); memcpy_avx_32(dd - 32, ss - 32); break;
|
|
||||||
case 189: memcpy_avx_128(dd - 189, ss - 189);
|
|
||||||
case 61: memcpy_avx_32(dd - 61, ss - 61); memcpy_avx_32(dd - 32, ss - 32); break;
|
|
||||||
case 190: memcpy_avx_128(dd - 190, ss - 190);
|
|
||||||
case 62: memcpy_avx_32(dd - 62, ss - 62); memcpy_avx_32(dd - 32, ss - 32); break;
|
|
||||||
case 191: memcpy_avx_128(dd - 191, ss - 191);
|
|
||||||
case 63: memcpy_avx_32(dd - 63, ss - 63); memcpy_avx_32(dd - 32, ss - 32); break;
|
|
||||||
case 192: memcpy_avx_128(dd - 192, ss - 192);
|
|
||||||
case 64: memcpy_avx_64(dd - 64, ss - 64); break;
|
|
||||||
case 193: memcpy_avx_128(dd - 193, ss - 193);
|
|
||||||
case 65: memcpy_avx_64(dd - 65, ss - 65); dd[-1] = ss[-1]; break;
|
|
||||||
case 194: memcpy_avx_128(dd - 194, ss - 194);
|
|
||||||
case 66: memcpy_avx_64(dd - 66, ss - 66); *((uint16_t*)(dd - 2)) = *((uint16_t*)(ss - 2)); break;
|
|
||||||
case 195: memcpy_avx_128(dd - 195, ss - 195);
|
|
||||||
case 67: memcpy_avx_64(dd - 67, ss - 67); *((uint32_t*)(dd - 4)) = *((uint32_t*)(ss - 4)); break;
|
|
||||||
case 196: memcpy_avx_128(dd - 196, ss - 196);
|
|
||||||
case 68: memcpy_avx_64(dd - 68, ss - 68); *((uint32_t*)(dd - 4)) = *((uint32_t*)(ss - 4)); break;
|
|
||||||
case 197: memcpy_avx_128(dd - 197, ss - 197);
|
|
||||||
case 69: memcpy_avx_64(dd - 69, ss - 69); *((uint64_t*)(dd - 8)) = *((uint64_t*)(ss - 8)); break;
|
|
||||||
case 198: memcpy_avx_128(dd - 198, ss - 198);
|
|
||||||
case 70: memcpy_avx_64(dd - 70, ss - 70); *((uint64_t*)(dd - 8)) = *((uint64_t*)(ss - 8)); break;
|
|
||||||
case 199: memcpy_avx_128(dd - 199, ss - 199);
|
|
||||||
case 71: memcpy_avx_64(dd - 71, ss - 71); *((uint64_t*)(dd - 8)) = *((uint64_t*)(ss - 8)); break;
|
|
||||||
case 200: memcpy_avx_128(dd - 200, ss - 200);
|
|
||||||
case 72: memcpy_avx_64(dd - 72, ss - 72); *((uint64_t*)(dd - 8)) = *((uint64_t*)(ss - 8)); break;
|
|
||||||
case 201: memcpy_avx_128(dd - 201, ss - 201);
|
|
||||||
case 73: memcpy_avx_64(dd - 73, ss - 73); memcpy_avx_16(dd - 16, ss - 16); break;
|
|
||||||
case 202: memcpy_avx_128(dd - 202, ss - 202);
|
|
||||||
case 74: memcpy_avx_64(dd - 74, ss - 74); memcpy_avx_16(dd - 16, ss - 16); break;
|
|
||||||
case 203: memcpy_avx_128(dd - 203, ss - 203);
|
|
||||||
case 75: memcpy_avx_64(dd - 75, ss - 75); memcpy_avx_16(dd - 16, ss - 16); break;
|
|
||||||
case 204: memcpy_avx_128(dd - 204, ss - 204);
|
|
||||||
case 76: memcpy_avx_64(dd - 76, ss - 76); memcpy_avx_16(dd - 16, ss - 16); break;
|
|
||||||
case 205: memcpy_avx_128(dd - 205, ss - 205);
|
|
||||||
case 77: memcpy_avx_64(dd - 77, ss - 77); memcpy_avx_16(dd - 16, ss - 16); break;
|
|
||||||
case 206: memcpy_avx_128(dd - 206, ss - 206);
|
|
||||||
case 78: memcpy_avx_64(dd - 78, ss - 78); memcpy_avx_16(dd - 16, ss - 16); break;
|
|
||||||
case 207: memcpy_avx_128(dd - 207, ss - 207);
|
|
||||||
case 79: memcpy_avx_64(dd - 79, ss - 79); memcpy_avx_16(dd - 16, ss - 16); break;
|
|
||||||
case 208: memcpy_avx_128(dd - 208, ss - 208);
|
|
||||||
case 80: memcpy_avx_64(dd - 80, ss - 80); memcpy_avx_16(dd - 16, ss - 16); break;
|
|
||||||
case 209: memcpy_avx_128(dd - 209, ss - 209);
|
|
||||||
case 81: memcpy_avx_64(dd - 81, ss - 81); memcpy_avx_32(dd - 32, ss - 32); break;
|
|
||||||
case 210: memcpy_avx_128(dd - 210, ss - 210);
|
|
||||||
case 82: memcpy_avx_64(dd - 82, ss - 82); memcpy_avx_32(dd - 32, ss - 32); break;
|
|
||||||
case 211: memcpy_avx_128(dd - 211, ss - 211);
|
|
||||||
case 83: memcpy_avx_64(dd - 83, ss - 83); memcpy_avx_32(dd - 32, ss - 32); break;
|
|
||||||
case 212: memcpy_avx_128(dd - 212, ss - 212);
|
|
||||||
case 84: memcpy_avx_64(dd - 84, ss - 84); memcpy_avx_32(dd - 32, ss - 32); break;
|
|
||||||
case 213: memcpy_avx_128(dd - 213, ss - 213);
|
|
||||||
case 85: memcpy_avx_64(dd - 85, ss - 85); memcpy_avx_32(dd - 32, ss - 32); break;
|
|
||||||
case 214: memcpy_avx_128(dd - 214, ss - 214);
|
|
||||||
case 86: memcpy_avx_64(dd - 86, ss - 86); memcpy_avx_32(dd - 32, ss - 32); break;
|
|
||||||
case 215: memcpy_avx_128(dd - 215, ss - 215);
|
|
||||||
case 87: memcpy_avx_64(dd - 87, ss - 87); memcpy_avx_32(dd - 32, ss - 32); break;
|
|
||||||
case 216: memcpy_avx_128(dd - 216, ss - 216);
|
|
||||||
case 88: memcpy_avx_64(dd - 88, ss - 88); memcpy_avx_32(dd - 32, ss - 32); break;
|
|
||||||
case 217: memcpy_avx_128(dd - 217, ss - 217);
|
|
||||||
case 89: memcpy_avx_64(dd - 89, ss - 89); memcpy_avx_32(dd - 32, ss - 32); break;
|
|
||||||
case 218: memcpy_avx_128(dd - 218, ss - 218);
|
|
||||||
case 90: memcpy_avx_64(dd - 90, ss - 90); memcpy_avx_32(dd - 32, ss - 32); break;
|
|
||||||
case 219: memcpy_avx_128(dd - 219, ss - 219);
|
|
||||||
case 91: memcpy_avx_64(dd - 91, ss - 91); memcpy_avx_32(dd - 32, ss - 32); break;
|
|
||||||
case 220: memcpy_avx_128(dd - 220, ss - 220);
|
|
||||||
case 92: memcpy_avx_64(dd - 92, ss - 92); memcpy_avx_32(dd - 32, ss - 32); break;
|
|
||||||
case 221: memcpy_avx_128(dd - 221, ss - 221);
|
|
||||||
case 93: memcpy_avx_64(dd - 93, ss - 93); memcpy_avx_32(dd - 32, ss - 32); break;
|
|
||||||
case 222: memcpy_avx_128(dd - 222, ss - 222);
|
|
||||||
case 94: memcpy_avx_64(dd - 94, ss - 94); memcpy_avx_32(dd - 32, ss - 32); break;
|
|
||||||
case 223: memcpy_avx_128(dd - 223, ss - 223);
|
|
||||||
case 95: memcpy_avx_64(dd - 95, ss - 95); memcpy_avx_32(dd - 32, ss - 32); break;
|
|
||||||
case 224: memcpy_avx_128(dd - 224, ss - 224);
|
|
||||||
case 96: memcpy_avx_64(dd - 96, ss - 96); memcpy_avx_32(dd - 32, ss - 32); break;
|
|
||||||
case 225: memcpy_avx_128(dd - 225, ss - 225);
|
|
||||||
case 97: memcpy_avx_64(dd - 97, ss - 97); memcpy_avx_64(dd - 64, ss - 64); break;
|
|
||||||
case 226: memcpy_avx_128(dd - 226, ss - 226);
|
|
||||||
case 98: memcpy_avx_64(dd - 98, ss - 98); memcpy_avx_64(dd - 64, ss - 64); break;
|
|
||||||
case 227: memcpy_avx_128(dd - 227, ss - 227);
|
|
||||||
case 99: memcpy_avx_64(dd - 99, ss - 99); memcpy_avx_64(dd - 64, ss - 64); break;
|
|
||||||
case 228: memcpy_avx_128(dd - 228, ss - 228);
|
|
||||||
case 100: memcpy_avx_64(dd - 100, ss - 100); memcpy_avx_64(dd - 64, ss - 64); break;
|
|
||||||
case 229: memcpy_avx_128(dd - 229, ss - 229);
|
|
||||||
case 101: memcpy_avx_64(dd - 101, ss - 101); memcpy_avx_64(dd - 64, ss - 64); break;
|
|
||||||
case 230: memcpy_avx_128(dd - 230, ss - 230);
|
|
||||||
case 102: memcpy_avx_64(dd - 102, ss - 102); memcpy_avx_64(dd - 64, ss - 64); break;
|
|
||||||
case 231: memcpy_avx_128(dd - 231, ss - 231);
|
|
||||||
case 103: memcpy_avx_64(dd - 103, ss - 103); memcpy_avx_64(dd - 64, ss - 64); break;
|
|
||||||
case 232: memcpy_avx_128(dd - 232, ss - 232);
|
|
||||||
case 104: memcpy_avx_64(dd - 104, ss - 104); memcpy_avx_64(dd - 64, ss - 64); break;
|
|
||||||
case 233: memcpy_avx_128(dd - 233, ss - 233);
|
|
||||||
case 105: memcpy_avx_64(dd - 105, ss - 105); memcpy_avx_64(dd - 64, ss - 64); break;
|
|
||||||
case 234: memcpy_avx_128(dd - 234, ss - 234);
|
|
||||||
case 106: memcpy_avx_64(dd - 106, ss - 106); memcpy_avx_64(dd - 64, ss - 64); break;
|
|
||||||
case 235: memcpy_avx_128(dd - 235, ss - 235);
|
|
||||||
case 107: memcpy_avx_64(dd - 107, ss - 107); memcpy_avx_64(dd - 64, ss - 64); break;
|
|
||||||
case 236: memcpy_avx_128(dd - 236, ss - 236);
|
|
||||||
case 108: memcpy_avx_64(dd - 108, ss - 108); memcpy_avx_64(dd - 64, ss - 64); break;
|
|
||||||
case 237: memcpy_avx_128(dd - 237, ss - 237);
|
|
||||||
case 109: memcpy_avx_64(dd - 109, ss - 109); memcpy_avx_64(dd - 64, ss - 64); break;
|
|
||||||
case 238: memcpy_avx_128(dd - 238, ss - 238);
|
|
||||||
case 110: memcpy_avx_64(dd - 110, ss - 110); memcpy_avx_64(dd - 64, ss - 64); break;
|
|
||||||
case 239: memcpy_avx_128(dd - 239, ss - 239);
|
|
||||||
case 111: memcpy_avx_64(dd - 111, ss - 111); memcpy_avx_64(dd - 64, ss - 64); break;
|
|
||||||
case 240: memcpy_avx_128(dd - 240, ss - 240);
|
|
||||||
case 112: memcpy_avx_64(dd - 112, ss - 112); memcpy_avx_64(dd - 64, ss - 64); break;
|
|
||||||
case 241: memcpy_avx_128(dd - 241, ss - 241);
|
|
||||||
case 113: memcpy_avx_64(dd - 113, ss - 113); memcpy_avx_64(dd - 64, ss - 64); break;
|
|
||||||
case 242: memcpy_avx_128(dd - 242, ss - 242);
|
|
||||||
case 114: memcpy_avx_64(dd - 114, ss - 114); memcpy_avx_64(dd - 64, ss - 64); break;
|
|
||||||
case 243: memcpy_avx_128(dd - 243, ss - 243);
|
|
||||||
case 115: memcpy_avx_64(dd - 115, ss - 115); memcpy_avx_64(dd - 64, ss - 64); break;
|
|
||||||
case 244: memcpy_avx_128(dd - 244, ss - 244);
|
|
||||||
case 116: memcpy_avx_64(dd - 116, ss - 116); memcpy_avx_64(dd - 64, ss - 64); break;
|
|
||||||
case 245: memcpy_avx_128(dd - 245, ss - 245);
|
|
||||||
case 117: memcpy_avx_64(dd - 117, ss - 117); memcpy_avx_64(dd - 64, ss - 64); break;
|
|
||||||
case 246: memcpy_avx_128(dd - 246, ss - 246);
|
|
||||||
case 118: memcpy_avx_64(dd - 118, ss - 118); memcpy_avx_64(dd - 64, ss - 64); break;
|
|
||||||
case 247: memcpy_avx_128(dd - 247, ss - 247);
|
|
||||||
case 119: memcpy_avx_64(dd - 119, ss - 119); memcpy_avx_64(dd - 64, ss - 64); break;
|
|
||||||
case 248: memcpy_avx_128(dd - 248, ss - 248);
|
|
||||||
case 120: memcpy_avx_64(dd - 120, ss - 120); memcpy_avx_64(dd - 64, ss - 64); break;
|
|
||||||
case 249: memcpy_avx_128(dd - 249, ss - 249);
|
|
||||||
case 121: memcpy_avx_64(dd - 121, ss - 121); memcpy_avx_64(dd - 64, ss - 64); break;
|
|
||||||
case 250: memcpy_avx_128(dd - 250, ss - 250);
|
|
||||||
case 122: memcpy_avx_64(dd - 122, ss - 122); memcpy_avx_64(dd - 64, ss - 64); break;
|
|
||||||
case 251: memcpy_avx_128(dd - 251, ss - 251);
|
|
||||||
case 123: memcpy_avx_64(dd - 123, ss - 123); memcpy_avx_64(dd - 64, ss - 64); break;
|
|
||||||
case 252: memcpy_avx_128(dd - 252, ss - 252);
|
|
||||||
case 124: memcpy_avx_64(dd - 124, ss - 124); memcpy_avx_64(dd - 64, ss - 64); break;
|
|
||||||
case 253: memcpy_avx_128(dd - 253, ss - 253);
|
|
||||||
case 125: memcpy_avx_64(dd - 125, ss - 125); memcpy_avx_64(dd - 64, ss - 64); break;
|
|
||||||
case 254: memcpy_avx_128(dd - 254, ss - 254);
|
|
||||||
case 126: memcpy_avx_64(dd - 126, ss - 126); memcpy_avx_64(dd - 64, ss - 64); break;
|
|
||||||
case 255: memcpy_avx_128(dd - 255, ss - 255);
|
|
||||||
case 127: memcpy_avx_64(dd - 127, ss - 127); memcpy_avx_64(dd - 64, ss - 64); break;
|
|
||||||
case 256: memcpy_avx_256(dd - 256, ss - 256); break;
|
|
||||||
}
|
|
||||||
|
|
||||||
return dst;
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
//---------------------------------------------------------------------
|
|
||||||
// main routine
|
|
||||||
//---------------------------------------------------------------------
|
|
||||||
/// AVX-accelerated drop-in replacement for memcpy.
///
/// destination / source: must not overlap (memcpy semantics, not memmove).
/// size:                 number of bytes to copy.
/// Returns `destination`, like memcpy.
///
/// Strategy:
///   - size <= 256:        jump-table copy via memcpy_tiny.
///   - size <= ~L3 cache:  32-byte-aligned destination, regular AVX stores.
///   - larger:             non-temporal (streaming) stores to bypass the
///                         cache hierarchy, with NTA prefetch of the source.
static void* memcpy_fast(void *destination, const void *source, size_t size)
{
    unsigned char *dst = (unsigned char*)destination;
    const unsigned char *src = (const unsigned char*)source;
    // Approximate L3-cache size; copies larger than this use streaming
    // stores so they don't evict the whole cache. Never modified.
    static const size_t cachesize = 0x200000;
    size_t padding;

    // Small copy: handled entirely by the switch-based tiny copier.
    if (size <= 256) {
        memcpy_tiny(dst, src, size);
        _mm256_zeroupper(); // avoid AVX->SSE transition penalty in the caller
        return destination;
    }

    // Align destination to a 32-byte boundary: unconditionally copy a
    // 32-byte head, then advance by `padding`. The head store may overlap
    // the first loop iteration's stores, which is harmless for memcpy.
    // Safe because size > 256 here, so at least 32 bytes remain.
    padding = (32 - (((size_t)dst) & 31)) & 31;

    __m256i head = _mm256_loadu_si256((const __m256i*)src);
    _mm256_storeu_si256((__m256i*)dst, head);
    dst += padding;
    src += padding;
    size -= padding;

    // Medium copy: data likely fits in cache, use regular stores.
    if (size <= cachesize) {
        __m256i c0, c1, c2, c3, c4, c5, c6, c7;

        for (; size >= 256; size -= 256) {
            c0 = _mm256_loadu_si256(((const __m256i*)src) + 0);
            c1 = _mm256_loadu_si256(((const __m256i*)src) + 1);
            c2 = _mm256_loadu_si256(((const __m256i*)src) + 2);
            c3 = _mm256_loadu_si256(((const __m256i*)src) + 3);
            c4 = _mm256_loadu_si256(((const __m256i*)src) + 4);
            c5 = _mm256_loadu_si256(((const __m256i*)src) + 5);
            c6 = _mm256_loadu_si256(((const __m256i*)src) + 6);
            c7 = _mm256_loadu_si256(((const __m256i*)src) + 7);
            _mm_prefetch((const char*)(src + 512), _MM_HINT_NTA);
            src += 256;
            _mm256_storeu_si256((((__m256i*)dst) + 0), c0);
            _mm256_storeu_si256((((__m256i*)dst) + 1), c1);
            _mm256_storeu_si256((((__m256i*)dst) + 2), c2);
            _mm256_storeu_si256((((__m256i*)dst) + 3), c3);
            _mm256_storeu_si256((((__m256i*)dst) + 4), c4);
            _mm256_storeu_si256((((__m256i*)dst) + 5), c5);
            _mm256_storeu_si256((((__m256i*)dst) + 6), c6);
            _mm256_storeu_si256((((__m256i*)dst) + 7), c7);
            dst += 256;
        }
    }
    else { // Big copy: stream past the cache with non-temporal stores.
        __m256i c0, c1, c2, c3, c4, c5, c6, c7;

        _mm_prefetch((const char*)(src), _MM_HINT_NTA);

        // dst is 32-byte aligned here (required by _mm256_stream_si256);
        // only the source load variant depends on src alignment.
        if ((((size_t)src) & 31) == 0) { // source aligned
            for (; size >= 256; size -= 256) {
                c0 = _mm256_load_si256(((const __m256i*)src) + 0);
                c1 = _mm256_load_si256(((const __m256i*)src) + 1);
                c2 = _mm256_load_si256(((const __m256i*)src) + 2);
                c3 = _mm256_load_si256(((const __m256i*)src) + 3);
                c4 = _mm256_load_si256(((const __m256i*)src) + 4);
                c5 = _mm256_load_si256(((const __m256i*)src) + 5);
                c6 = _mm256_load_si256(((const __m256i*)src) + 6);
                c7 = _mm256_load_si256(((const __m256i*)src) + 7);
                _mm_prefetch((const char*)(src + 512), _MM_HINT_NTA);
                src += 256;
                _mm256_stream_si256((((__m256i*)dst) + 0), c0);
                _mm256_stream_si256((((__m256i*)dst) + 1), c1);
                _mm256_stream_si256((((__m256i*)dst) + 2), c2);
                _mm256_stream_si256((((__m256i*)dst) + 3), c3);
                _mm256_stream_si256((((__m256i*)dst) + 4), c4);
                _mm256_stream_si256((((__m256i*)dst) + 5), c5);
                _mm256_stream_si256((((__m256i*)dst) + 6), c6);
                _mm256_stream_si256((((__m256i*)dst) + 7), c7);
                dst += 256;
            }
        }
        else { // source unaligned
            for (; size >= 256; size -= 256) {
                c0 = _mm256_loadu_si256(((const __m256i*)src) + 0);
                c1 = _mm256_loadu_si256(((const __m256i*)src) + 1);
                c2 = _mm256_loadu_si256(((const __m256i*)src) + 2);
                c3 = _mm256_loadu_si256(((const __m256i*)src) + 3);
                c4 = _mm256_loadu_si256(((const __m256i*)src) + 4);
                c5 = _mm256_loadu_si256(((const __m256i*)src) + 5);
                c6 = _mm256_loadu_si256(((const __m256i*)src) + 6);
                c7 = _mm256_loadu_si256(((const __m256i*)src) + 7);
                _mm_prefetch((const char*)(src + 512), _MM_HINT_NTA);
                src += 256;
                _mm256_stream_si256((((__m256i*)dst) + 0), c0);
                _mm256_stream_si256((((__m256i*)dst) + 1), c1);
                _mm256_stream_si256((((__m256i*)dst) + 2), c2);
                _mm256_stream_si256((((__m256i*)dst) + 3), c3);
                _mm256_stream_si256((((__m256i*)dst) + 4), c4);
                _mm256_stream_si256((((__m256i*)dst) + 5), c5);
                _mm256_stream_si256((((__m256i*)dst) + 6), c6);
                _mm256_stream_si256((((__m256i*)dst) + 7), c7);
                dst += 256;
            }
        }
        // Make non-temporal stores globally visible before returning.
        _mm_sfence();
    }

    // Copy the remaining < 256-byte tail.
    memcpy_tiny(dst, src, size);
    _mm256_zeroupper();

    return destination;
}
|
|
||||||
|
|
||||||
|
|
||||||
#endif
|
|
||||||
|
|
||||||
|
|
||||||
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user