Merge branch 'master' into fix-s3-function

Commit 150007bfc5
.github/workflows/woboq.yml (vendored, 1 change)

@@ -12,6 +12,7 @@ jobs:
   # don't use dockerhub push because this image updates so rarely
   WoboqCodebrowser:
     runs-on: [self-hosted, style-checker]
+    timeout-minutes: 420 # the task is pretty heavy, so there's an additional hour
     steps:
       - name: Set envs
         run: |
README.md (11 changes)

@@ -22,12 +22,13 @@ curl https://clickhouse.com/ | sh

 ## Upcoming Events

-* [**v23.5 Release Webinar**](https://clickhouse.com/company/events/v23-5-release-webinar?utm_source=github&utm_medium=social&utm_campaign=release-webinar-2023-05) - Jun 8 - 23.5 is rapidly approaching. Original creator, co-founder, and CTO of ClickHouse Alexey Milovidov will walk us through the highlights of the release.
+* [**v23.6 Release Webinar**](https://clickhouse.com/company/events/v23-6-release-call?utm_source=github&utm_medium=social&utm_campaign=release-webinar-2023-06) - Jun 29 - 23.6 is rapidly approaching. Original creator, co-founder, and CTO of ClickHouse Alexey Milovidov will walk us through the highlights of the release.
-* [**ClickHouse Meetup in Bangalore**](https://www.meetup.com/clickhouse-bangalore-user-group/events/293740066/) - Jun 7
+* [**ClickHouse Meetup in Paris**](https://www.meetup.com/clickhouse-france-user-group/events/294283460) - Jul 4
-* [**ClickHouse Meetup in San Francisco**](https://www.meetup.com/clickhouse-silicon-valley-meetup-group/events/293426725/) - Jun 7
+* [**ClickHouse Meetup in Boston**](https://www.meetup.com/clickhouse-boston-user-group/events/293913596) - Jul 18
+* [**ClickHouse Meetup in NYC**](https://www.meetup.com/clickhouse-new-york-user-group/events/293913441) - Jul 19
+* [**ClickHouse Meetup in Toronto**](https://www.meetup.com/clickhouse-toronto-user-group/events/294183127) - Jul 20

-Also, keep an eye out for upcoming meetups in Amsterdam, Boston, NYC, Beijing, and Toronto. Somewhere else you want us to be? Please feel free to reach out to tyler <at> clickhouse <dot> com.
+Also, keep an eye out for upcoming meetups around the world. Somewhere else you want us to be? Please feel free to reach out to tyler <at> clickhouse <dot> com.

 ## Recent Recordings

 * **Recent Meetup Videos**: [Meetup Playlist](https://www.youtube.com/playlist?list=PL0Z2YDlm0b3iNDUzpY1S3L_iV4nARda_U) Whenever possible recordings of the ClickHouse Community Meetups are edited and presented as individual talks. Current featuring "Modern SQL in 2023", "Fast, Concurrent, and Consistent Asynchronous INSERTS in ClickHouse", and "Full-Text Indices: Design and Experiments"
@@ -2,6 +2,7 @@

 #include <cstdint>
 #include <string>
+#include <array>

 #if defined(__SSE2__)
 #include <emmintrin.h>
@@ -11,3 +11,8 @@ constexpr double interpolateExponential(double min, double max, double ratio)
     assert(min > 0 && ratio >= 0 && ratio <= 1);
     return min * std::pow(max / min, ratio);
 }
+
+constexpr double interpolateLinear(double min, double max, double ratio)
+{
+    return std::lerp(min, max, ratio);
+}
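Both helpers map `ratio` in [0, 1] onto [`min`, `max`]: `interpolateLinear` steps evenly via `std::lerp`, while `interpolateExponential` steps geometrically, so equal increments of `ratio` multiply the result by a constant factor (the `assert(min > 0 ...)` guards the division and the power). Writing m = min, M = max, a quick endpoint check:

$$
\mathrm{lin}(r) = m + r\,(M - m), \qquad
\mathrm{exp}(r) = m\left(\frac{M}{m}\right)^{r}, \qquad
\mathrm{lin}(0) = \mathrm{exp}(0) = m, \qquad
\mathrm{lin}(1) = \mathrm{exp}(1) = M
$$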
@@ -4,7 +4,7 @@ if (SANITIZE OR NOT (
 ))
     if (ENABLE_JEMALLOC)
         message (${RECONFIGURE_MESSAGE_LEVEL}
-                 "jemalloc is disabled implicitly: it doesn't work with sanitizers and can only be used with x86_64, aarch64, or ppc64le Linux or FreeBSD builds and RelWithDebInfo macOS builds.")
+                 "jemalloc is disabled implicitly: it doesn't work with sanitizers and can only be used with x86_64, aarch64, or ppc64le Linux or FreeBSD builds and RelWithDebInfo macOS builds. Use -DENABLE_JEMALLOC=0")
     endif ()
     set (ENABLE_JEMALLOC OFF)
 else ()
@@ -32,7 +32,7 @@ RUN arch=${TARGETARCH:-amd64} \
 esac

 ARG REPOSITORY="https://s3.amazonaws.com/clickhouse-builds/22.4/31c367d3cd3aefd316778601ff6565119fe36682/package_release"
-ARG VERSION="23.5.2.7"
+ARG VERSION="23.5.3.24"
 ARG PACKAGES="clickhouse-keeper"

 # user/group precreated explicitly with fixed uid/gid on purpose.
@@ -33,7 +33,7 @@ RUN arch=${TARGETARCH:-amd64} \
 # lts / testing / prestable / etc
 ARG REPO_CHANNEL="stable"
 ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
-ARG VERSION="23.5.2.7"
+ARG VERSION="23.5.3.24"
 ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"

 # user/group precreated explicitly with fixed uid/gid on purpose.
@@ -22,7 +22,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list

 ARG REPO_CHANNEL="stable"
 ARG REPOSITORY="deb https://packages.clickhouse.com/deb ${REPO_CHANNEL} main"
-ARG VERSION="23.5.2.7"
+ARG VERSION="23.5.3.24"
 ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"

 # set non-empty deb_location_url url to create a docker image
@@ -12,10 +12,10 @@ RUN apt-get update --yes && \
 # We need to get the repository's HEAD each time despite, so we invalidate layers' cache
 ARG CACHE_INVALIDATOR=0
 RUN mkdir /sqlancer && \
-    wget -q -O- https://github.com/sqlancer/sqlancer/archive/master.tar.gz | \
+    wget -q -O- https://github.com/sqlancer/sqlancer/archive/main.tar.gz | \
     tar zx -C /sqlancer && \
-    cd /sqlancer/sqlancer-master && \
+    cd /sqlancer/sqlancer-main && \
-    mvn package -DskipTests && \
+    mvn --no-transfer-progress package -DskipTests && \
     rm -r /root/.m2

 COPY run.sh /
@@ -16,7 +16,6 @@ def process_result(result_folder):
        "TLPGroupBy",
        "TLPHaving",
        "TLPWhere",
-       "TLPWhereGroupBy",
        "NoREC",
    ]
    failed_tests = []
@@ -33,7 +33,7 @@ cd /workspace

 for _ in $(seq 1 60); do if [[ $(wget -q 'localhost:8123' -O-) == 'Ok.' ]]; then break ; else sleep 1; fi ; done

-cd /sqlancer/sqlancer-master
+cd /sqlancer/sqlancer-main

 TIMEOUT=300
 NUM_QUERIES=1000
@@ -59,6 +59,8 @@ install_packages previous_release_package_folder
 # available for dump via clickhouse-local
 configure

+# it contains some new settings, but we can safely remove it
+rm /etc/clickhouse-server/config.d/merge_tree.xml
 rm /etc/clickhouse-server/users.d/nonconst_timezone.xml

 start
@@ -85,6 +87,8 @@ export USE_S3_STORAGE_FOR_MERGE_TREE=1
 export ZOOKEEPER_FAULT_INJECTION=0
 configure

+# it contains some new settings, but we can safely remove it
+rm /etc/clickhouse-server/config.d/merge_tree.xml
 rm /etc/clickhouse-server/users.d/nonconst_timezone.xml

 start
docs/changelogs/v22.8.19.10-lts.md (new file, 19 lines)

@@ -0,0 +1,19 @@
+---
+sidebar_position: 1
+sidebar_label: 2023
+---
+
+# 2023 Changelog
+
+### ClickHouse release v22.8.19.10-lts (989bc2fe8b0) FIXME as compared to v22.8.18.31-lts (4de7a95a544)
+
+#### Bug Fix (user-visible misbehavior in an official stable release)
+
+* Fix subtly broken copy-on-write of ColumnLowCardinality dictionary [#51064](https://github.com/ClickHouse/ClickHouse/pull/51064) ([Michael Kolupaev](https://github.com/al13n321)).
+* Generate safe IVs [#51086](https://github.com/ClickHouse/ClickHouse/pull/51086) ([Salvatore Mesoraca](https://github.com/aiven-sal)).
+
+#### NOT FOR CHANGELOG / INSIGNIFICANT
+
+* Fix a versions' tweak for tagged commits, improve version_helper [#51035](https://github.com/ClickHouse/ClickHouse/pull/51035) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
+* Sqlancer has changed master to main [#51060](https://github.com/ClickHouse/ClickHouse/pull/51060) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
docs/changelogs/v23.3.4.17-lts.md (new file, 22 lines)

@@ -0,0 +1,22 @@
+---
+sidebar_position: 1
+sidebar_label: 2023
+---
+
+# 2023 Changelog
+
+### ClickHouse release v23.3.4.17-lts (2c99b73ff40) FIXME as compared to v23.3.3.52-lts (cb963c474db)
+
+#### Bug Fix (user-visible misbehavior in an official stable release)
+
+* Fix crash when Pool::Entry::disconnect() is called [#50334](https://github.com/ClickHouse/ClickHouse/pull/50334) ([Val Doroshchuk](https://github.com/valbok)).
+* Avoid storing logs in Keeper containing unknown operation [#50751](https://github.com/ClickHouse/ClickHouse/pull/50751) ([Antonio Andelic](https://github.com/antonio2368)).
+* Fix subtly broken copy-on-write of ColumnLowCardinality dictionary [#51064](https://github.com/ClickHouse/ClickHouse/pull/51064) ([Michael Kolupaev](https://github.com/al13n321)).
+* Generate safe IVs [#51086](https://github.com/ClickHouse/ClickHouse/pull/51086) ([Salvatore Mesoraca](https://github.com/aiven-sal)).
+
+#### NOT FOR CHANGELOG / INSIGNIFICANT
+
+* Don't mark a part as broken on `Poco::TimeoutException` [#50811](https://github.com/ClickHouse/ClickHouse/pull/50811) ([Alexander Tokmakov](https://github.com/tavplubix)).
+* Fix a versions' tweak for tagged commits, improve version_helper [#51035](https://github.com/ClickHouse/ClickHouse/pull/51035) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
+* Sqlancer has changed master to main [#51060](https://github.com/ClickHouse/ClickHouse/pull/51060) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
docs/changelogs/v23.4.4.16-stable.md (new file, 22 lines)

@@ -0,0 +1,22 @@
+---
+sidebar_position: 1
+sidebar_label: 2023
+---
+
+# 2023 Changelog
+
+### ClickHouse release v23.4.4.16-stable (747ba4fc6a0) FIXME as compared to v23.4.3.48-stable (d9199f8d3cc)
+
+#### Bug Fix (user-visible misbehavior in an official stable release)
+
+* Fix crash when Pool::Entry::disconnect() is called [#50334](https://github.com/ClickHouse/ClickHouse/pull/50334) ([Val Doroshchuk](https://github.com/valbok)).
+* Fix iceberg V2 optional metadata parsing [#50974](https://github.com/ClickHouse/ClickHouse/pull/50974) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Fix subtly broken copy-on-write of ColumnLowCardinality dictionary [#51064](https://github.com/ClickHouse/ClickHouse/pull/51064) ([Michael Kolupaev](https://github.com/al13n321)).
+* Generate safe IVs [#51086](https://github.com/ClickHouse/ClickHouse/pull/51086) ([Salvatore Mesoraca](https://github.com/aiven-sal)).
+
+#### NOT FOR CHANGELOG / INSIGNIFICANT
+
+* Don't mark a part as broken on `Poco::TimeoutException` [#50811](https://github.com/ClickHouse/ClickHouse/pull/50811) ([Alexander Tokmakov](https://github.com/tavplubix)).
+* Fix a versions' tweak for tagged commits, improve version_helper [#51035](https://github.com/ClickHouse/ClickHouse/pull/51035) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
+* Sqlancer has changed master to main [#51060](https://github.com/ClickHouse/ClickHouse/pull/51060) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
docs/changelogs/v23.5.3.24-stable.md (new file, 26 lines)

@@ -0,0 +1,26 @@
+---
+sidebar_position: 1
+sidebar_label: 2023
+---
+
+# 2023 Changelog
+
+### ClickHouse release v23.5.3.24-stable (76f54616d3b) FIXME as compared to v23.5.2.7-stable (5751aa1ab9f)
+
+#### Bug Fix (user-visible misbehavior in an official stable release)
+
+* Fix Log family table return wrong rows count after truncate [#50585](https://github.com/ClickHouse/ClickHouse/pull/50585) ([flynn](https://github.com/ucasfl)).
+* Fix bug in `uniqExact` parallel merging [#50590](https://github.com/ClickHouse/ClickHouse/pull/50590) ([Nikita Taranov](https://github.com/nickitat)).
+* Revert recent grace hash join changes [#50699](https://github.com/ClickHouse/ClickHouse/pull/50699) ([vdimir](https://github.com/vdimir)).
+* Avoid storing logs in Keeper containing unknown operation [#50751](https://github.com/ClickHouse/ClickHouse/pull/50751) ([Antonio Andelic](https://github.com/antonio2368)).
+* Add compat setting for non-const timezones [#50834](https://github.com/ClickHouse/ClickHouse/pull/50834) ([Robert Schulze](https://github.com/rschu1ze)).
+* Fix iceberg V2 optional metadata parsing [#50974](https://github.com/ClickHouse/ClickHouse/pull/50974) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Fix subtly broken copy-on-write of ColumnLowCardinality dictionary [#51064](https://github.com/ClickHouse/ClickHouse/pull/51064) ([Michael Kolupaev](https://github.com/al13n321)).
+* Generate safe IVs [#51086](https://github.com/ClickHouse/ClickHouse/pull/51086) ([Salvatore Mesoraca](https://github.com/aiven-sal)).
+
+#### NOT FOR CHANGELOG / INSIGNIFICANT
+
+* Don't mark a part as broken on `Poco::TimeoutException` [#50811](https://github.com/ClickHouse/ClickHouse/pull/50811) ([Alexander Tokmakov](https://github.com/tavplubix)).
+* Fix a versions' tweak for tagged commits, improve version_helper [#51035](https://github.com/ClickHouse/ClickHouse/pull/51035) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
+* Sqlancer has changed master to main [#51060](https://github.com/ClickHouse/ClickHouse/pull/51060) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
@@ -233,6 +233,12 @@ libhdfs3 support HDFS namenode HA.
 - `_path` — Path to the file.
 - `_file` — Name of the file.

+## Storage Settings {#storage-settings}
+
+- [hdfs_truncate_on_insert](/docs/en/operations/settings/settings.md#hdfs-truncate-on-insert) - allows to truncate file before insert into it. Disabled by default.
+- [hdfs_create_multiple_files](/docs/en/operations/settings/settings.md#hdfs_allow_create_multiple_files) - allows to create a new file on each insert if format has suffix. Disabled by default.
+- [hdfs_skip_empty_files](/docs/en/operations/settings/settings.md#hdfs_skip_empty_files) - allows to skip empty files while reading. Disabled by default.
+
 **See Also**

 - [Virtual columns](../../../engines/table-engines/index.md#table_engines-virtual_columns)
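A usage sketch for the new HDFS storage settings (the URI, structure, and data here are placeholders, not from the diff): with `hdfs_truncate_on_insert` enabled, an insert overwrites an existing file instead of throwing.

```sql
-- Placeholder URI/structure; truncate the existing file instead of failing the INSERT
INSERT INTO TABLE FUNCTION hdfs('hdfs://hdfs1:9000/data.tsv', 'TSV', 'a UInt32, b String')
SETTINGS hdfs_truncate_on_insert = 1
VALUES (1, 'one');
```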
@@ -35,6 +35,10 @@ The table structure can differ from the original MySQL table structure:
 - Column types may differ from those in the original MySQL table. ClickHouse tries to [cast](../../../engines/database-engines/mysql.md#data_types-support) values to the ClickHouse data types.
 - The [external_table_functions_use_nulls](../../../operations/settings/settings.md#external-table-functions-use-nulls) setting defines how to handle Nullable columns. Default value: 1. If 0, the table function does not make Nullable columns and inserts default values instead of nulls. This is also applicable for NULL values inside arrays.

+:::note
+The MySQL Table Engine is currently not available on the ClickHouse builds for MacOS ([issue](https://github.com/ClickHouse/ClickHouse/issues/21191))
+:::
+
 **Engine Parameters**

 - `host:port` — MySQL server address.
@@ -1,5 +1,5 @@
 ---
-slug: /en/sql-reference/table-functions/redis
+slug: /en/engines/table-engines/integrations/redis
 sidebar_position: 43
 sidebar_label: Redis
 ---
@@ -127,6 +127,12 @@ CREATE TABLE table_with_asterisk (name String, value UInt32)
 ENGINE = S3('https://clickhouse-public-datasets.s3.amazonaws.com/my-bucket/{some,another}_folder/*', 'CSV');
 ```

+## Storage Settings {#storage-settings}
+
+- [s3_truncate_on_insert](/docs/en/operations/settings/settings.md#s3-truncate-on-insert) - allows to truncate file before insert into it. Disabled by default.
+- [s3_create_multiple_files](/docs/en/operations/settings/settings.md#s3_allow_create_multiple_files) - allows to create a new file on each insert if format has suffix. Disabled by default.
+- [s3_skip_empty_files](/docs/en/operations/settings/settings.md#s3_skip_empty_files) - allows to skip empty files while reading. Disabled by default.
+
 ## S3-related Settings {#settings}

 The following settings can be set before query execution or placed into configuration file.
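A companion sketch for the create-multiple-files behavior (bucket and structure are placeholders): repeated inserts produce `data.csv`, `data.1.csv`, `data.2.csv`, and so on instead of failing on an existing object.

```sql
-- Placeholder bucket/structure; each INSERT after the first creates data.1.csv, data.2.csv, ...
INSERT INTO TABLE FUNCTION s3('https://my-bucket.s3.amazonaws.com/data.csv', 'CSV', 'a UInt32, b String')
SETTINGS s3_create_new_file_on_insert = 1
VALUES (1, 'one');
```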
@@ -853,7 +853,7 @@ Tags:
 - `max_data_part_size_bytes` — the maximum size of a part that can be stored on any of the volume’s disks. If the a size of a merged part estimated to be bigger than `max_data_part_size_bytes` then this part will be written to a next volume. Basically this feature allows to keep new/small parts on a hot (SSD) volume and move them to a cold (HDD) volume when they reach large size. Do not use this setting if your policy has only one volume.
 - `move_factor` — when the amount of available space gets lower than this factor, data automatically starts to move on the next volume if any (by default, 0.1). ClickHouse sorts existing parts by size from largest to smallest (in descending order) and selects parts with the total size that is sufficient to meet the `move_factor` condition. If the total size of all parts is insufficient, all parts will be moved.
 - `prefer_not_to_merge` — Disables merging of data parts on this volume. When this setting is enabled, merging data on this volume is not allowed. This allows controlling how ClickHouse works with slow disks.
-- `perform_ttl_move_on_insert` — Disables TTL move on data part INSERT. By default if we insert a data part that already expired by the TTL move rule it immediately goes to a volume/disk declared in move rule. This can significantly slowdown insert in case if destination volume/disk is slow (e.g. S3).
+- `perform_ttl_move_on_insert` — Disables TTL move on data part INSERT. By default (if enabled) if we insert a data part that already expired by the TTL move rule it immediately goes to a volume/disk declared in move rule. This can significantly slowdown insert in case if destination volume/disk is slow (e.g. S3). If disabled then already expired data part is written into a default volume and then right after moved to TTL volume.
 - `load_balancing` - Policy for disk balancing, `round_robin` or `least_used`.

 Configuration examples:
@@ -92,3 +92,11 @@ $ echo -e "1,2\n3,4" | clickhouse-local -q "CREATE TABLE table (a Int64, b Int64
 `PARTITION BY` — Optional. It is possible to create separate files by partitioning the data on a partition key. In most cases, you don't need a partition key, and if it is needed you generally don't need a partition key more granular than by month. Partitioning does not speed up queries (in contrast to the ORDER BY expression). You should never use too granular partitioning. Don't partition your data by client identifiers or names (instead, make client identifier or name the first column in the ORDER BY expression).

 For partitioning by month, use the `toYYYYMM(date_column)` expression, where `date_column` is a column with a date of the type [Date](/docs/en/sql-reference/data-types/date.md). The partition names here have the `"YYYYMM"` format.
+
+## Settings {#settings}
+
+- [engine_file_empty_if_not_exists](/docs/en/operations/settings/settings.md#engine-file-emptyif-not-exists) - allows to select empty data from a file that doesn't exist. Disabled by default.
+- [engine_file_truncate_on_insert](/docs/en/operations/settings/settings.md#engine-file-truncate-on-insert) - allows to truncate file before insert into it. Disabled by default.
+- [engine_file_allow_create_multiple_files](/docs/en/operations/settings/settings.md#engine_file_allow_create_multiple_files) - allows to create a new file on each insert if format has suffix. Disabled by default.
+- [engine_file_skip_empty_files](/docs/en/operations/settings/settings.md#engine_file_skip_empty_files) - allows to skip empty files while reading. Disabled by default.
+- [storage_file_read_method](/docs/en/operations/settings/settings.md#engine-file-emptyif-not-exists) - method of reading data from storage file, one of: `read`, `pread`, `mmap`. The mmap method does not apply to clickhouse-server (it's intended for clickhouse-local). Default value: `pread` for clickhouse-server, `mmap` for clickhouse-local.
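A minimal sketch of the multiple-files behavior with a File engine table (the table name is a placeholder): with the setting on, repeated inserts write `data.Parquet`, `data.1.Parquet`, and so on, because the Parquet format has a suffix.

```sql
-- Placeholder table; a format with a suffix (Parquet) is required for multiple files
CREATE TABLE file_tbl (a UInt32, b String) ENGINE = File(Parquet);

INSERT INTO file_tbl SETTINGS engine_file_allow_create_multiple_files = 1 VALUES (1, 'one');
INSERT INTO file_tbl SETTINGS engine_file_allow_create_multiple_files = 1 VALUES (2, 'two'); -- creates data.1.Parquet
```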
@@ -102,3 +102,7 @@ SELECT * FROM url_engine_table
 `PARTITION BY` — Optional. It is possible to create separate files by partitioning the data on a partition key. In most cases, you don't need a partition key, and if it is needed you generally don't need a partition key more granular than by month. Partitioning does not speed up queries (in contrast to the ORDER BY expression). You should never use too granular partitioning. Don't partition your data by client identifiers or names (instead, make client identifier or name the first column in the ORDER BY expression).

 For partitioning by month, use the `toYYYYMM(date_column)` expression, where `date_column` is a column with a date of the type [Date](/docs/en/sql-reference/data-types/date.md). The partition names here have the `"YYYYMM"` format.
+
+## Storage Settings {#storage-settings}
+
+- [engine_url_skip_empty_files](/docs/en/operations/settings/settings.md#engine_url_skip_empty_files) - allows to skip empty files while reading. Disabled by default.
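A sketch of the new URL setting (the endpoint and structure are placeholders, and I'm assuming the setting also applies to the `url` table function): with it enabled, an empty response body yields an empty result instead of a format-parsing error.

```sql
-- Placeholder endpoint/structure; empty files are skipped instead of raising an exception
SELECT * FROM url('https://example.com/maybe-empty.csv', 'CSV', 'a UInt32, b String')
SETTINGS engine_url_skip_empty_files = 1;
```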
@@ -470,6 +470,7 @@ The CSV format supports the output of totals and extremes the same way as `TabSe
 - [input_format_csv_detect_header](/docs/en/operations/settings/settings-formats.md/#input_format_csv_detect_header) - automatically detect header with names and types in CSV format. Default value - `true`.
 - [input_format_csv_skip_trailing_empty_lines](/docs/en/operations/settings/settings-formats.md/#input_format_csv_skip_trailing_empty_lines) - skip trailing empty lines at the end of data. Default value - `false`.
 - [input_format_csv_trim_whitespaces](/docs/en/operations/settings/settings-formats.md/#input_format_csv_trim_whitespaces) - trim spaces and tabs in non-quoted CSV strings. Default value - `true`.
+- [input_format_csv_allow_whitespace_or_tab_as_delimiter](/docs/en/operations/settings/settings-formats.md/#input_format_csv_allow_whitespace_or_tab_as_delimiter) - Allow to use whitespace or tab as field delimiter in CSV strings. Default value - `false`.

 ## CSVWithNames {#csvwithnames}
@@ -1877,13 +1878,13 @@ The table below shows supported data types and how they match ClickHouse [data t
 | `string (uuid)` \** | [UUID](/docs/en/sql-reference/data-types/uuid.md) | `string (uuid)` \** |
 | `fixed(16)` | [Int128/UInt128](/docs/en/sql-reference/data-types/int-uint.md) | `fixed(16)` |
 | `fixed(32)` | [Int256/UInt256](/docs/en/sql-reference/data-types/int-uint.md) | `fixed(32)` |
+| `record` | [Tuple](/docs/en/sql-reference/data-types/tuple.md) | `record` |

 \* `bytes` is default, controlled by [output_format_avro_string_column_pattern](/docs/en/operations/settings/settings-formats.md/#output_format_avro_string_column_pattern)
 \** [Avro logical types](https://avro.apache.org/docs/current/spec.html#Logical+Types)

-Unsupported Avro data types: `record` (non-root), `map`

 Unsupported Avro logical data types: `time-millis`, `time-micros`, `duration`

 ### Inserting Data {#inserting-data-1}
@@ -1922,7 +1923,26 @@ Output Avro file compression and sync interval can be configured with [output_fo

 Using the ClickHouse [DESCRIBE](/docs/en/sql-reference/statements/describe-table) function, you can quickly view the inferred format of an Avro file like the following example. This example includes the URL of a publicly accessible Avro file in the ClickHouse S3 public bucket:

-``` DESCRIBE url('https://clickhouse-public-datasets.s3.eu-central-1.amazonaws.com/hits.avro','Avro');
+```
+DESCRIBE url('https://clickhouse-public-datasets.s3.eu-central-1.amazonaws.com/hits.avro','Avro');
+```
+```
+┌─name───────────────────────┬─type────────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┐
+│ WatchID                    │ Int64           │              │                    │         │                  │                │
+│ JavaEnable                 │ Int32           │              │                    │         │                  │                │
+│ Title                      │ String          │              │                    │         │                  │                │
+│ GoodEvent                  │ Int32           │              │                    │         │                  │                │
+│ EventTime                  │ Int32           │              │                    │         │                  │                │
+│ EventDate                  │ Date32          │              │                    │         │                  │                │
+│ CounterID                  │ Int32           │              │                    │         │                  │                │
+│ ClientIP                   │ Int32           │              │                    │         │                  │                │
+│ ClientIP6                  │ FixedString(16) │              │                    │         │                  │                │
+│ RegionID                   │ Int32           │              │                    │         │                  │                │
+...
+│ IslandID                   │ FixedString(16) │              │                    │         │                  │                │
+│ RequestNum                 │ Int32           │              │                    │         │                  │                │
+│ RequestTry                 │ Int32           │              │                    │         │                  │                │
+└────────────────────────────┴─────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘
 ```

 ## AvroConfluent {#data-format-avro-confluent}
@@ -83,6 +83,7 @@ The BACKUP and RESTORE statements take a list of DATABASE and TABLE names, a des
 - [`compression_method`](/docs/en/sql-reference/statements/create/table.md/#column-compression-codecs) and compression_level
 - `password` for the file on disk
 - `base_backup`: the destination of the previous backup of this source. For example, `Disk('backups', '1.zip')`
+- `structure_only`: if enabled, allows to only backup or restore the CREATE statements without the data of tables

 ### Usage examples

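A sketch of the new `structure_only` flag (the disk, archive, and database names are placeholders, and the `AS` rename on restore is my assumption): it captures schemas only, which is handy for recreating an empty copy of a database.

```sql
-- Placeholder names; back up CREATE statements only, no table data
BACKUP DATABASE my_db TO Disk('backups', 'schema_only.zip') SETTINGS structure_only = 1;

-- Restore just the schemas under a different database name
RESTORE DATABASE my_db AS my_db_copy FROM Disk('backups', 'schema_only.zip') SETTINGS structure_only = 1;
```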
@@ -932,6 +932,38 @@ Result
 " string "
 ```

+### input_format_csv_allow_whitespace_or_tab_as_delimiter {#input_format_csv_allow_whitespace_or_tab_as_delimiter}
+
+Allow to use whitespace or tab as field delimiter in CSV strings.
+
+Default value: `false`.
+
+**Examples**
+
+Query
+
+```bash
+echo 'a b' | ./clickhouse local -q "select * from table FORMAT CSV" --input-format="CSV" --input_format_csv_allow_whitespace_or_tab_as_delimiter=true --format_csv_delimiter=' '
+```
+
+Result
+
+```text
+a b
+```
+
+Query
+
+```bash
+echo 'a b' | ./clickhouse local -q "select * from table FORMAT CSV" --input-format="CSV" --input_format_csv_allow_whitespace_or_tab_as_delimiter=true --format_csv_delimiter='\t'
+```
+
+Result
+
+```text
+a b
+```
+
 ## Values format settings {#values-format-settings}

 ### input_format_values_interpret_expressions {#input_format_values_interpret_expressions}
@@ -2941,7 +2941,7 @@ Default value: `0`.

 ## mutations_sync {#mutations_sync}

-Allows to execute `ALTER TABLE ... UPDATE|DELETE` queries ([mutations](../../sql-reference/statements/alter/index.md#mutations)) synchronously.
+Allows to execute `ALTER TABLE ... UPDATE|DELETE|MATERIALIZE INDEX|MATERIALIZE PROJECTION|MATERIALIZE COLUMN` queries ([mutations](../../sql-reference/statements/alter/index.md#mutations)) synchronously.

 Possible values:

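A usage sketch of the broadened scope (table and index names are placeholders): with `mutations_sync = 2` the `ALTER` blocks until the mutation completes on all replicas.

```sql
-- Placeholder names; block until the index rebuild finishes on every replica
ALTER TABLE hits MATERIALIZE INDEX idx_url
SETTINGS mutations_sync = 2;
```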
@@ -3328,7 +3328,35 @@ Possible values:

 Default value: `0`.

-## s3_truncate_on_insert
+## engine_file_allow_create_multiple_files {#engine_file_allow_create_multiple_files}
+
+Enables or disables creating a new file on each insert in file engine tables if the format has the suffix (`JSON`, `ORC`, `Parquet`, etc.). If enabled, on each insert a new file will be created with a name following this pattern:
+
+`data.Parquet` -> `data.1.Parquet` -> `data.2.Parquet`, etc.
+
+Possible values:
+- 0 — `INSERT` query appends new data to the end of the file.
+- 1 — `INSERT` query creates a new file.
+
+Default value: `0`.
+
+## engine_file_skip_empty_files {#engine_file_skip_empty_files}
+
+Enables or disables skipping empty files in [File](../../engines/table-engines/special/file.md) engine tables.
+
+Possible values:
+- 0 — `SELECT` throws an exception if empty file is not compatible with requested format.
+- 1 — `SELECT` returns empty result for empty file.
+
+Default value: `0`.
+
+## storage_file_read_method {#storage_file_read_method}
+
+Method of reading data from storage file, one of: `read`, `pread`, `mmap`. The mmap method does not apply to clickhouse-server (it's intended for clickhouse-local).
+
+Default value: `pread` for clickhouse-server, `mmap` for clickhouse-local.
+
+## s3_truncate_on_insert {#s3_truncate_on_insert}

 Enables or disables truncate before inserts in s3 engine tables. If disabled, an exception will be thrown on insert attempts if an S3 object already exists.

@@ -3338,7 +3366,29 @@ Possible values:

 Default value: `0`.

-## hdfs_truncate_on_insert
+## s3_create_new_file_on_insert {#s3_create_new_file_on_insert}
+
+Enables or disables creating a new file on each insert in s3 engine tables. If enabled, on each insert a new S3 object will be created with the key, similar to this pattern:
+
+initial: `data.Parquet.gz` -> `data.1.Parquet.gz` -> `data.2.Parquet.gz`, etc.
+
+Possible values:
+- 0 — `INSERT` query appends new data to the end of the file.
+- 1 — `INSERT` query creates a new file.
+
+Default value: `0`.
+
+## s3_skip_empty_files {#s3_skip_empty_files}
+
+Enables or disables skipping empty files in [S3](../../engines/table-engines/integrations/s3.md) engine tables.
+
+Possible values:
+- 0 — `SELECT` throws an exception if empty file is not compatible with requested format.
+- 1 — `SELECT` returns empty result for empty file.
+
+Default value: `0`.
+
+## hdfs_truncate_on_insert {#hdfs_truncate_on_insert}

 Enables or disables truncation before an insert in hdfs engine tables. If disabled, an exception will be thrown on an attempt to insert if a file in HDFS already exists.

|
|||||||
|
|
||||||
Default value: `0`.
|
Default value: `0`.
|
||||||
|
|
||||||
## engine_file_allow_create_multiple_files
|
## hdfs_create_new_file_on_insert {#hdfs_create_new_file_on_insert
|
||||||
|
|
||||||
Enables or disables creating a new file on each insert in file engine tables if the format has the suffix (`JSON`, `ORC`, `Parquet`, etc.). If enabled, on each insert a new file will be created with a name following this pattern:
|
|
||||||
|
|
||||||
`data.Parquet` -> `data.1.Parquet` -> `data.2.Parquet`, etc.
|
|
||||||
|
|
||||||
Possible values:
|
|
||||||
- 0 — `INSERT` query appends new data to the end of the file.
|
|
||||||
- 1 — `INSERT` query replaces existing content of the file with the new data.
|
|
||||||
|
|
||||||
Default value: `0`.
|
|
||||||
|
|
||||||
## s3_create_new_file_on_insert
|
|
||||||
|
|
||||||
Enables or disables creating a new file on each insert in s3 engine tables. If enabled, on each insert a new S3 object will be created with the key, similar to this pattern:
|
|
||||||
|
|
||||||
initial: `data.Parquet.gz` -> `data.1.Parquet.gz` -> `data.2.Parquet.gz`, etc.
|
|
||||||
|
|
||||||
Possible values:
|
|
||||||
- 0 — `INSERT` query appends new data to the end of the file.
|
|
||||||
- 1 — `INSERT` query replaces existing content of the file with the new data.
|
|
||||||
|
|
||||||
Default value: `0`.
|
|
||||||
|
|
||||||
## hdfs_create_new_file_on_insert
|
|
||||||
|
|
||||||
Enables or disables creating a new file on each insert in HDFS engine tables. If enabled, on each insert a new HDFS file will be created with the name, similar to this pattern:
|
Enables or disables creating a new file on each insert in HDFS engine tables. If enabled, on each insert a new HDFS file will be created with the name, similar to this pattern:
|
||||||
|
|
||||||
@@ -3380,7 +3406,27 @@ initial: `data.Parquet.gz` -> `data.1.Parquet.gz` -> `data.2.Parquet.gz`, etc.

 Possible values:
 - 0 — `INSERT` query appends new data to the end of the file.
-- 1 — `INSERT` query replaces existing content of the file with the new data.
+- 1 — `INSERT` query creates a new file.
+
+Default value: `0`.
+
+## hdfs_skip_empty_files {#hdfs_skip_empty_files}
+
+Enables or disables skipping empty files in [HDFS](../../engines/table-engines/integrations/hdfs.md) engine tables.
+
+Possible values:
+- 0 — `SELECT` throws an exception if empty file is not compatible with requested format.
+- 1 — `SELECT` returns empty result for empty file.
+
+Default value: `0`.
+
+## engine_url_skip_empty_files {#engine_url_skip_empty_files}
+
+Enables or disables skipping empty files in [URL](../../engines/table-engines/special/url.md) engine tables.
+
+Possible values:
+- 0 — `SELECT` throws an exception if empty file is not compatible with requested format.
+- 1 — `SELECT` returns empty result for empty file.
+
 Default value: `0`.

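A sketch for the skip-empty-files family (URI and structure are placeholders): empty files matched by a glob no longer abort the read when the setting is on.

```sql
-- Placeholder URI/structure; empty files matched by the glob are skipped
SELECT * FROM hdfs('hdfs://hdfs1:9000/logs/*.tsv', 'TSV', 'a UInt32, b String')
SETTINGS hdfs_skip_empty_files = 1;
```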
@@ -11,7 +11,8 @@ Columns:
 - `host` ([String](../../sql-reference/data-types/string.md)) — The hostname/IP of the ZooKeeper node that ClickHouse connected to.
 - `port` ([String](../../sql-reference/data-types/string.md)) — The port of the ZooKeeper node that ClickHouse connected to.
 - `index` ([UInt8](../../sql-reference/data-types/int-uint.md)) — The index of the ZooKeeper node that ClickHouse connected to. The index is from ZooKeeper config.
-- `connected_time` ([String](../../sql-reference/data-types/string.md)) — When the connection was established
+- `connected_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — When the connection was established
+- `session_uptime_elapsed_seconds` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Seconds elapsed since the connection was established
 - `is_expired` ([UInt8](../../sql-reference/data-types/int-uint.md)) — Is the current connection expired.
 - `keeper_api_version` ([String](../../sql-reference/data-types/string.md)) — Keeper API version.
 - `client_id` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Session id of the connection.

@@ -23,7 +24,7 @@ SELECT * FROM system.zookeeper_connection;
 ```

 ``` text
-┌─name──────────────┬─host─────────┬─port─┬─index─┬──────connected_time─┬─is_expired─┬─keeper_api_version─┬──────────client_id─┐
-│ default_zookeeper │ 127.0.0.1    │ 2181 │     0 │ 2023-05-19 14:30:16 │          0 │                  0 │ 216349144108826660 │
-└───────────────────┴──────────────┴──────┴───────┴─────────────────────┴────────────┴────────────────────┴────────────────────┘
+┌─name────┬─host──────┬─port─┬─index─┬──────connected_time─┬─session_uptime_elapsed_seconds─┬─is_expired─┬─keeper_api_version─┬─client_id─┐
+│ default │ 127.0.0.1 │ 9181 │     0 │ 2023-06-15 14:36:01 │                           3058 │          0 │                  3 │         5 │
+└─────────┴───────────┴──────┴───────┴─────────────────────┴────────────────────────────────┴────────────┴────────────────────┴───────────┘
 ```
@@ -32,7 +32,7 @@ For example, Decimal32(4) can contain numbers from -99999.9999 to 99999.9999 wit

 Internally data is represented as normal signed integers with respective bit width. Real value ranges that can be stored in memory are a bit larger than specified above, which are checked only on conversion from a string.

-Because modern CPUs do not support 128-bit integers natively, operations on Decimal128 are emulated. Because of this Decimal128 works significantly slower than Decimal32/Decimal64.
+Because modern CPUs do not support 128-bit and 256-bit integers natively, operations on Decimal128 and Decimal256 are emulated. Thus, Decimal128 and Decimal256 work significantly slower than Decimal32/Decimal64.

 ## Operations and Result Type

@@ -59,6 +59,10 @@ Some functions on Decimal return result as Float64 (for example, var or stddev).

 During calculations on Decimal, integer overflows might happen. Excessive digits in a fraction are discarded (not rounded). Excessive digits in integer part will lead to an exception.

+:::warning
+Overflow check is not implemented for Decimal128 and Decimal256. In case of overflow incorrect result is returned, no exception is thrown.
+:::
+
 ``` sql
 SELECT toDecimal32(2, 4) AS x, x / 3
 ```
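As a small illustration of the scale rules discussed around this warning (a sketch of mine, not part of the diff): multiplying two Decimals adds their scales, so precision accumulates quickly and, for Decimal128/Decimal256, can overflow silently as the note above says.

```sql
-- Multiplication adds the scales: Decimal(9, 2) * Decimal(9, 2) yields scale 4
SELECT
    toDecimal32(2.50, 2) * toDecimal32(3.20, 2) AS product,
    toTypeName(product) AS type;
```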
@@ -1509,10 +1509,12 @@ parseDateTimeBestEffort(time_string [, time_zone])
 - A string containing 9..10 digit [unix timestamp](https://en.wikipedia.org/wiki/Unix_time).
 - A string with a date and a time component: `YYYYMMDDhhmmss`, `DD/MM/YYYY hh:mm:ss`, `DD-MM-YY hh:mm`, `YYYY-MM-DD hh:mm:ss`, etc.
 - A string with a date, but no time component: `YYYY`, `YYYYMM`, `YYYY*MM`, `DD/MM/YYYY`, `DD-MM-YY` etc.
-- A string with a day and time: `DD`, `DD hh`, `DD hh:mm`. In this case `YYYY-MM` are substituted as `2000-01`.
+- A string with a day and time: `DD`, `DD hh`, `DD hh:mm`. In this case `MM` is substituted by `01`.
 - A string that includes the date and time along with time zone offset information: `YYYY-MM-DD hh:mm:ss ±h:mm`, etc. For example, `2020-12-12 17:36:00 -5:00`.
+- A [syslog timestamp](https://datatracker.ietf.org/doc/html/rfc3164#section-4.1.2): `Mmm dd hh:mm:ss`. For example, `Jun 9 14:20:32`.

 For all of the formats with separator the function parses months names expressed by their full name or by the first three letters of a month name. Examples: `24/DEC/18`, `24-Dec-18`, `01-September-2018`.
+If the year is not specified, it is considered to be equal to the current year. If the resulting DateTime happen to be in the future (even by a second after the current moment), then the current year is substituted by the previous year.

 **Returned value**

@@ -1583,23 +1585,46 @@ Result:
 Query:

 ``` sql
-SELECT parseDateTimeBestEffort('10 20:19');
+SELECT toYear(now()) as year, parseDateTimeBestEffort('10 20:19');
 ```

 Result:

 ```response
-┌─parseDateTimeBestEffort('10 20:19')─┐
-│                 2000-01-10 20:19:00 │
-└─────────────────────────────────────┘
+┌─year─┬─parseDateTimeBestEffort('10 20:19')─┐
+│ 2023 │                 2023-01-10 20:19:00 │
+└──────┴─────────────────────────────────────┘
+```
+
+Query:
+
+``` sql
+WITH
+    now() AS ts_now,
+    formatDateTime(ts_around, '%b %e %T') AS syslog_arg
+SELECT
+    ts_now,
+    syslog_arg,
+    parseDateTimeBestEffort(syslog_arg)
+FROM (SELECT arrayJoin([ts_now - 30, ts_now + 30]) AS ts_around);
+```
+
+Result:
+
+```response
+┌──────────────ts_now─┬─syslog_arg──────┬─parseDateTimeBestEffort(syslog_arg)─┐
+│ 2023-06-30 23:59:30 │ Jun 30 23:59:00 │                 2023-06-30 23:59:00 │
+│ 2023-06-30 23:59:30 │ Jul 1 00:00:00  │                 2022-07-01 00:00:00 │
+└─────────────────────┴─────────────────┴─────────────────────────────────────┘
 ```

 **See Also**

-- [RFC 1123](https://tools.ietf.org/html/rfc1123)
+- [RFC 1123](https://datatracker.ietf.org/doc/html/rfc1123)
 - [toDate](#todate)
 - [toDateTime](#todatetime)
 - [ISO 8601 announcement by @xkcd](https://xkcd.com/1179/)
+- [RFC 3164](https://datatracker.ietf.org/doc/html/rfc3164#section-4.1.2)

 ## parseDateTimeBestEffortUS
@@ -232,6 +232,7 @@ ALTER TABLE table_with_ttl MODIFY COLUMN column_ttl REMOVE TTL;

 Materializes or updates a column with an expression for a default value (`DEFAULT` or `MATERIALIZED`).
 It is used if it is necessary to add or update a column with a complicated expression, because evaluating such an expression directly on `SELECT` executing turns out to be expensive.
+Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations).

 Syntax:

@@ -60,7 +60,7 @@ You can specify how long (in seconds) to wait for inactive replicas to execute a
 For all `ALTER` queries, if `alter_sync = 2` and some replicas are not active for more than the time, specified in the `replication_wait_for_inactive_replica_timeout` setting, then an exception `UNFINISHED` is thrown.
 :::

-For `ALTER TABLE ... UPDATE|DELETE` queries the synchronicity is defined by the [mutations_sync](/docs/en/operations/settings/settings.md/#mutations_sync) setting.
+For `ALTER TABLE ... UPDATE|DELETE|MATERIALIZE INDEX|MATERIALIZE PROJECTION|MATERIALIZE COLUMN` queries the synchronicity is defined by the [mutations_sync](/docs/en/operations/settings/settings.md/#mutations_sync) setting.

 ## Related content

@@ -142,19 +142,19 @@ The following operations with [projections](/docs/en/engines/table-engines/merge

 ## ADD PROJECTION

-`ALTER TABLE [db].name ADD PROJECTION [IF NOT EXISTS] name ( SELECT <COLUMN LIST EXPR> [GROUP BY] [ORDER BY] )` - Adds projection description to tables metadata.
+`ALTER TABLE [db.]name [ON CLUSTER cluster] ADD PROJECTION [IF NOT EXISTS] name ( SELECT <COLUMN LIST EXPR> [GROUP BY] [ORDER BY] )` - Adds projection description to tables metadata.

 ## DROP PROJECTION

-`ALTER TABLE [db].name DROP PROJECTION [IF EXISTS] name` - Removes projection description from tables metadata and deletes projection files from disk. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations).
+`ALTER TABLE [db.]name [ON CLUSTER cluster] DROP PROJECTION [IF EXISTS] name` - Removes projection description from tables metadata and deletes projection files from disk. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations).

 ## MATERIALIZE PROJECTION

-`ALTER TABLE [db.]table MATERIALIZE PROJECTION name IN PARTITION partition_name` - The query rebuilds the projection `name` in the partition `partition_name`. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations).
+`ALTER TABLE [db.]table [ON CLUSTER cluster] MATERIALIZE PROJECTION [IF EXISTS] name [IN PARTITION partition_name]` - The query rebuilds the projection `name` in the partition `partition_name`. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations).

 ## CLEAR PROJECTION

-`ALTER TABLE [db.]table CLEAR PROJECTION [IF EXISTS] name IN PARTITION partition_name` - Deletes projection files from disk without removing description. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations).
+`ALTER TABLE [db.]table [ON CLUSTER cluster] CLEAR PROJECTION [IF EXISTS] name [IN PARTITION partition_name]` - Deletes projection files from disk without removing description. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations).


 The commands `ADD`, `DROP` and `CLEAR` are lightweight in a sense that they only change metadata or remove files.
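A sketch of the extended projection syntax above; the table, projection, and partition names are hypothetical:

```sql
-- ADD PROJECTION is lightweight (metadata only); MATERIALIZE PROJECTION
-- rebuilds it for existing data, running as a mutation.
ALTER TABLE visits ADD PROJECTION IF NOT EXISTS user_totals
(
    SELECT user_id, sum(duration) GROUP BY user_id
);
ALTER TABLE visits MATERIALIZE PROJECTION user_totals IN PARTITION 202306;
```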
@@ -10,15 +10,25 @@ sidebar_label: INDEX

 The following operations are available:

-- `ALTER TABLE [db].table_name [ON CLUSTER cluster] ADD INDEX name expression TYPE type [GRANULARITY value] [FIRST|AFTER name]` - Adds index description to tables metadata.
-
-- `ALTER TABLE [db].table_name [ON CLUSTER cluster] DROP INDEX name` - Removes index description from tables metadata and deletes index files from disk. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations).
-
-- `ALTER TABLE [db.]table_name [ON CLUSTER cluster] MATERIALIZE INDEX name [IN PARTITION partition_name]` - Rebuilds the secondary index `name` for the specified `partition_name`. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations). If `IN PARTITION` part is omitted then it rebuilds the index for the whole table data.
-
-The first two commands are lightweight in a sense that they only change metadata or remove files.
-
-Also, they are replicated, syncing indices metadata via ZooKeeper.
+## ADD INDEX
+
+`ALTER TABLE [db.]table_name [ON CLUSTER cluster] ADD INDEX [IF NOT EXISTS] name expression TYPE type [GRANULARITY value] [FIRST|AFTER name]` - Adds index description to tables metadata.
+
+## DROP INDEX
+
+`ALTER TABLE [db.]table_name [ON CLUSTER cluster] DROP INDEX [IF EXISTS] name` - Removes index description from tables metadata and deletes index files from disk. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations).
+
+## MATERIALIZE INDEX
+
+`ALTER TABLE [db.]table_name [ON CLUSTER cluster] MATERIALIZE INDEX [IF EXISTS] name [IN PARTITION partition_name]` - Rebuilds the secondary index `name` for the specified `partition_name`. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations). If `IN PARTITION` part is omitted then it rebuilds the index for the whole table data.
+
+## CLEAR INDEX
+
+`ALTER TABLE [db.]table_name [ON CLUSTER cluster] CLEAR INDEX [IF EXISTS] name [IN PARTITION partition_name]` - Deletes the secondary index files from disk without removing description. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations).
+
+The commands `ADD`, `DROP`, and `CLEAR` are lightweight in the sense that they only change metadata or remove files.
+
+Also, they are replicated, syncing indices metadata via ClickHouse Keeper or ZooKeeper.

 :::note
 Index manipulation is supported only for tables with [`*MergeTree`](/docs/en/engines/table-engines/mergetree-family/mergetree.md) engine (including [replicated](/docs/en/engines/table-engines/mergetree-family/replication.md) variants).
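A sketch of the new per-operation syntax above; the table, column, and index names are hypothetical:

```sql
-- ADD INDEX only records metadata; MATERIALIZE INDEX rebuilds the index,
-- here for the whole table because IN PARTITION is omitted.
ALTER TABLE logs ADD INDEX IF NOT EXISTS message_idx message TYPE bloom_filter GRANULARITY 4;
ALTER TABLE logs MATERIALIZE INDEX message_idx;
```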
@@ -82,6 +82,35 @@ LIFETIME(MIN 0 MAX 1000)
 LAYOUT(FLAT())
 ```
+
+:::note
+When using the SQL console in [ClickHouse Cloud](https://clickhouse.com), you must specify a user (`default` or any other user with the role `default_role`) and password when creating a dictionary.
+:::
+
+```sql
+CREATE USER IF NOT EXISTS clickhouse_admin
+IDENTIFIED WITH sha256_password BY 'passworD43$x';
+
+GRANT default_role TO clickhouse_admin;
+
+CREATE DATABASE foo_db;
+
+CREATE TABLE foo_db.source_table (
+    id UInt64,
+    value String
+) ENGINE = MergeTree
+PRIMARY KEY id;
+
+CREATE DICTIONARY foo_db.id_value_dictionary
+(
+    id UInt64,
+    value String
+)
+PRIMARY KEY id
+SOURCE(CLICKHOUSE(TABLE 'source_table' USER 'clickhouse_admin' PASSWORD 'passworD43$x' DB 'foo_db'))
+LAYOUT(FLAT())
+LIFETIME(MIN 0 MAX 1000);
+```

 ### Create a dictionary from a table in a remote ClickHouse service

 Input table (in the remote ClickHouse service) `source_table`:
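For completeness, a query against the dictionary created in the example above (same database, dictionary, and attribute names):

```sql
-- Looks up the `value` attribute for key 1 in the dictionary defined above.
SELECT dictGet('foo_db.id_value_dictionary', 'value', toUInt64(1));
```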
@@ -196,6 +196,16 @@ SELECT count(*) FROM file('big_dir/**/file002', 'CSV', 'name String, value UInt3
 - `_path` — Path to the file.
 - `_file` — Name of the file.
+
+## Settings
+
+- [engine_file_empty_if_not_exists](/docs/en/operations/settings/settings.md#engine-file-emptyif-not-exists) - allows selecting empty data from a file that does not exist. Disabled by default.
+- [engine_file_truncate_on_insert](/docs/en/operations/settings/settings.md#engine-file-truncate-on-insert) - allows truncating the file before inserting into it. Disabled by default.
+- [engine_file_allow_create_multiple_files](/docs/en/operations/settings/settings.md#engine_file_allow_create_multiple_files) - allows creating a new file on each insert if the format has a suffix. Disabled by default.
+- [engine_file_skip_empty_files](/docs/en/operations/settings/settings.md#engine_file_skip_empty_files) - allows skipping empty files while reading. Disabled by default.
+- [storage_file_read_method](/docs/en/operations/settings/settings.md#engine-file-emptyif-not-exists) - method of reading data from storage file, one of: read, pread, mmap (only for clickhouse-local). Default value: `pread` for clickhouse-server, `mmap` for clickhouse-local.

 **See Also**

 - [Virtual columns](/docs/en/engines/table-engines/index.md#table_engines-virtual_columns)
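A sketch of one of the new settings in action; the file name is hypothetical:

```sql
-- engine_file_truncate_on_insert = 1 truncates data.csv before writing, so
-- repeated INSERTs replace the file contents instead of failing or appending.
INSERT INTO FUNCTION file('data.csv', 'CSV', 'name String, value UInt32')
SETTINGS engine_file_truncate_on_insert = 1
VALUES ('one', 1), ('two', 2);
```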
@@ -97,6 +97,12 @@ FROM hdfs('hdfs://hdfs1:9000/big_dir/file{0..9}{0..9}{0..9}', 'CSV', 'name Strin
 - `_path` — Path to the file.
 - `_file` — Name of the file.
+
+## Storage Settings {#storage-settings}
+
+- [hdfs_truncate_on_insert](/docs/en/operations/settings/settings.md#hdfs-truncate-on-insert) - allows truncating the file before inserting into it. Disabled by default.
+- [hdfs_create_multiple_files](/docs/en/operations/settings/settings.md#hdfs_allow_create_multiple_files) - allows creating a new file on each insert if the format has a suffix. Disabled by default.
+- [hdfs_skip_empty_files](/docs/en/operations/settings/settings.md#hdfs_skip_empty_files) - allows skipping empty files while reading. Disabled by default.

 **See Also**

 - [Virtual columns](../../engines/table-engines/index.md#table_engines-virtual_columns)
@@ -1,10 +1,10 @@
 ---
 slug: /en/sql-reference/table-functions/redis
-sidebar_position: 10
-sidebar_label: Redis
+sidebar_position: 43
+sidebar_label: redis
 ---

-# Redis
+# redis

 This table function allows integrating ClickHouse with [Redis](https://redis.io/).
@@ -202,6 +202,12 @@ FROM s3(
 LIMIT 5;
 ```
+
+## Storage Settings {#storage-settings}
+
+- [s3_truncate_on_insert](/docs/en/operations/settings/settings.md#s3-truncate-on-insert) - allows truncating the file before inserting into it. Disabled by default.
+- [s3_create_multiple_files](/docs/en/operations/settings/settings.md#s3_allow_create_multiple_files) - allows creating a new file on each insert if the format has a suffix. Disabled by default.
+- [s3_skip_empty_files](/docs/en/operations/settings/settings.md#s3_skip_empty_files) - allows skipping empty files while reading. Disabled by default.

 **See Also**

 - [S3 engine](../../engines/table-engines/integrations/s3.md)
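A sketch of one of the new settings; the bucket and path are hypothetical:

```sql
-- Zero-byte objects matched by the glob are skipped instead of raising an error.
SELECT count(*)
FROM s3('https://my-bucket.s3.amazonaws.com/data/*.csv', 'CSV', 'name String, value UInt32')
SETTINGS s3_skip_empty_files = 1;
```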
@@ -53,6 +53,10 @@ Character `|` inside patterns is used to specify failover addresses. They are it
 - `_path` — Path to the `URL`.
 - `_file` — Resource name of the `URL`.
+
+## Storage Settings {#storage-settings}
+
+- [engine_url_skip_empty_files](/docs/en/operations/settings/settings.md#engine_url_skip_empty_files) - allows skipping empty files while reading. Disabled by default.

 **See Also**

 - [Virtual columns](/docs/en/engines/table-engines/index.md#table_engines-virtual_columns)
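A sketch with a hypothetical URL:

```sql
-- With engine_url_skip_empty_files = 1 an empty response body yields an
-- empty result instead of a format-parsing error.
SELECT *
FROM url('https://example.com/data.csv', 'CSV', 'name String, value UInt32')
SETTINGS engine_url_skip_empty_files = 1;
```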
@@ -31,7 +31,7 @@ sidebar_label: Decimal
 ## Internal Representation {#vnutrennee-predstavlenie}

 Internally, the data is represented as signed integers of the corresponding bit width. The actual value ranges that fit in memory are slightly larger than the declared ones; the declared Decimal ranges are checked only when a number is parsed from its string representation.
-Because modern CPUs do not support 128-bit numbers, operations on Decimal128 are emulated in software. Decimal128 is several times slower than Decimal32/Decimal64.
+Because modern CPUs do not support 128-bit and 256-bit numbers, operations on Decimal128 and Decimal256 are emulated in software. These types are several times slower than Decimal32/Decimal64.

 ## Operations and Result Types {#operatsii-i-tipy-rezultata}

@@ -59,6 +59,10 @@ sidebar_label: Decimal

 Operations on the Decimal type can lead to integer overflow. Extra digits in the fractional part are discarded (not rounded). Extra digits in the integer part raise an exception.
+
+:::warning
+Overflow checks are not implemented for Decimal128 and Decimal256. In case of overflow, an incorrect result is returned and no exception is thrown.
+:::

 ``` sql
 SELECT toDecimal32(2, 4) AS x, x / 3
 ```
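An illustration of the unchecked-overflow warning added above; the exact wrong value returned is implementation-dependent:

```sql
-- 10^19 * 10^19 = 10^38 needs more than the 38 decimal digits Decimal128
-- can hold, so the result may silently be wrong; no exception is thrown.
SELECT toDecimal128(1e19, 0) * toDecimal128(1e19, 0) AS possibly_wrong;
```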
@@ -1223,10 +1223,12 @@ parseDateTimeBestEffort(time_string[, time_zone])
 - [Unix timestamp](https://ru.wikipedia.org/wiki/Unix-время) as a string, 9 or 10 characters long.
 - A string with date and time: `YYYYMMDDhhmmss`, `DD/MM/YYYY hh:mm:ss`, `DD-MM-YY hh:mm`, `YYYY-MM-DD hh:mm:ss`, etc.
 - A string with a date but no time: `YYYY`, `YYYYMM`, `YYYY*MM`, `DD/MM/YYYY`, `DD-MM-YY`, etc.
-- A string with a day and time: `DD`, `DD hh`, `DD hh:mm`. In this case `YYYY-MM` is assumed to be `2000-01`.
+- A string with a day and time: `DD`, `DD hh`, `DD hh:mm`. In this case `MM` is assumed to be `01`.
 - A string with date, time, and time zone information: `YYYY-MM-DD hh:mm:ss ±h:mm`, etc. For example, `2020-12-12 17:36:00 -5:00`.
+- A string with date and time in the [syslog timestamp](https://datatracker.ietf.org/doc/html/rfc3164#section-4.1.2) format: `Mmm dd hh:mm:ss`. For example, `Jun 9 14:20:32`.

 For all formats with a delimiter, the function parses month names given as the full English month name or as its first three letters. Examples: `24/DEC/18`, `24-Dec-18`, `01-September-2018`.
+If the year is not specified, it is assumed to be the current year. If the resulting time happens to be in the future (even one second ahead of the current moment), the current year is replaced with the previous one.

 **Returned value**

@@ -1297,23 +1299,46 @@ AS parseDateTimeBestEffort;
 Query:

 ``` sql
-SELECT parseDateTimeBestEffort('10 20:19');
+SELECT toYear(now()) as year, parseDateTimeBestEffort('10 20:19');
 ```

 Result:

 ``` text
-┌─parseDateTimeBestEffort('10 20:19')─┐
-│                 2000-01-10 20:19:00 │
-└─────────────────────────────────────┘
+┌─year─┬─parseDateTimeBestEffort('10 20:19')─┐
+│ 2023 │                 2023-01-10 20:19:00 │
+└──────┴─────────────────────────────────────┘
+```
+
+Query:
+
+``` sql
+WITH
+    now() AS ts_now,
+    formatDateTime(ts_around, '%b %e %T') AS syslog_arg
+SELECT
+    ts_now,
+    syslog_arg,
+    parseDateTimeBestEffort(syslog_arg)
+FROM (SELECT arrayJoin([ts_now - 30, ts_now + 30]) AS ts_around);
+```
+
+Result:
+
+``` text
+┌──────────────ts_now─┬─syslog_arg──────┬─parseDateTimeBestEffort(syslog_arg)─┐
+│ 2023-06-30 23:59:30 │ Jun 30 23:59:00 │                 2023-06-30 23:59:00 │
+│ 2023-06-30 23:59:30 │ Jul  1 00:00:00 │                 2022-07-01 00:00:00 │
+└─────────────────────┴─────────────────┴─────────────────────────────────────┘
 ```

 **See also**

 - [Information about the ISO 8601 format from @xkcd](https://xkcd.com/1179/)
-- [RFC 1123](https://tools.ietf.org/html/rfc1123)
+- [RFC 1123](https://datatracker.ietf.org/doc/html/rfc1123)
 - [toDate](#todate)
 - [toDateTime](#todatetime)
+- [RFC 3164](https://datatracker.ietf.org/doc/html/rfc3164#section-4.1.2)

 ## parseDateTimeBestEffortUS {#parsedatetimebesteffortUS}
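A sketch of the year-inference rule for the new syslog format:

```sql
-- The input carries no year, so the current year is assumed; if that would
-- place the result in the future, the previous year is used instead.
SELECT parseDateTimeBestEffort('Jun  9 14:20:32') AS t;
```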
@@ -409,8 +409,15 @@ if (ENABLE_CLICKHOUSE_KEEPER_CONVERTER)
     list(APPEND CLICKHOUSE_BUNDLE clickhouse-keeper-converter)
 endif ()
 if (ENABLE_CLICKHOUSE_KEEPER_CLIENT)
-    add_custom_target (clickhouse-keeper-client ALL COMMAND ${CMAKE_COMMAND} -E create_symlink clickhouse clickhouse-keeper-client DEPENDS clickhouse)
-    install (FILES "${CMAKE_CURRENT_BINARY_DIR}/clickhouse-keeper-client" DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse)
+    if (NOT BUILD_STANDALONE_KEEPER)
+        add_custom_target (clickhouse-keeper-client ALL COMMAND ${CMAKE_COMMAND} -E create_symlink clickhouse clickhouse-keeper-client DEPENDS clickhouse)
+        install (FILES "${CMAKE_CURRENT_BINARY_DIR}/clickhouse-keeper-client" DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse)
+    # symlink to standalone keeper binary
+    else ()
+        add_custom_target (clickhouse-keeper-client ALL COMMAND ${CMAKE_COMMAND} -E create_symlink clickhouse-keeper clickhouse-keeper-client DEPENDS clickhouse-keeper)
+        install (FILES "${CMAKE_CURRENT_BINARY_DIR}/clickhouse-keeper-client" DESTINATION "${CMAKE_INSTALL_BINDIR}" COMPONENT clickhouse-keeper)
+    endif ()
+
     list(APPEND CLICKHOUSE_BUNDLE clickhouse-keeper-client)
 endif ()
 if (ENABLE_CLICKHOUSE_DISKS)
@@ -2,6 +2,8 @@

 #include <base/types.h>

+#include <vector>
+
 namespace DB
 {
@@ -112,6 +112,18 @@ if (BUILD_STANDALONE_KEEPER)
         clickhouse-keeper.cpp
     )

+    # List of resources for clickhouse-keeper client
+    if (ENABLE_CLICKHOUSE_KEEPER_CLIENT)
+        list(APPEND CLICKHOUSE_KEEPER_STANDALONE_SOURCES
+            ${CMAKE_CURRENT_SOURCE_DIR}/../../programs/keeper-client/KeeperClient.cpp
+            ${CMAKE_CURRENT_SOURCE_DIR}/../../programs/keeper-client/Commands.cpp
+            ${CMAKE_CURRENT_SOURCE_DIR}/../../programs/keeper-client/Parser.cpp
+
+            ${CMAKE_CURRENT_SOURCE_DIR}/../../src/Client/LineReader.cpp
+            ${CMAKE_CURRENT_SOURCE_DIR}/../../src/Client/ReplxxLineReader.cpp
+        )
+    endif()
+
     clickhouse_add_executable(clickhouse-keeper ${CLICKHOUSE_KEEPER_STANDALONE_SOURCES})

     # Remove some redundant dependencies
@@ -122,6 +134,10 @@ if (BUILD_STANDALONE_KEEPER)
     target_include_directories(clickhouse-keeper PUBLIC "${CMAKE_CURRENT_BINARY_DIR}/../../src/Core/include") # uses some includes from core
     target_include_directories(clickhouse-keeper PUBLIC "${CMAKE_CURRENT_BINARY_DIR}/../../src") # uses some includes from common

+    if (ENABLE_CLICKHOUSE_KEEPER_CLIENT AND TARGET ch_rust::skim)
+        target_link_libraries(clickhouse-keeper PRIVATE ch_rust::skim)
+    endif()
+
     target_link_libraries(clickhouse-keeper
         PRIVATE
             ch_contrib::abseil_swiss_tables
@@ -34,6 +34,8 @@
 #include "Core/Defines.h"
 #include "config.h"
 #include "config_version.h"
+#include "config_tools.h"

 #if USE_SSL
 #    include <Poco/Net/Context.h>
@@ -131,7 +133,10 @@ int Keeper::run()
     if (config().hasOption("help"))
     {
         Poco::Util::HelpFormatter help_formatter(Keeper::options());
-        auto header_str = fmt::format("{} [OPTION] [-- [ARG]...]\n"
+        auto header_str = fmt::format("{0} [OPTION] [-- [ARG]...]\n"
+#if ENABLE_CLICKHOUSE_KEEPER_CLIENT
+                                      "{0} client [OPTION]\n"
+#endif
                                       "positional arguments can be used to rewrite config.xml properties, for example, --http_port=8010",
                                       commandName());
         help_formatter.setHeader(header_str);
@@ -1,6 +1,30 @@
+#include <Common/StringUtils/StringUtils.h>
+#include "config_tools.h"
+
+
 int mainEntryClickHouseKeeper(int argc, char ** argv);

+#if ENABLE_CLICKHOUSE_KEEPER_CLIENT
+int mainEntryClickHouseKeeperClient(int argc, char ** argv);
+#endif
+
 int main(int argc_, char ** argv_)
 {
+#if ENABLE_CLICKHOUSE_KEEPER_CLIENT
+    if (argc_ >= 2)
+    {
+        /// 'clickhouse-keeper --client ...' and 'clickhouse-keeper client ...' are OK
+        if (strcmp(argv_[1], "--client") == 0 || strcmp(argv_[1], "client") == 0)
+        {
+            argv_[1] = argv_[0];
+            return mainEntryClickHouseKeeperClient(--argc_, argv_ + 1);
+        }
+    }
+
+    if (argc_ > 0 && (strcmp(argv_[0], "clickhouse-keeper-client") == 0 || endsWith(argv_[0], "/clickhouse-keeper-client")))
+        return mainEntryClickHouseKeeperClient(argc_, argv_);
+#endif
+
     return mainEntryClickHouseKeeper(argc_, argv_);
 }
@@ -1705,7 +1705,6 @@ try
 #endif

     /// Must be done after initialization of `servers`, because async_metrics will access `servers` variable from its thread.
-
     async_metrics.start();

     {
|
|||||||
#include <bitset>
|
#include <bitset>
|
||||||
#include <cstring>
|
#include <cstring>
|
||||||
#include <vector>
|
#include <vector>
|
||||||
|
#include <unordered_map>
|
||||||
|
|
||||||
|
|
||||||
namespace DB
|
namespace DB
|
||||||
|
@@ -5,6 +5,7 @@
 #include <Access/GSSAcceptor.h>
 #include <base/defines.h>
 #include <base/types.h>
+#include <base/extended_types.h>

 #include <chrono>
 #include <map>
@@ -42,7 +43,7 @@ public:
 private:
     struct LDAPCacheEntry
     {
-        std::size_t last_successful_params_hash = 0;
+        UInt128 last_successful_params_hash = 0;
         std::chrono::steady_clock::time_point last_successful_authentication_timestamp;
         LDAPClient::SearchResultsList last_successful_role_search_results;
     };
@@ -146,8 +146,8 @@ public:
         for (const auto & argument : this->argument_types)
             can_be_compiled &= canBeNativeType(*argument);

-        auto return_type = this->getResultType();
-        can_be_compiled &= canBeNativeType(*return_type);
+        const auto & result_type = this->getResultType();
+        can_be_compiled &= canBeNativeType(*result_type);

         return can_be_compiled;
     }
@@ -198,8 +198,8 @@ public:
         auto * denominator_ptr = b.CreateConstGEP1_32(b.getInt8Ty(), aggregate_data_ptr, denominator_offset);
         auto * denominator_value = b.CreateLoad(denominator_type, denominator_ptr);

-        auto * double_numerator = nativeCast<Numerator>(b, numerator_value, b.getDoubleTy());
-        auto * double_denominator = nativeCast<Denominator>(b, denominator_value, b.getDoubleTy());
+        auto * double_numerator = nativeCast<Numerator>(b, numerator_value, this->getResultType());
+        auto * double_denominator = nativeCast<Denominator>(b, denominator_value, this->getResultType());

         return b.CreateFDiv(double_numerator, double_denominator);
     }
@@ -308,7 +308,7 @@ public:

 #if USE_EMBEDDED_COMPILER

-    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes & arguments_types, const std::vector<llvm::Value *> & argument_values) const override
+    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const ValuesWithType & arguments) const override
     {
         llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);

@@ -316,7 +316,7 @@ public:

         auto * numerator_ptr = aggregate_data_ptr;
         auto * numerator_value = b.CreateLoad(numerator_type, numerator_ptr);
-        auto * value_cast_to_numerator = nativeCast(b, arguments_types[0], argument_values[0], numerator_type);
+        auto * value_cast_to_numerator = nativeCast(b, arguments[0], toNativeDataType<Numerator>());
         auto * numerator_result_value = numerator_type->isIntegerTy() ? b.CreateAdd(numerator_value, value_cast_to_numerator) : b.CreateFAdd(numerator_value, value_cast_to_numerator);
         b.CreateStore(numerator_result_value, numerator_ptr);
@@ -55,7 +55,7 @@ public:
         return can_be_compiled;
     }

-    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes & arguments_types, const std::vector<llvm::Value *> & argument_values) const override
+    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const ValuesWithType & arguments) const override
     {
         llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);

@@ -63,8 +63,9 @@ public:
         auto * numerator_ptr = aggregate_data_ptr;
         auto * numerator_value = b.CreateLoad(numerator_type, numerator_ptr);

-        auto * argument = nativeCast(b, arguments_types[0], argument_values[0], numerator_type);
-        auto * weight = nativeCast(b, arguments_types[1], argument_values[1], numerator_type);
+        auto numerator_data_type = toNativeDataType<Numerator>();
+        auto * argument = nativeCast(b, arguments[0], numerator_data_type);
+        auto * weight = nativeCast(b, arguments[1], numerator_data_type);

         llvm::Value * value_weight_multiplication = argument->getType()->isIntegerTy() ? b.CreateMul(argument, weight) : b.CreateFMul(argument, weight);
         auto * numerator_result_value = numerator_type->isIntegerTy() ? b.CreateAdd(numerator_value, value_weight_multiplication) : b.CreateFAdd(numerator_value, value_weight_multiplication);
@@ -75,7 +76,7 @@ public:
         static constexpr size_t denominator_offset = offsetof(Fraction, denominator);
         auto * denominator_ptr = b.CreateConstInBoundsGEP1_64(b.getInt8Ty(), aggregate_data_ptr, denominator_offset);

-        auto * weight_cast_to_denominator = nativeCast(b, arguments_types[1], argument_values[1], denominator_type);
+        auto * weight_cast_to_denominator = nativeCast(b, arguments[1], toNativeDataType<Denominator>());

         auto * denominator_value = b.CreateLoad(denominator_type, denominator_ptr);
         auto * denominator_value_updated = denominator_type->isIntegerTy() ? b.CreateAdd(denominator_value, weight_cast_to_denominator) : b.CreateFAdd(denominator_value, weight_cast_to_denominator);
@@ -148,7 +148,7 @@ public:
         Data::compileCreate(builder, value_ptr);
     }

-    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes &, const std::vector<llvm::Value *> & argument_values) const override
+    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const ValuesWithType & arguments) const override
     {
         llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);

@@ -157,8 +157,7 @@ public:
         auto * value_ptr = aggregate_data_ptr;
         auto * value = b.CreateLoad(return_type, value_ptr);

-        const auto & argument_value = argument_values[0];
-        auto * result_value = Data::compileUpdate(builder, value, argument_value);
+        auto * result_value = Data::compileUpdate(builder, value, arguments[0].value);

         b.CreateStore(result_value, value_ptr);
     }
|
|||||||
b.CreateMemSet(aggregate_data_ptr, llvm::ConstantInt::get(b.getInt8Ty(), 0), sizeof(AggregateFunctionCountData), llvm::assumeAligned(this->alignOfData()));
|
b.CreateMemSet(aggregate_data_ptr, llvm::ConstantInt::get(b.getInt8Ty(), 0), sizeof(AggregateFunctionCountData), llvm::assumeAligned(this->alignOfData()));
|
||||||
}
|
}
|
||||||
|
|
||||||
void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes &, const std::vector<llvm::Value *> &) const override
|
void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const ValuesWithType &) const override
|
||||||
{
|
{
|
||||||
llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);
|
llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);
|
||||||
|
|
||||||
@ -309,13 +309,13 @@ public:
|
|||||||
b.CreateMemSet(aggregate_data_ptr, llvm::ConstantInt::get(b.getInt8Ty(), 0), sizeof(AggregateFunctionCountData), llvm::assumeAligned(this->alignOfData()));
|
b.CreateMemSet(aggregate_data_ptr, llvm::ConstantInt::get(b.getInt8Ty(), 0), sizeof(AggregateFunctionCountData), llvm::assumeAligned(this->alignOfData()));
|
||||||
}
|
}
|
||||||
|
|
||||||
void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes &, const std::vector<llvm::Value *> & values) const override
|
void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const ValuesWithType & arguments) const override
|
||||||
{
|
{
|
||||||
llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);
|
llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);
|
||||||
|
|
||||||
auto * return_type = toNativeType(b, this->getResultType());
|
auto * return_type = toNativeType(b, this->getResultType());
|
||||||
|
|
||||||
auto * is_null_value = b.CreateExtractValue(values[0], {1});
|
auto * is_null_value = b.CreateExtractValue(arguments[0].value, {1});
|
||||||
auto * increment_value = b.CreateSelect(is_null_value, llvm::ConstantInt::get(return_type, 0), llvm::ConstantInt::get(return_type, 1));
|
auto * increment_value = b.CreateSelect(is_null_value, llvm::ConstantInt::get(return_type, 0), llvm::ConstantInt::get(return_type, 1));
|
||||||
|
|
||||||
auto * count_value_ptr = aggregate_data_ptr;
|
auto * count_value_ptr = aggregate_data_ptr;
|
||||||
|
@@ -188,18 +188,18 @@ public:
         return canBeNativeType(*this->argument_types.back()) && this->nested_function->isCompilable();
     }

-    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes & arguments_types, const std::vector<llvm::Value *> & argument_values) const override
+    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const ValuesWithType & arguments) const override
     {
         llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);

-        const auto & nullable_type = arguments_types[0];
-        const auto & nullable_value = argument_values[0];
+        const auto & nullable_type = arguments[0].type;
+        const auto & nullable_value = arguments[0].value;

         auto * wrapped_value = b.CreateExtractValue(nullable_value, {0});
         auto * is_null_value = b.CreateExtractValue(nullable_value, {1});

-        const auto & predicate_type = arguments_types[argument_values.size() - 1];
-        auto * predicate_value = argument_values[argument_values.size() - 1];
+        const auto & predicate_type = arguments.back().type;
+        auto * predicate_value = arguments.back().value;
         auto * is_predicate_true = nativeBoolCast(b, predicate_type, predicate_value);

         auto * head = b.GetInsertBlock();
@@ -219,7 +219,7 @@ public:
         b.CreateStore(llvm::ConstantInt::get(b.getInt8Ty(), 1), aggregate_data_ptr);

         auto * aggregate_data_ptr_with_prefix_size_offset = b.CreateConstInBoundsGEP1_64(b.getInt8Ty(), aggregate_data_ptr, this->prefix_size);
-        this->nested_function->compileAdd(b, aggregate_data_ptr_with_prefix_size_offset, { removeNullable(nullable_type) }, { wrapped_value });
+        this->nested_function->compileAdd(b, aggregate_data_ptr_with_prefix_size_offset, { ValueWithType(wrapped_value, removeNullable(nullable_type)) });
         b.CreateBr(join_block);

         b.SetInsertPoint(join_block);
|
|||||||
return canBeNativeType(*this->argument_types.back()) && this->nested_function->isCompilable();
|
return canBeNativeType(*this->argument_types.back()) && this->nested_function->isCompilable();
|
||||||
}
|
}
|
||||||
|
|
||||||
void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes & arguments_types, const std::vector<llvm::Value *> & argument_values) const override
|
void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const ValuesWithType & arguments) const override
|
||||||
{
|
{
|
||||||
/// TODO: Check
|
|
||||||
|
|
||||||
llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);
|
llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);
|
||||||
|
|
||||||
size_t arguments_size = arguments_types.size();
|
size_t arguments_size = arguments.size();
|
||||||
|
|
||||||
|
ValuesWithType wrapped_arguments;
|
||||||
|
wrapped_arguments.reserve(arguments_size);
|
||||||
|
|
||||||
DataTypes non_nullable_types;
|
|
||||||
std::vector<llvm::Value * > wrapped_values;
|
|
||||||
std::vector<llvm::Value * > is_null_values;
|
std::vector<llvm::Value * > is_null_values;
|
||||||
|
|
||||||
non_nullable_types.resize(arguments_size);
|
|
||||||
wrapped_values.resize(arguments_size);
|
|
||||||
is_null_values.resize(arguments_size);
|
|
||||||
|
|
||||||
for (size_t i = 0; i < arguments_size; ++i)
|
for (size_t i = 0; i < arguments_size; ++i)
|
||||||
{
|
{
|
||||||
const auto & argument_value = argument_values[i];
|
const auto & argument_value = arguments[i].value;
|
||||||
|
const auto & argument_type = arguments[i].type;
|
||||||
|
|
||||||
if (is_nullable[i])
|
if (is_nullable[i])
|
||||||
{
|
{
|
||||||
auto * wrapped_value = b.CreateExtractValue(argument_value, {0});
|
auto * wrapped_value = b.CreateExtractValue(argument_value, {0});
|
||||||
is_null_values[i] = b.CreateExtractValue(argument_value, {1});
|
is_null_values.emplace_back(b.CreateExtractValue(argument_value, {1}));
|
||||||
|
wrapped_arguments.emplace_back(wrapped_value, removeNullable(argument_type));
|
||||||
wrapped_values[i] = wrapped_value;
|
|
||||||
non_nullable_types[i] = removeNullable(arguments_types[i]);
|
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
wrapped_values[i] = argument_value;
|
wrapped_arguments.emplace_back(argument_value, argument_type);
|
||||||
non_nullable_types[i] = arguments_types[i];
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -415,9 +408,6 @@ public:
|
|||||||
|
|
||||||
for (auto * is_null_value : is_null_values)
|
for (auto * is_null_value : is_null_values)
|
||||||
{
|
{
|
||||||
if (!is_null_value)
|
|
||||||
continue;
|
|
||||||
|
|
||||||
auto * values_have_null = b.CreateLoad(b.getInt1Ty(), values_have_null_ptr);
|
auto * values_have_null = b.CreateLoad(b.getInt1Ty(), values_have_null_ptr);
|
||||||
b.CreateStore(b.CreateOr(values_have_null, is_null_value), values_have_null_ptr);
|
b.CreateStore(b.CreateOr(values_have_null, is_null_value), values_have_null_ptr);
|
||||||
}
|
}
|
||||||
@ -426,8 +416,8 @@ public:
|
|||||||
|
|
||||||
b.SetInsertPoint(join_block_after_null_checks);
|
b.SetInsertPoint(join_block_after_null_checks);
|
||||||
|
|
||||||
const auto & predicate_type = arguments_types[argument_values.size() - 1];
|
const auto & predicate_type = arguments.back().type;
|
||||||
auto * predicate_value = argument_values[argument_values.size() - 1];
|
auto * predicate_value = arguments.back().value;
|
||||||
auto * is_predicate_true = nativeBoolCast(b, predicate_type, predicate_value);
|
auto * is_predicate_true = nativeBoolCast(b, predicate_type, predicate_value);
|
||||||
|
|
||||||
auto * if_true = llvm::BasicBlock::Create(head->getContext(), "if_true", head->getParent());
|
auto * if_true = llvm::BasicBlock::Create(head->getContext(), "if_true", head->getParent());
|
||||||
@ -444,7 +434,7 @@ public:
|
|||||||
b.CreateStore(llvm::ConstantInt::get(b.getInt8Ty(), 1), aggregate_data_ptr);
|
b.CreateStore(llvm::ConstantInt::get(b.getInt8Ty(), 1), aggregate_data_ptr);
|
||||||
|
|
||||||
auto * aggregate_data_ptr_with_prefix_size_offset = b.CreateConstInBoundsGEP1_64(b.getInt8Ty(), aggregate_data_ptr, this->prefix_size);
|
auto * aggregate_data_ptr_with_prefix_size_offset = b.CreateConstInBoundsGEP1_64(b.getInt8Ty(), aggregate_data_ptr, this->prefix_size);
|
||||||
this->nested_function->compileAdd(b, aggregate_data_ptr_with_prefix_size_offset, non_nullable_types, wrapped_values);
|
this->nested_function->compileAdd(b, aggregate_data_ptr_with_prefix_size_offset, wrapped_arguments);
|
||||||
b.CreateBr(join_block);
|
b.CreateBr(join_block);
|
||||||
|
|
||||||
b.SetInsertPoint(join_block);
|
b.SetInsertPoint(join_block);
|
||||||
|
@@ -223,12 +223,12 @@ public:
         nested_func->compileCreate(builder, aggregate_data_ptr);
     }

-    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes & arguments_types, const std::vector<llvm::Value *> & argument_values) const override
+    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const ValuesWithType & arguments) const override
     {
         llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);

-        const auto & predicate_type = arguments_types[argument_values.size() - 1];
-        auto * predicate_value = argument_values[argument_values.size() - 1];
+        const auto & predicate_type = arguments.back().type;
+        auto * predicate_value = arguments.back().value;

         auto * head = b.GetInsertBlock();

@@ -242,21 +242,9 @@ public:

         b.SetInsertPoint(if_true);

-        size_t arguments_size_without_predicate = arguments_types.size() - 1;
-
-        DataTypes argument_types_without_predicate;
-        std::vector<llvm::Value *> argument_values_without_predicate;
-
-        argument_types_without_predicate.resize(arguments_size_without_predicate);
-        argument_values_without_predicate.resize(arguments_size_without_predicate);
-
-        for (size_t i = 0; i < arguments_size_without_predicate; ++i)
-        {
-            argument_types_without_predicate[i] = arguments_types[i];
-            argument_values_without_predicate[i] = argument_values[i];
-        }
-
-        nested_func->compileAdd(builder, aggregate_data_ptr, argument_types_without_predicate, argument_values_without_predicate);
+        ValuesWithType arguments_without_predicate = arguments;
+        arguments_without_predicate.pop_back();
+        nested_func->compileAdd(builder, aggregate_data_ptr, arguments_without_predicate);

         b.CreateBr(join_block);
@@ -1459,11 +1459,11 @@ public:
         b.CreateMemSet(aggregate_data_ptr, llvm::ConstantInt::get(b.getInt8Ty(), 0), this->sizeOfData(), llvm::assumeAligned(this->alignOfData()));
     }

-    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes &, const std::vector<llvm::Value *> & argument_values) const override
+    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const ValuesWithType & arguments) const override
     {
         if constexpr (Data::is_compilable)
         {
-            Data::compileChangeIfBetter(builder, aggregate_data_ptr, argument_values[0]);
+            Data::compileChangeIfBetter(builder, aggregate_data_ptr, arguments[0].value);
         }
         else
         {
@@ -378,12 +378,12 @@ public:

 #if USE_EMBEDDED_COMPILER

-    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes & arguments_types, const std::vector<llvm::Value *> & argument_values) const override
+    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const ValuesWithType & arguments) const override
     {
         llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);

-        const auto & nullable_type = arguments_types[0];
-        const auto & nullable_value = argument_values[0];
+        const auto & nullable_type = arguments[0].type;
+        const auto & nullable_value = arguments[0].value;

         auto * wrapped_value = b.CreateExtractValue(nullable_value, {0});
         auto * is_null_value = b.CreateExtractValue(nullable_value, {1});
@@ -405,7 +405,7 @@ public:
         b.CreateStore(llvm::ConstantInt::get(b.getInt8Ty(), 1), aggregate_data_ptr);

         auto * aggregate_data_ptr_with_prefix_size_offset = b.CreateConstInBoundsGEP1_64(b.getInt8Ty(), aggregate_data_ptr, this->prefix_size);
-        this->nested_function->compileAdd(b, aggregate_data_ptr_with_prefix_size_offset, { removeNullable(nullable_type) }, { wrapped_value });
+        this->nested_function->compileAdd(b, aggregate_data_ptr_with_prefix_size_offset, { ValueWithType(wrapped_value, removeNullable(nullable_type)) });
         b.CreateBr(join_block);

         b.SetInsertPoint(join_block);
@@ -568,36 +568,32 @@ public:

 #if USE_EMBEDDED_COMPILER

-    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes & arguments_types, const std::vector<llvm::Value *> & argument_values) const override
+    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const ValuesWithType & arguments) const override
     {
         llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);

-        size_t arguments_size = arguments_types.size();
+        size_t arguments_size = arguments.size();
+
+        ValuesWithType wrapped_arguments;
+        wrapped_arguments.reserve(arguments_size);

-        DataTypes non_nullable_types;
-        std::vector<llvm::Value * > wrapped_values;
         std::vector<llvm::Value *> is_null_values;
+        is_null_values.reserve(arguments_size);

-        non_nullable_types.resize(arguments_size);
-        wrapped_values.resize(arguments_size);
-        is_null_values.resize(arguments_size);
-
         for (size_t i = 0; i < arguments_size; ++i)
         {
-            const auto & argument_value = argument_values[i];
+            const auto & argument_value = arguments[i].value;
+            const auto & argument_type = arguments[i].type;

             if (is_nullable[i])
             {
                 auto * wrapped_value = b.CreateExtractValue(argument_value, {0});
-                is_null_values[i] = b.CreateExtractValue(argument_value, {1});
-
-                wrapped_values[i] = wrapped_value;
-                non_nullable_types[i] = removeNullable(arguments_types[i]);
+                is_null_values.emplace_back(b.CreateExtractValue(argument_value, {1}));
+                wrapped_arguments.emplace_back(wrapped_value, removeNullable(argument_type));
             }
             else
             {
-                wrapped_values[i] = argument_value;
-                non_nullable_types[i] = arguments_types[i];
+                wrapped_arguments.emplace_back(argument_value, argument_type);
             }
         }

@@ -612,9 +608,6 @@ public:

         for (auto * is_null_value : is_null_values)
         {
-            if (!is_null_value)
-                continue;
-
             auto * values_have_null = b.CreateLoad(b.getInt1Ty(), values_have_null_ptr);
             b.CreateStore(b.CreateOr(values_have_null, is_null_value), values_have_null_ptr);
         }
@@ -630,7 +623,7 @@ public:
         b.CreateStore(llvm::ConstantInt::get(b.getInt8Ty(), 1), aggregate_data_ptr);

         auto * aggregate_data_ptr_with_prefix_size_offset = b.CreateConstInBoundsGEP1_64(b.getInt8Ty(), aggregate_data_ptr, this->prefix_size);
-        this->nested_function->compileAdd(b, aggregate_data_ptr_with_prefix_size_offset, arguments_types, wrapped_values);
+        this->nested_function->compileAdd(b, aggregate_data_ptr_with_prefix_size_offset, wrapped_arguments);
         b.CreateBr(join_block);

         b.SetInsertPoint(join_block);
@@ -588,7 +588,7 @@ public:
         b.CreateStore(llvm::Constant::getNullValue(return_type), aggregate_sum_ptr);
     }

-    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes & arguments_types, const std::vector<llvm::Value *> & argument_values) const override
+    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const ValuesWithType & arguments) const override
     {
         llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);

@@ -597,10 +597,7 @@ public:
         auto * sum_value_ptr = aggregate_data_ptr;
         auto * sum_value = b.CreateLoad(return_type, sum_value_ptr);

-        const auto & argument_type = arguments_types[0];
-        const auto & argument_value = argument_values[0];
-
-        auto * value_cast_to_result = nativeCast(b, argument_type, argument_value, return_type);
+        auto * value_cast_to_result = nativeCast(b, arguments[0], this->getResultType());
         auto * sum_result_value = sum_value->getType()->isIntegerTy() ? b.CreateAdd(sum_value, value_cast_to_result) : b.CreateFAdd(sum_value, value_cast_to_result);

         b.CreateStore(sum_result_value, sum_value_ptr);
@@ -6,6 +6,7 @@
 #include <Core/Block.h>
 #include <Core/ColumnNumbers.h>
 #include <Core/Field.h>
+#include <Core/ValuesWithType.h>
 #include <Interpreters/Context_fwd.h>
 #include <base/types.h>
 #include <Common/Exception.h>
@@ -389,7 +390,7 @@ public:
     }

     /// compileAdd should generate code for updating aggregate function state stored in aggregate_data_ptr
-    virtual void compileAdd(llvm::IRBuilderBase & /*builder*/, llvm::Value * /*aggregate_data_ptr*/, const DataTypes & /*arguments_types*/, const std::vector<llvm::Value *> & /*arguments_values*/) const
+    virtual void compileAdd(llvm::IRBuilderBase & /*builder*/, llvm::Value * /*aggregate_data_ptr*/, const ValuesWithType & /*arguments*/) const
     {
         throw Exception(ErrorCodes::NOT_IMPLEMENTED, "{} is not JIT-compilable", getName());
     }
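The `compileAdd` hook above backs JIT compilation of aggregate functions; a sketch that forces it on for a single query via the existing settings:

```sql
-- min_count_to_compile_aggregate_expression = 0 compiles on first use.
SELECT avg(number)
FROM numbers(1000000)
SETTINGS compile_aggregate_expressions = 1,
         min_count_to_compile_aggregate_expression = 0;
```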
@@ -185,11 +185,10 @@ void BackupCoordinationReplicatedTables::addPartNames(PartNamesForTableReplica &
             const String & other_replica_name = **other.replica_names.begin();
             throw Exception(
                 ErrorCodes::CANNOT_BACKUP_TABLE,
-                "Table {} on replica {} has part {} which is different from the part on replica {}. Must be the same",
-                table_name_for_logs,
-                replica_name,
-                part_name,
-                other_replica_name);
+                "Table {} on replica {} has part {} different from the part on replica {} "
+                "(checksum '{}' on replica {} != checksum '{}' on replica {})",
+                table_name_for_logs, replica_name, part_name, other_replica_name,
+                getHexUIntLowercase(checksum), replica_name, getHexUIntLowercase(other.checksum), other_replica_name);
         }
     }
@@ -85,6 +85,9 @@ void BackupCoordinationStageSync::setError(const String & current_host, const Ex
     writeException(exception, buf, true);
     zookeeper->createIfNotExists(zookeeper_path + "/error", buf.str());

+    /// When backup/restore fails, it removes the nodes from Zookeeper.
+    /// Sometimes it fails to remove all nodes. It's possible that it removes /error node, but fails to remove /stage node,
+    /// so the following line tries to preserve the error status.
     auto code = zookeeper->trySet(zookeeper_path, Stage::ERROR);
     if (code != Coordination::Error::ZOK)
         throw zkutil::KeeperException(code, zookeeper_path);
@@ -152,8 +152,7 @@ namespace
         }
         catch (...)
         {
-            if (coordination)
-                coordination->setError(Exception(getCurrentExceptionMessageAndPattern(true, true), getCurrentExceptionCode()));
+            sendExceptionToCoordination(coordination, Exception(getCurrentExceptionMessageAndPattern(true, true), getCurrentExceptionCode()));
         }
     }
@@ -18,7 +18,7 @@ ConnectionPoolPtr ConnectionPoolFactory::get(
     String client_name,
     Protocol::Compression compression,
     Protocol::Secure secure,
-    Int64 priority)
+    Priority priority)
 {
     Key key{
         max_connections, host, port, default_database, user, password, quota_key, cluster, cluster_secret, client_name, compression, secure, priority};

@@ -74,7 +74,7 @@ size_t ConnectionPoolFactory::KeyHash::operator()(const ConnectionPoolFactory::K
     hash_combine(seed, hash_value(k.client_name));
     hash_combine(seed, hash_value(k.compression));
     hash_combine(seed, hash_value(k.secure));
-    hash_combine(seed, hash_value(k.priority));
+    hash_combine(seed, hash_value(k.priority.value));
     return seed;
 }
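These Int64-to-Priority changes recur throughout the rest of this diff. Assuming Common/Priority.h is roughly a comparable wrapper over an Int64 (lower value means tried earlier), a sketch of its shape; note that the KeyHash hunk has to reach through to .value precisely because the wrapper itself has no hash support:

#include <compare>
#include <cstdint>

/// Assumed shape of Common/Priority.h: a strong typedef that cannot be
/// silently mixed with unrelated Int64 values, with ordering for free.
struct Priority
{
    std::int64_t value = 0;
    auto operator<=>(const Priority &) const = default;
};

static_assert(Priority{0} < Priority{1}); /// lower value = higher priority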
@@ -1,6 +1,7 @@
 #pragma once
 
 #include <Common/PoolBase.h>
+#include <Common/Priority.h>
 #include <Client/Connection.h>
 #include <IO/ConnectionTimeouts.h>
 #include <Core/Settings.h>

@@ -34,7 +35,7 @@ public:
         const Settings * settings = nullptr,
         bool force_connected = true) = 0;
 
-    virtual Int64 getPriority() const { return 1; }
+    virtual Priority getPriority() const { return Priority{1}; }
 };
 
 using ConnectionPoolPtr = std::shared_ptr<IConnectionPool>;

@@ -60,7 +61,7 @@ public:
         const String & client_name_,
         Protocol::Compression compression_,
         Protocol::Secure secure_,
-        Int64 priority_ = 1)
+        Priority priority_ = Priority{1})
         : Base(max_connections_,
             &Poco::Logger::get("ConnectionPool (" + host_ + ":" + toString(port_) + ")")),
         host(host_),

@@ -103,7 +104,7 @@ public:
         return host + ":" + toString(port);
     }
 
-    Int64 getPriority() const override
+    Priority getPriority() const override
     {
         return priority;
     }

@@ -134,7 +135,7 @@ private:
     String client_name;
     Protocol::Compression compression;  /// Whether to compress data when interacting with the server.
     Protocol::Secure secure;            /// Whether to encrypt data when interacting with the server.
-    Int64 priority;                     /// priority from <remote_servers>
+    Priority priority;                  /// priority from <remote_servers>
 };

@@ -157,7 +158,7 @@ public:
         String client_name;
         Protocol::Compression compression;
         Protocol::Secure secure;
-        Int64 priority;
+        Priority priority;
     };
 
     struct KeyHash

@@ -180,7 +181,7 @@ public:
         String client_name,
         Protocol::Compression compression,
         Protocol::Secure secure,
-        Int64 priority);
+        Priority priority);
 private:
     mutable std::mutex mutex;
     using ConnectionPoolWeakPtr = std::weak_ptr<IConnectionPool>;
@@ -71,7 +71,7 @@ IConnectionPool::Entry ConnectionPoolWithFailover::get(const ConnectionTimeouts
     return Base::get(max_ignored_errors, fallback_to_stale_replicas, try_get_entry, get_priority);
 }
 
-Int64 ConnectionPoolWithFailover::getPriority() const
+Priority ConnectionPoolWithFailover::getPriority() const
 {
     return (*std::max_element(nested_pools.begin(), nested_pools.end(), [](const auto & a, const auto & b)
     {

@@ -48,7 +48,7 @@ public:
         const Settings * settings,
         bool force_connected) override; /// From IConnectionPool
 
-    Int64 getPriority() const override; /// From IConnectionPool
+    Priority getPriority() const override; /// From IConnectionPool
 
     /** Allocates up to the specified number of connections to work.
       * Connections provide access to different replicas of one shard.
@@ -151,13 +151,13 @@ public:
 
     ColumnPtr compress() const override;
 
-    void forEachSubcolumn(ColumnCallback callback) const override
+    void forEachSubcolumn(MutableColumnCallback callback) override
     {
         callback(offsets);
         callback(data);
     }
 
-    void forEachSubcolumnRecursively(RecursiveColumnCallback callback) const override
+    void forEachSubcolumnRecursively(RecursiveMutableColumnCallback callback) override
     {
         callback(*offsets);
         offsets->forEachSubcolumnRecursively(callback);

@@ -230,12 +230,12 @@ public:
         data->getExtremes(min, max);
     }
 
-    void forEachSubcolumn(ColumnCallback callback) const override
+    void forEachSubcolumn(MutableColumnCallback callback) override
     {
         callback(data);
     }
 
-    void forEachSubcolumnRecursively(RecursiveColumnCallback callback) const override
+    void forEachSubcolumnRecursively(RecursiveMutableColumnCallback callback) override
     {
         callback(*data);
         data->forEachSubcolumnRecursively(callback);

@@ -166,7 +166,7 @@ public:
     size_t byteSizeAt(size_t n) const override { return getDictionary().byteSizeAt(getIndexes().getUInt(n)); }
     size_t allocatedBytes() const override { return idx.getPositions()->allocatedBytes() + getDictionary().allocatedBytes(); }
 
-    void forEachSubcolumn(ColumnCallback callback) const override
+    void forEachSubcolumn(MutableColumnCallback callback) override
     {
         callback(idx.getPositionsPtr());
 

@@ -175,7 +175,7 @@ public:
         callback(dictionary.getColumnUniquePtr());
     }
 
-    void forEachSubcolumnRecursively(RecursiveColumnCallback callback) const override
+    void forEachSubcolumnRecursively(RecursiveMutableColumnCallback callback) override
     {
         callback(*idx.getPositionsPtr());
         idx.getPositionsPtr()->forEachSubcolumnRecursively(callback);

@@ -340,7 +340,7 @@ private:
         explicit Dictionary(MutableColumnPtr && column_unique, bool is_shared);
         explicit Dictionary(ColumnPtr column_unique, bool is_shared);
 
-        const ColumnPtr & getColumnUniquePtr() const { return column_unique; }
+        const WrappedPtr & getColumnUniquePtr() const { return column_unique; }
         WrappedPtr & getColumnUniquePtr() { return column_unique; }
 
         const IColumnUnique & getColumnUnique() const { return static_cast<const IColumnUnique &>(*column_unique); }
@@ -273,12 +273,12 @@ void ColumnMap::getExtremes(Field & min, Field & max) const
     max = std::move(map_max_value);
 }
 
-void ColumnMap::forEachSubcolumn(ColumnCallback callback) const
+void ColumnMap::forEachSubcolumn(MutableColumnCallback callback)
 {
     callback(nested);
 }
 
-void ColumnMap::forEachSubcolumnRecursively(RecursiveColumnCallback callback) const
+void ColumnMap::forEachSubcolumnRecursively(RecursiveMutableColumnCallback callback)
 {
     callback(*nested);
     nested->forEachSubcolumnRecursively(callback);

@@ -88,8 +88,8 @@ public:
     size_t byteSizeAt(size_t n) const override;
     size_t allocatedBytes() const override;
     void protect() override;
-    void forEachSubcolumn(ColumnCallback callback) const override;
-    void forEachSubcolumnRecursively(RecursiveColumnCallback callback) const override;
+    void forEachSubcolumn(MutableColumnCallback callback) override;
+    void forEachSubcolumnRecursively(RecursiveMutableColumnCallback callback) override;
     bool structureEquals(const IColumn & rhs) const override;
     double getRatioOfDefaultRows(double sample_ratio) const override;
     UInt64 getNumberOfDefaultRows() const override;

@@ -130,13 +130,13 @@ public:
 
     ColumnPtr compress() const override;
 
-    void forEachSubcolumn(ColumnCallback callback) const override
+    void forEachSubcolumn(MutableColumnCallback callback) override
     {
         callback(nested_column);
         callback(null_map);
     }
 
-    void forEachSubcolumnRecursively(RecursiveColumnCallback callback) const override
+    void forEachSubcolumnRecursively(RecursiveMutableColumnCallback callback) override
     {
         callback(*nested_column);
         nested_column->forEachSubcolumnRecursively(callback);
@@ -664,18 +664,18 @@ size_t ColumnObject::allocatedBytes() const
     return res;
 }
 
-void ColumnObject::forEachSubcolumn(ColumnCallback callback) const
+void ColumnObject::forEachSubcolumn(MutableColumnCallback callback)
 {
-    for (const auto & entry : subcolumns)
-        for (const auto & part : entry->data.data)
+    for (auto & entry : subcolumns)
+        for (auto & part : entry->data.data)
             callback(part);
 }
 
-void ColumnObject::forEachSubcolumnRecursively(RecursiveColumnCallback callback) const
+void ColumnObject::forEachSubcolumnRecursively(RecursiveMutableColumnCallback callback)
 {
-    for (const auto & entry : subcolumns)
+    for (auto & entry : subcolumns)
     {
-        for (const auto & part : entry->data.data)
+        for (auto & part : entry->data.data)
         {
             callback(*part);
             part->forEachSubcolumnRecursively(callback);

@@ -206,8 +206,8 @@ public:
     size_t size() const override;
     size_t byteSize() const override;
     size_t allocatedBytes() const override;
-    void forEachSubcolumn(ColumnCallback callback) const override;
-    void forEachSubcolumnRecursively(RecursiveColumnCallback callback) const override;
+    void forEachSubcolumn(MutableColumnCallback callback) override;
+    void forEachSubcolumnRecursively(RecursiveMutableColumnCallback callback) override;
     void insert(const Field & field) override;
     void insertDefault() override;
     void insertFrom(const IColumn & src, size_t n) override;

@@ -751,13 +751,13 @@ bool ColumnSparse::structureEquals(const IColumn & rhs) const
     return false;
 }
 
-void ColumnSparse::forEachSubcolumn(ColumnCallback callback) const
+void ColumnSparse::forEachSubcolumn(MutableColumnCallback callback)
 {
     callback(values);
     callback(offsets);
 }
 
-void ColumnSparse::forEachSubcolumnRecursively(RecursiveColumnCallback callback) const
+void ColumnSparse::forEachSubcolumnRecursively(RecursiveMutableColumnCallback callback)
 {
     callback(*values);
     values->forEachSubcolumnRecursively(callback);

@@ -140,8 +140,8 @@ public:
 
     ColumnPtr compress() const override;
 
-    void forEachSubcolumn(ColumnCallback callback) const override;
-    void forEachSubcolumnRecursively(RecursiveColumnCallback callback) const override;
+    void forEachSubcolumn(MutableColumnCallback callback) override;
+    void forEachSubcolumnRecursively(RecursiveMutableColumnCallback callback) override;
 
     bool structureEquals(const IColumn & rhs) const override;
 
@@ -31,14 +31,12 @@ ColumnString::ColumnString(const ColumnString & src)
     offsets(src.offsets.begin(), src.offsets.end()),
     chars(src.chars.begin(), src.chars.end())
 {
-    if (!offsets.empty())
-    {
-        Offset last_offset = offsets.back();
+    Offset last_offset = offsets.empty() ? 0 : offsets.back();
 
     /// This will also prevent possible overflow in offset.
-    if (chars.size() != last_offset)
-        throw Exception(ErrorCodes::LOGICAL_ERROR, "String offsets has data inconsistent with chars array");
-    }
+    if (last_offset != chars.size())
+        throw Exception(ErrorCodes::LOGICAL_ERROR,
+            "String offsets has data inconsistent with chars array. Last offset: {}, array length: {}",
+            last_offset, chars.size());
 }
 

@@ -157,6 +155,7 @@ ColumnPtr ColumnString::filter(const Filter & filt, ssize_t result_size_hint) co
     Offsets & res_offsets = res->offsets;
+
 
     filterArraysImpl<UInt8>(chars, offsets, res_chars, res_offsets, filt, result_size_hint);
 
     return res;
 }
 

@@ -571,10 +570,11 @@ void ColumnString::protect()
 
 void ColumnString::validate() const
 {
-    if (!offsets.empty() && offsets.back() != chars.size())
+    Offset last_offset = offsets.empty() ? 0 : offsets.back();
+    if (last_offset != chars.size())
         throw Exception(ErrorCodes::LOGICAL_ERROR,
             "ColumnString validation failed: size mismatch (internal logical error) {} != {}",
-            offsets.back(), chars.size());
+            last_offset, chars.size());
 }
 
 }
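Both ColumnString hunks normalize the empty-offsets case into a single last_offset value before checking the invariant that the final offset equals chars.size(). The layout being validated, in a standalone sketch (assumed representation: all row values stored back to back in chars, each followed by a terminating zero byte, with offsets[i] pointing one past the end of row i):

#include <cstdint>
#include <string>
#include <vector>

int main()
{
    std::vector<std::uint8_t> chars;    /// all rows, concatenated
    std::vector<std::uint64_t> offsets; /// one-past-the-end position of each row

    for (const std::string & row : {std::string("foo"), std::string("ab")})
    {
        chars.insert(chars.end(), row.begin(), row.end());
        chars.push_back(0); /// each value is zero-terminated
        offsets.push_back(chars.size());
    }

    /// The check from ColumnString::validate(); it holds for empty columns too,
    /// because last_offset defaults to 0 when there are no rows.
    std::uint64_t last_offset = offsets.empty() ? 0 : offsets.back();
    return last_offset == chars.size() ? 0 : 1;
}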
@@ -495,15 +495,15 @@ void ColumnTuple::getExtremes(Field & min, Field & max) const
     max = max_tuple;
 }
 
-void ColumnTuple::forEachSubcolumn(ColumnCallback callback) const
+void ColumnTuple::forEachSubcolumn(MutableColumnCallback callback)
 {
-    for (const auto & column : columns)
+    for (auto & column : columns)
         callback(column);
 }
 
-void ColumnTuple::forEachSubcolumnRecursively(RecursiveColumnCallback callback) const
+void ColumnTuple::forEachSubcolumnRecursively(RecursiveMutableColumnCallback callback)
 {
-    for (const auto & column : columns)
+    for (auto & column : columns)
     {
         callback(*column);
         column->forEachSubcolumnRecursively(callback);

@@ -96,8 +96,8 @@ public:
     size_t byteSizeAt(size_t n) const override;
     size_t allocatedBytes() const override;
     void protect() override;
-    void forEachSubcolumn(ColumnCallback callback) const override;
-    void forEachSubcolumnRecursively(RecursiveColumnCallback callback) const override;
+    void forEachSubcolumn(MutableColumnCallback callback) override;
+    void forEachSubcolumnRecursively(RecursiveMutableColumnCallback callback) override;
     bool structureEquals(const IColumn & rhs) const override;
     bool isCollationSupported() const override;
     ColumnPtr compress() const override;
@@ -62,19 +62,19 @@ ColumnPtr IColumn::createWithOffsets(const Offsets & offsets, const Field & defa
     return res;
 }
 
-void IColumn::forEachSubcolumn(MutableColumnCallback callback)
+void IColumn::forEachSubcolumn(ColumnCallback callback) const
 {
-    std::as_const(*this).forEachSubcolumn([&callback](const WrappedPtr & subcolumn)
+    const_cast<IColumn*>(this)->forEachSubcolumn([&callback](WrappedPtr & subcolumn)
     {
-        callback(const_cast<WrappedPtr &>(subcolumn));
+        callback(std::as_const(subcolumn));
     });
 }
 
-void IColumn::forEachSubcolumnRecursively(RecursiveMutableColumnCallback callback)
+void IColumn::forEachSubcolumnRecursively(RecursiveColumnCallback callback) const
 {
-    std::as_const(*this).forEachSubcolumnRecursively([&callback](const IColumn & subcolumn)
+    const_cast<IColumn*>(this)->forEachSubcolumnRecursively([&callback](IColumn & subcolumn)
     {
-        callback(const_cast<IColumn &>(subcolumn));
+        callback(std::as_const(subcolumn));
     });
 }
 

@@ -418,21 +418,23 @@ public:
     /// If the column contains subcolumns (such as Array, Nullable, etc), do callback on them.
     /// Shallow: doesn't do recursive calls; don't do call for itself.
 
-    using ColumnCallback = std::function<void(const WrappedPtr &)>;
-    virtual void forEachSubcolumn(ColumnCallback) const {}
-
     using MutableColumnCallback = std::function<void(WrappedPtr &)>;
-    virtual void forEachSubcolumn(MutableColumnCallback callback);
+    virtual void forEachSubcolumn(MutableColumnCallback) {}
+
+    /// Default implementation calls the mutable overload using const_cast.
+    using ColumnCallback = std::function<void(const WrappedPtr &)>;
+    virtual void forEachSubcolumn(ColumnCallback) const;
 
     /// Similar to forEachSubcolumn but it also do recursive calls.
     /// In recursive calls it's prohibited to replace pointers
     /// to subcolumns, so we use another callback function.
 
-    using RecursiveColumnCallback = std::function<void(const IColumn &)>;
-    virtual void forEachSubcolumnRecursively(RecursiveColumnCallback) const {}
-
     using RecursiveMutableColumnCallback = std::function<void(IColumn &)>;
-    virtual void forEachSubcolumnRecursively(RecursiveMutableColumnCallback callback);
+    virtual void forEachSubcolumnRecursively(RecursiveMutableColumnCallback) {}
+
+    /// Default implementation calls the mutable overload using const_cast.
+    using RecursiveColumnCallback = std::function<void(const IColumn &)>;
+    virtual void forEachSubcolumnRecursively(RecursiveColumnCallback) const;
 
     /// Columns have equal structure.
     /// If true - you can use "compareAt", "insertFrom", etc. methods.
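The direction of the const/mutable adapter flips here: the mutable overload becomes the virtual hook that every column overrides (as the Column* hunks above show), and the const overload becomes a thin adapter implemented once in the base class. The pattern in isolation, with a hypothetical Node type rather than ClickHouse code; it is only sound because the adapter re-applies constness before the user callback ever sees a child:

#include <functional>
#include <utility>
#include <vector>

struct Node
{
    std::vector<Node> children;

    /// The virtual hook: derived types enumerate their children mutably.
    using MutableCallback = std::function<void(Node &)>;
    virtual void forEach(MutableCallback callback)
    {
        for (auto & child : children)
            callback(child);
    }

    /// The const adapter, written once: const_cast to reach the hook,
    /// then hand each child back out as const via std::as_const.
    using ConstCallback = std::function<void(const Node &)>;
    void forEach(ConstCallback callback) const
    {
        const_cast<Node *>(this)->forEach([&callback](Node & child) { callback(std::as_const(child)); });
    }

    virtual ~Node() = default;
};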
@@ -104,7 +104,7 @@ DNSResolver::IPAddresses hostByName(const std::string & host)
     }
     catch (const Poco::Net::DNSException & e)
     {
-        LOG_ERROR(&Poco::Logger::get("DNSResolver"), "Cannot resolve host ({}), error {}: {}.", host, e.code(), e.name());
+        LOG_WARNING(&Poco::Logger::get("DNSResolver"), "Cannot resolve host ({}), error {}: {}.", host, e.code(), e.name());
         addresses.clear();
     }
 
@@ -1,4 +1,5 @@
 #include <Common/GetPriorityForLoadBalancing.h>
+#include <Common/Priority.h>
 
 namespace DB
 {

@@ -8,23 +9,23 @@ namespace ErrorCodes
     extern const int LOGICAL_ERROR;
 }
 
-std::function<size_t(size_t index)> GetPriorityForLoadBalancing::getPriorityFunc(LoadBalancing load_balance, size_t offset, size_t pool_size) const
+std::function<Priority(size_t index)> GetPriorityForLoadBalancing::getPriorityFunc(LoadBalancing load_balance, size_t offset, size_t pool_size) const
 {
-    std::function<size_t(size_t index)> get_priority;
+    std::function<Priority(size_t index)> get_priority;
     switch (load_balance)
     {
         case LoadBalancing::NEAREST_HOSTNAME:
             if (hostname_differences.empty())
                 throw Exception(ErrorCodes::LOGICAL_ERROR, "It's a bug: hostname_differences is not initialized");
-            get_priority = [this](size_t i) { return hostname_differences[i]; };
+            get_priority = [this](size_t i) { return Priority{static_cast<Int64>(hostname_differences[i])}; };
             break;
         case LoadBalancing::IN_ORDER:
-            get_priority = [](size_t i) { return i; };
+            get_priority = [](size_t i) { return Priority{static_cast<Int64>(i)}; };
             break;
         case LoadBalancing::RANDOM:
             break;
        case LoadBalancing::FIRST_OR_RANDOM:
-            get_priority = [offset](size_t i) -> size_t { return i != offset; };
+            get_priority = [offset](size_t i) { return i != offset ? Priority{1} : Priority{0}; };
             break;
         case LoadBalancing::ROUND_ROBIN:
             if (last_used >= pool_size)

@@ -38,8 +39,8 @@ std::function<size_t(size_t index)> GetPriorityForLoadBalancing::getPriorityFunc
              * */
             get_priority = [this, pool_size](size_t i)
             {
-                ++i;
-                return i < last_used ? pool_size - i : i - last_used;
+                ++i; // To make `i` indexing start with 1 instead of 0 as `last_used` does
+                return Priority{static_cast<Int64>(i < last_used ? pool_size - i : i - last_used)};
             };
             break;
     }
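The ROUND_ROBIN branch now returns the Priority wrapper and documents the off-by-one: i is shifted to 1-based so it lines up with last_used. A standalone sketch of the formula showing the rotation it produces (hypothetical names; lower priority value means the replica is tried earlier):

#include <cstdint>
#include <cstdio>

struct Priority { std::int64_t value = 0; };

/// Same arithmetic as the ROUND_ROBIN lambda above, extracted for illustration.
Priority roundRobinPriority(std::size_t i, std::size_t last_used, std::size_t pool_size)
{
    ++i; /// 1-based, matching last_used
    return Priority{static_cast<std::int64_t>(i < last_used ? pool_size - i : i - last_used)};
}

int main()
{
    /// With pool_size = 4 and last_used = 2, priorities come out as 3, 0, 1, 2:
    /// replica 1 is tried first, then 2, 3, and finally 0, i.e. a rotation.
    for (std::size_t i = 0; i < 4; ++i)
        std::printf("replica %zu -> priority %lld\n", i, static_cast<long long>(roundRobinPriority(i, 2, 4).value));
}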
@@ -21,7 +21,7 @@ public:
         return !(*this == other);
     }
 
-    std::function<size_t(size_t index)> getPriorityFunc(LoadBalancing load_balance, size_t offset, size_t pool_size) const;
+    std::function<Priority(size_t index)> getPriorityFunc(LoadBalancing load_balance, size_t offset, size_t pool_size) const;
 
     std::vector<size_t> hostname_differences; /// Distances from name of this host to the names of hosts of pools.
 
@@ -13,6 +13,7 @@
 #include <Common/NetException.h>
 #include <Common/Exception.h>
 #include <Common/randomSeed.h>
+#include <Common/Priority.h>
 
 
 namespace DB

@@ -34,7 +35,7 @@ namespace ProfileEvents
 /// This class provides a pool with fault tolerance. It is used for pooling of connections to replicated DB.
 /// Initialized by several PoolBase objects.
 /// When a connection is requested, tries to create or choose an alive connection from one of the nested pools.
-/// Pools are tried in the order consistent with lexicographical order of (error count, priority, random number) tuples.
+/// Pools are tried in the order consistent with lexicographical order of (error count, slowdown count, config priority, priority, random number) tuples.
 /// Number of tries for a single pool is limited by max_tries parameter.
 /// The client can set nested pool priority by passing a GetPriority functor.
 ///

@@ -113,7 +114,7 @@ public:
 
     /// The client can provide this functor to affect load balancing - the index of a pool is passed to
     /// this functor. The pools with lower result value will be tried first.
-    using GetPriorityFunc = std::function<size_t(size_t index)>;
+    using GetPriorityFunc = std::function<Priority(size_t index)>;
 
     /// Returns at least min_entries and at most max_entries connections (at most one connection per nested pool).
     /// The method will throw if it is unable to get min_entries alive connections or
@@ -336,9 +337,9 @@ struct PoolWithFailoverBase<TNestedPool>::PoolState
     /// The number of slowdowns that led to changing replica in HedgedRequestsFactory
     UInt64 slowdown_count = 0;
     /// Priority from the <remote_server> configuration.
-    Int64 config_priority = 1;
+    Priority config_priority{1};
     /// Priority from the GetPriorityFunc.
-    Int64 priority = 0;
+    Priority priority{0};
     UInt64 random = 0;
 
     void randomize()
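The updated class comment earlier in this file spells out the full replica sort key. A sketch of the comparison it implies, assuming PoolState carries roughly these fields and that std::tie supplies the lexicographical order (lower tuples are tried first):

#include <cstdint>
#include <tuple>

struct Priority { std::int64_t value = 0; };

/// Assumed shape of PoolWithFailoverBase::PoolState, for illustration only.
struct PoolState
{
    std::uint64_t error_count = 0;
    std::uint64_t slowdown_count = 0;
    Priority config_priority{1};
    Priority priority{0};
    std::uint64_t random = 0;
};

/// Lexicographic (error count, slowdown count, config priority, priority, random).
bool operator<(const PoolState & lhs, const PoolState & rhs)
{
    return std::tie(lhs.error_count, lhs.slowdown_count, lhs.config_priority.value, lhs.priority.value, lhs.random)
         < std::tie(rhs.error_count, rhs.slowdown_count, rhs.config_priority.value, rhs.priority.value, rhs.random);
}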
@@ -381,11 +381,25 @@ The server successfully detected this situation and will download merged part fr
     M(CachedReadBufferReadFromCacheBytes, "Bytes read from filesystem cache") \
     M(CachedReadBufferCacheWriteBytes, "Bytes written from source (remote fs, etc) to filesystem cache") \
     M(CachedReadBufferCacheWriteMicroseconds, "Time spent writing data into filesystem cache") \
+    M(CachedReadBufferCreateBufferMicroseconds, "Prepare buffer time") \
     M(CachedWriteBufferCacheWriteBytes, "Bytes written from source (remote fs, etc) to filesystem cache") \
     M(CachedWriteBufferCacheWriteMicroseconds, "Time spent writing data into filesystem cache") \
     \
     M(FilesystemCacheEvictedBytes, "Number of bytes evicted from filesystem cache") \
     M(FilesystemCacheEvictedFileSegments, "Number of file segments evicted from filesystem cache") \
+    M(FilesystemCacheLockKeyMicroseconds, "Lock cache key time") \
+    M(FilesystemCacheLockMetadataMicroseconds, "Lock filesystem cache metadata time") \
+    M(FilesystemCacheLockCacheMicroseconds, "Lock filesystem cache time") \
+    M(FilesystemCacheReserveMicroseconds, "Filesystem cache space reservation time") \
+    M(FilesystemCacheGetOrSetMicroseconds, "Filesystem cache getOrSet() time") \
+    M(FilesystemCacheGetMicroseconds, "Filesystem cache get() time") \
+    M(FileSegmentWaitMicroseconds, "Wait on DOWNLOADING state") \
+    M(FileSegmentCompleteMicroseconds, "Duration of FileSegment::complete() in filesystem cache") \
+    M(FileSegmentLockMicroseconds, "Lock file segment time") \
+    M(FileSegmentWriteMicroseconds, "File segment write() time") \
+    M(FileSegmentUseMicroseconds, "File segment use() time") \
+    M(FileSegmentRemoveMicroseconds, "File segment remove() time") \
+    M(FileSegmentHolderCompleteMicroseconds, "File segments holder complete() time") \
     \
     M(RemoteFSSeeks, "Total number of seeks for async buffer") \
     M(RemoteFSPrefetches, "Number of prefetches made with asynchronous reading from remote filesystem") \

@@ -407,7 +421,6 @@ The server successfully detected this situation and will download merged part fr
     \
     M(FileSegmentWaitReadBufferMicroseconds, "Metric per file segment. Time spend waiting for internal read buffer (includes cache waiting)") \
     M(FileSegmentReadMicroseconds, "Metric per file segment. Time spend reading from file") \
-    M(FileSegmentWriteMicroseconds, "Metric per file segment. Time spend writing cache") \
     M(FileSegmentCacheWriteMicroseconds, "Metric per file segment. Time spend writing data to cache") \
     M(FileSegmentPredownloadMicroseconds, "Metric per file segment. Time spent predownloading data to cache (predownloading - finishing file segment download (after someone who failed to do that) up to the point current thread was requested to do)") \
     M(FileSegmentUsedBytes, "Metric per file segment. How many bytes were actually used from current file segment") \
@@ -179,7 +179,7 @@ ZooKeeper::ZooKeeper(const Poco::Util::AbstractConfiguration & config, const std
 
 std::vector<ShuffleHost> ZooKeeper::shuffleHosts() const
 {
-    std::function<size_t(size_t index)> get_priority = args.get_priority_load_balancing.getPriorityFunc(args.get_priority_load_balancing.load_balancing, 0, args.hosts.size());
+    std::function<Priority(size_t index)> get_priority = args.get_priority_load_balancing.getPriorityFunc(args.get_priority_load_balancing.load_balancing, 0, args.hosts.size());
     std::vector<ShuffleHost> shuffle_hosts;
     for (size_t i = 0; i < args.hosts.size(); ++i)
     {

@@ -49,7 +49,7 @@ constexpr size_t MULTI_BATCH_SIZE = 100;
 struct ShuffleHost
 {
     String host;
-    Int64 priority = 0;
+    Priority priority;
     UInt64 random = 0;
 
     void randomize()

@@ -526,6 +526,7 @@ public:
     String getConnectedZooKeeperHost() const { return connected_zk_host; }
     UInt16 getConnectedZooKeeperPort() const { return connected_zk_port; }
     size_t getConnectedZooKeeperIndex() const { return connected_zk_index; }
+    UInt64 getConnectedTime() const { return connected_time; }
 
 private:
     void init(ZooKeeperArgs args_);

@@ -593,6 +594,7 @@ private:
     String connected_zk_host;
     UInt16 connected_zk_port;
     size_t connected_zk_index;
+    UInt64 connected_time = timeInSeconds(std::chrono::system_clock::now());
 
     std::mutex mutex;
 
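ShuffleHost now carries a Priority instead of a bare Int64. Presumably shuffleHosts() orders candidate ZooKeeper hosts by (priority, random), so the load-balancing policy dominates while the random component breaks ties among equal priorities; a standalone sketch of that ordering, under those assumptions and with simplified types:

#include <algorithm>
#include <cstdint>
#include <random>
#include <string>
#include <tuple>
#include <vector>

struct Priority { std::int64_t value = 0; };

struct ShuffleHost
{
    std::string host;
    Priority priority;
    std::uint64_t random = 0;

    void randomize() { random = std::mt19937_64{std::random_device{}()}(); }
};

/// Hypothetical helper: lower (priority, random) tuples are contacted first.
void shuffleHosts(std::vector<ShuffleHost> & hosts)
{
    for (auto & host : hosts)
        host.randomize();
    std::sort(hosts.begin(), hosts.end(), [](const ShuffleHost & a, const ShuffleHost & b)
    {
        return std::tie(a.priority.value, a.random) < std::tie(b.priority.value, b.random);
    });
}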
@@ -1,5 +1,7 @@
 #pragma once
 
 #include <string>
+#include <functional>
+
 namespace DB
 {
@@ -2,6 +2,8 @@
 
 #include <Core/SettingsFields.h>
 #include <Common/SettingsChanges.h>
+#include <Common/FieldVisitorToString.h>
+#include <IO/Operators.h>
 #include <base/range.h>
 #include <boost/blank.hpp>
 #include <unordered_map>

@@ -547,14 +549,16 @@ void BaseSettings<TTraits>::read(ReadBuffer & in, SettingsWriteFormat format)
 template <typename TTraits>
 String BaseSettings<TTraits>::toString() const
 {
-    String res;
-    for (const auto & field : *this)
+    WriteBufferFromOwnString out;
+    bool first = true;
+    for (const auto & setting : *this)
     {
-        if (!res.empty())
-            res += ", ";
-        res += field.getName() + " = " + field.getValueString();
+        if (!first)
+            out << ", ";
+        out << setting.getName() << " = " << applyVisitor(FieldVisitorToString(), setting.getValue());
+        first = false;
     }
-    return res;
+    return out.str();
 }
 
 template <typename TTraits>
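toString() now streams into a WriteBufferFromOwnString and renders each value through FieldVisitorToString, so values are formatted the same way the settings subsystem prints them elsewhere, instead of relying on raw string concatenation. The joining idiom itself in a standalone form (std::ostringstream standing in for the WriteBuffer; a hypothetical helper, not the ClickHouse implementation):

#include <sstream>
#include <string>
#include <utility>
#include <vector>

/// Join "name = value" pairs with ", ", tracking the first element explicitly
/// rather than testing whether the accumulated string is still empty.
std::string joinSettings(const std::vector<std::pair<std::string, std::string>> & settings)
{
    std::ostringstream out;
    bool first = true;
    for (const auto & [name, value] : settings)
    {
        if (!first)
            out << ", ";
        out << name << " = " << value;
        first = false;
    }
    return out.str();
}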
@@ -127,7 +127,7 @@ namespace fmt
     template <typename FormatContext>
     auto format(const DB::QualifiedTableName & name, FormatContext & ctx)
     {
-        return format_to(ctx.out(), "{}.{}", DB::backQuoteIfNeed(name.database), DB::backQuoteIfNeed(name.table));
+        return fmt::format_to(ctx.out(), "{}.{}", DB::backQuoteIfNeed(name.database), DB::backQuoteIfNeed(name.table));
     }
 };
 }
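The only change here is qualifying the call as fmt::format_to, which keeps unqualified lookup from picking up an unrelated format_to through argument-dependent lookup in newer fmt releases. For context, a minimal custom formatter of the same shape (hypothetical QualifiedName type, simplified from DB::QualifiedTableName):

#include <string>

#include <fmt/format.h>

/// Hypothetical stand-in for DB::QualifiedTableName.
struct QualifiedName
{
    std::string database;
    std::string table;
};

template <>
struct fmt::formatter<QualifiedName>
{
    constexpr auto parse(fmt::format_parse_context & ctx) { return ctx.begin(); }

    template <typename FormatContext>
    auto format(const QualifiedName & name, FormatContext & ctx)
    {
        /// Qualified call, matching the fix above.
        return fmt::format_to(ctx.out(), "{}.{}", name.database, name.table);
    }
};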
@@ -96,6 +96,7 @@ class IColumn;
     M(Bool, s3_truncate_on_insert, false, "Enables or disables truncate before insert in s3 engine tables.", 0) \
     M(Bool, azure_truncate_on_insert, false, "Enables or disables truncate before insert in azure engine tables.", 0) \
     M(Bool, s3_create_new_file_on_insert, false, "Enables or disables creating a new file on each insert in s3 engine tables", 0) \
+    M(Bool, s3_skip_empty_files, false, "Allow to skip empty files in s3 table engine", 0) \
     M(Bool, azure_create_new_file_on_insert, false, "Enables or disables creating a new file on each insert in azure engine tables", 0) \
     M(Bool, s3_check_objects_after_upload, false, "Check each uploaded object to s3 with head request to be sure that upload was successful", 0) \
     M(Bool, s3_allow_parallel_part_upload, true, "Use multiple threads for s3 multipart upload. It may lead to slightly higher memory usage", 0) \

@@ -105,6 +106,7 @@ class IColumn;
     M(UInt64, hdfs_replication, 0, "The actual number of replications can be specified when the hdfs file is created.", 0) \
     M(Bool, hdfs_truncate_on_insert, false, "Enables or disables truncate before insert in s3 engine tables", 0) \
     M(Bool, hdfs_create_new_file_on_insert, false, "Enables or disables creating a new file on each insert in hdfs engine tables", 0) \
+    M(Bool, hdfs_skip_empty_files, false, "Allow to skip empty files in hdfs table engine", 0) \
     M(UInt64, hsts_max_age, 0, "Expired time for hsts. 0 means disable HSTS.", 0) \
     M(Bool, extremes, false, "Calculate minimums and maximums of the result columns. They can be output in JSON-formats.", IMPORTANT) \
     M(Bool, use_uncompressed_cache, false, "Whether to use the cache of uncompressed blocks.", 0) \

@@ -612,6 +614,8 @@ class IColumn;
     M(Bool, engine_file_empty_if_not_exists, false, "Allows to select data from a file engine table without file", 0) \
     M(Bool, engine_file_truncate_on_insert, false, "Enables or disables truncate before insert in file engine tables", 0) \
     M(Bool, engine_file_allow_create_multiple_files, false, "Enables or disables creating a new file on each insert in file engine tables if format has suffix.", 0) \
+    M(Bool, engine_file_skip_empty_files, false, "Allows to skip empty files in file table engine", 0) \
+    M(Bool, engine_url_skip_empty_files, false, "Allows to skip empty files in url table engine", 0) \
     M(Bool, allow_experimental_database_replicated, false, "Allow to create databases with Replicated engine", 0) \
     M(UInt64, database_replicated_initial_query_timeout_sec, 300, "How long initial DDL query should wait for Replicated database to precess previous DDL queue entries", 0) \
     M(Bool, database_replicated_enforce_synchronous_settings, false, "Enforces synchronous waiting for some queries (see also database_atomic_wait_for_drop_and_detach_synchronously, mutation_sync, alter_sync). Not recommended to enable these settings.", 0) \

@@ -860,6 +864,7 @@ class IColumn;
     M(Bool, input_format_csv_use_best_effort_in_schema_inference, true, "Use some tweaks and heuristics to infer schema in CSV format", 0) \
     M(Bool, input_format_tsv_use_best_effort_in_schema_inference, true, "Use some tweaks and heuristics to infer schema in TSV format", 0) \
     M(Bool, input_format_csv_detect_header, true, "Automatically detect header with names and types in CSV format", 0) \
+    M(Bool, input_format_csv_allow_whitespace_or_tab_as_delimiter, false, "Allow to use spaces and tabs(\\t) as field delimiter in the CSV strings", 0) \
     M(Bool, input_format_csv_trim_whitespaces, true, "Trims spaces and tabs (\\t) characters at the beginning and end in CSV strings", 0) \
     M(Bool, input_format_tsv_detect_header, true, "Automatically detect header with names and types in TSV format", 0) \
     M(Bool, input_format_custom_detect_header, true, "Automatically detect header with names and types in CustomSeparated format", 0) \

src/Core/ValueWithType.h (new file)
@@ -0,0 +1,26 @@
+#pragma once
+
+#include <DataTypes/IDataType.h>
+
+namespace llvm
+{
+    class Value;
+}
+
+namespace DB
+{
+
+/// LLVM value with its data type
+struct ValueWithType
+{
+    llvm::Value * value = nullptr;
+    DataTypePtr type;
+
+    ValueWithType() = default;
+    ValueWithType(llvm::Value * value_, DataTypePtr type_)
+        : value(value_)
+        , type(std::move(type_))
+    {}
+};
+
+}

src/Core/ValuesWithType.h (new file)
@@ -0,0 +1,13 @@
+#pragma once
+
+#include <vector>
+
+#include <Core/ValueWithType.h>
+
+
+namespace DB
+{
+
+using ValuesWithType = std::vector<ValueWithType>;
+
+}

@@ -54,6 +54,7 @@
 #include <Common/Elf.h>
 #include <Common/setThreadName.h>
 #include <Common/logger_useful.h>
+#include <Interpreters/Context.h>
 #include <filesystem>
 
 #include <Loggers/OwnFormattingChannel.h>

@@ -80,7 +81,9 @@ namespace DB
     }
 }
 
-DB::PipeFDs signal_pipe;
+using namespace DB;
+
+PipeFDs signal_pipe;
 
 
 /** Reset signal handler to the default and send signal to itself.
@@ -89,10 +92,10 @@ DB::PipeFDs signal_pipe;
 static void call_default_signal_handler(int sig)
 {
     if (SIG_ERR == signal(sig, SIG_DFL))
-        DB::throwFromErrno("Cannot set signal handler.", DB::ErrorCodes::CANNOT_SET_SIGNAL_HANDLER);
+        throwFromErrno("Cannot set signal handler.", ErrorCodes::CANNOT_SET_SIGNAL_HANDLER);
 
     if (0 != raise(sig))
-        DB::throwFromErrno("Cannot send signal.", DB::ErrorCodes::CANNOT_SEND_SIGNAL);
+        throwFromErrno("Cannot send signal.", ErrorCodes::CANNOT_SEND_SIGNAL);
 }
 
 static const size_t signal_pipe_buf_size =

@@ -110,8 +113,8 @@ static void writeSignalIDtoSignalPipe(int sig)
     auto saved_errno = errno;   /// We must restore previous value of errno in signal handler.
 
     char buf[signal_pipe_buf_size];
-    DB::WriteBufferFromFileDescriptor out(signal_pipe.fds_rw[1], signal_pipe_buf_size, buf);
-    DB::writeBinary(sig, out);
+    WriteBufferFromFileDescriptor out(signal_pipe.fds_rw[1], signal_pipe_buf_size, buf);
+    writeBinary(sig, out);
     out.next();
 
     errno = saved_errno;

@@ -141,17 +144,17 @@ static void signalHandler(int sig, siginfo_t * info, void * context)
     auto saved_errno = errno;   /// We must restore previous value of errno in signal handler.
 
     char buf[signal_pipe_buf_size];
-    DB::WriteBufferFromFileDescriptorDiscardOnFailure out(signal_pipe.fds_rw[1], signal_pipe_buf_size, buf);
+    WriteBufferFromFileDescriptorDiscardOnFailure out(signal_pipe.fds_rw[1], signal_pipe_buf_size, buf);
 
     const ucontext_t * signal_context = reinterpret_cast<ucontext_t *>(context);
     const StackTrace stack_trace(*signal_context);
 
-    DB::writeBinary(sig, out);
-    DB::writePODBinary(*info, out);
-    DB::writePODBinary(signal_context, out);
-    DB::writePODBinary(stack_trace, out);
-    DB::writeBinary(static_cast<UInt32>(getThreadId()), out);
-    DB::writePODBinary(DB::current_thread, out);
+    writeBinary(sig, out);
+    writePODBinary(*info, out);
+    writePODBinary(signal_context, out);
+    writePODBinary(stack_trace, out);
+    writeBinary(static_cast<UInt32>(getThreadId()), out);
+    writePODBinary(current_thread, out);
 
     out.next();
 
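These hunks only drop the now-redundant DB:: qualifiers, but the code they touch is the classic self-pipe pattern: a signal handler may only call async-signal-safe functions, so it serializes the signal number (and, in ClickHouse, the stack trace and thread info) into a pipe, and a listener thread does the unsafe work such as logging on the other end. A much-simplified standalone POSIX sketch of the same idea (hypothetical, not the ClickHouse payload):

#include <cerrno>
#include <csignal>
#include <unistd.h>

static int fds[2];

static void handler(int sig)
{
    int saved_errno = errno;                      /// restore errno, as signalHandler() above does
    ssize_t written = write(fds[1], &sig, sizeof(sig)); /// write() is async-signal-safe
    (void)written;
    errno = saved_errno;
}

int main()
{
    if (pipe(fds) != 0)
        return 1;
    std::signal(SIGUSR1, handler);
    std::raise(SIGUSR1);

    /// Listener side: an ordinary blocking read loop outside signal context.
    int sig = 0;
    ssize_t n = read(fds[0], &sig, sizeof(sig));
    return (n == static_cast<ssize_t>(sizeof(sig)) && sig == SIGUSR1) ? 0 : 1;
}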
@@ -203,12 +206,12 @@ public:
         static_assert(PIPE_BUF >= 512);
         static_assert(signal_pipe_buf_size <= PIPE_BUF, "Only write of PIPE_BUF to pipe is atomic and the minimal known PIPE_BUF across supported platforms is 512");
         char buf[signal_pipe_buf_size];
-        DB::ReadBufferFromFileDescriptor in(signal_pipe.fds_rw[0], signal_pipe_buf_size, buf);
+        ReadBufferFromFileDescriptor in(signal_pipe.fds_rw[0], signal_pipe_buf_size, buf);
 
         while (!in.eof())
         {
             int sig = 0;
-            DB::readBinary(sig, in);
+            readBinary(sig, in);
             // We may log some specific signals afterwards, with different log
             // levels and more info, but for completeness we log all signals
             // here at trace level.

@@ -231,8 +234,8 @@ public:
                 UInt32 thread_num;
                 std::string message;
 
-                DB::readBinary(thread_num, in);
-                DB::readBinary(message, in);
+                readBinary(thread_num, in);
+                readBinary(message, in);
 
                 onTerminate(message, thread_num);
             }

@@ -248,17 +251,17 @@ public:
                 ucontext_t * context{};
                 StackTrace stack_trace(NoCapture{});
                 UInt32 thread_num{};
-                DB::ThreadStatus * thread_ptr{};
+                ThreadStatus * thread_ptr{};
 
                 if (sig != SanitizerTrap)
                 {
-                    DB::readPODBinary(info, in);
-                    DB::readPODBinary(context, in);
+                    readPODBinary(info, in);
+                    readPODBinary(context, in);
                 }
 
-                DB::readPODBinary(stack_trace, in);
-                DB::readBinary(thread_num, in);
-                DB::readPODBinary(thread_ptr, in);
+                readPODBinary(stack_trace, in);
+                readBinary(thread_num, in);
+                readPODBinary(thread_ptr, in);
 
                 /// This allows to receive more signals if failure happens inside onFault function.
                 /// Example: segfault while symbolizing stack trace.

@@ -298,9 +301,9 @@ private:
         ucontext_t * context,
         const StackTrace & stack_trace,
         UInt32 thread_num,
-        DB::ThreadStatus * thread_ptr) const
+        ThreadStatus * thread_ptr) const
     {
-        DB::ThreadStatus thread_status;
+        ThreadStatus thread_status;
 
         String query_id;
         String query;

@@ -314,7 +317,7 @@ private:
 
             if (auto logs_queue = thread_ptr->getInternalTextLogsQueue())
             {
-                DB::CurrentThread::attachInternalTextLogsQueue(logs_queue, DB::LogsLevel::trace);
+                CurrentThread::attachInternalTextLogsQueue(logs_queue, LogsLevel::trace);
             }
         }
 

@@ -358,12 +361,12 @@ private:
         /// NOTE: This still require memory allocations and mutex lock inside logger.
         /// BTW we can also print it to stderr using write syscalls.
 
-        DB::WriteBufferFromOwnString bare_stacktrace;
-        DB::writeString("Stack trace:", bare_stacktrace);
+        WriteBufferFromOwnString bare_stacktrace;
+        writeString("Stack trace:", bare_stacktrace);
         for (size_t i = stack_trace.getOffset(); i < stack_trace.getSize(); ++i)
         {
-            DB::writeChar(' ', bare_stacktrace);
-            DB::writePointerHex(stack_trace.getFramePointers()[i], bare_stacktrace);
+            writeChar(' ', bare_stacktrace);
+            writePointerHex(stack_trace.getFramePointers()[i], bare_stacktrace);
         }
 
         LOG_FATAL(log, fmt::runtime(bare_stacktrace.str()));
@@ -411,8 +414,48 @@ private:

         /// Send crash report to developers (if configured)
         if (sig != SanitizerTrap)
+        {
             SentryWriter::onFault(sig, error_message, stack_trace);

+            /// Advice the user to send it manually.
+            if constexpr (std::string_view(VERSION_OFFICIAL).contains("official build"))
+            {
+                const auto & date_lut = DateLUT::instance();
+
+                /// Approximate support period, upper bound.
+                if (time(nullptr) - date_lut.makeDate(2000 + VERSION_MAJOR, VERSION_MINOR, 1) < (365 + 30) * 86400)
+                {
+                    LOG_FATAL(log, "Report this error to https://github.com/ClickHouse/ClickHouse/issues");
+                }
+                else
+                {
+                    LOG_FATAL(log, "ClickHouse version {} is old and should be upgraded to the latest version.", VERSION_STRING);
+                }
+            }
+            else
+            {
+                LOG_FATAL(log, "This ClickHouse version is not official and should be upgraded to the official build.");
+            }
+        }
+
+        /// ClickHouse Keeper does not link to some part of Settings.
+#ifndef CLICKHOUSE_PROGRAM_STANDALONE_BUILD
+        /// List changed settings.
+        if (!query_id.empty())
+        {
+            ContextPtr query_context = thread_ptr->getQueryContext();
+            if (query_context)
+            {
+                String changed_settings = query_context->getSettingsRef().toString();
+
+                if (changed_settings.empty())
+                    LOG_FATAL(log, "No settings were changed");
+                else
+                    LOG_FATAL(log, "Changed settings: {}", changed_settings);
+            }
+        }
+#endif
+
         /// When everything is done, we will try to send these error messages to client.
         if (thread_ptr)
             thread_ptr->onFatalError();
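The support-window check added above leans on ClickHouse's calendar versioning: VERSION_MAJOR is the two-digit release year (23 for 2023) and VERSION_MINOR is the month, so date_lut.makeDate(2000 + VERSION_MAJOR, VERSION_MINOR, 1) reconstructs the release month, and (365 + 30) * 86400 seconds approximates a year plus one month of support. A self-contained sketch of the same arithmetic using <ctime> instead of ClickHouse's DateLUT (function name invented):

    #include <ctime>

    /// Rough equivalent of the check above: is a build released in
    /// 20YY-MM still inside its ~13-month support window?
    bool withinSupportWindow(int version_major /* e.g. 23 */, int version_minor /* e.g. 6 */)
    {
        std::tm release{};
        release.tm_year = (2000 + version_major) - 1900; /// tm_year counts from 1900
        release.tm_mon = version_minor - 1;              /// tm_mon is zero-based
        release.tm_mday = 1;

        std::time_t release_ts = std::mktime(&release);
        return std::time(nullptr) - release_ts < (365 + 30) * 86400; /// upper bound
    }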
@@ -436,15 +479,15 @@ static DISABLE_SANITIZER_INSTRUMENTATION void sanitizerDeathCallback()
     /// Also need to send data via pipe. Otherwise it may lead to deadlocks or failures in printing diagnostic info.

     char buf[signal_pipe_buf_size];
-    DB::WriteBufferFromFileDescriptorDiscardOnFailure out(signal_pipe.fds_rw[1], signal_pipe_buf_size, buf);
+    WriteBufferFromFileDescriptorDiscardOnFailure out(signal_pipe.fds_rw[1], signal_pipe_buf_size, buf);

     const StackTrace stack_trace;

     int sig = SignalListener::SanitizerTrap;
-    DB::writeBinary(sig, out);
-    DB::writePODBinary(stack_trace, out);
-    DB::writeBinary(UInt32(getThreadId()), out);
-    DB::writePODBinary(DB::current_thread, out);
+    writeBinary(sig, out);
+    writePODBinary(stack_trace, out);
+    writeBinary(UInt32(getThreadId()), out);
+    writePODBinary(current_thread, out);

     out.next();

@@ -470,7 +513,7 @@ static DISABLE_SANITIZER_INSTRUMENTATION void sanitizerDeathCallback()
     std::string log_message;

     if (std::current_exception())
-        log_message = "Terminate called for uncaught exception:\n" + DB::getCurrentExceptionMessage(true);
+        log_message = "Terminate called for uncaught exception:\n" + getCurrentExceptionMessage(true);
     else
         log_message = "Terminate called without an active exception";

@@ -482,11 +525,11 @@ static DISABLE_SANITIZER_INSTRUMENTATION void sanitizerDeathCallback()
     log_message.resize(buf_size - 16);

     char buf[buf_size];
-    DB::WriteBufferFromFileDescriptor out(signal_pipe.fds_rw[1], buf_size, buf);
+    WriteBufferFromFileDescriptor out(signal_pipe.fds_rw[1], buf_size, buf);

-    DB::writeBinary(static_cast<int>(SignalListener::StdTerminate), out);
-    DB::writeBinary(static_cast<UInt32>(getThreadId()), out);
-    DB::writeBinary(log_message, out);
+    writeBinary(static_cast<int>(SignalListener::StdTerminate), out);
+    writeBinary(static_cast<UInt32>(getThreadId()), out);
+    writeBinary(log_message, out);
     out.next();

     abort();
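This block runs inside the process-wide terminate handler: it trims the message to the pipe's buffer capacity, ships it through the same signal pipe so the listener thread can log it safely, and then aborts. A minimal, self-contained sketch of wiring up such a handler with std::set_terminate (generic C++, not the ClickHouse classes):

    #include <cstdlib>
    #include <exception>
    #include <iostream>

    /// Minimal terminate handler in the same spirit: recover the active
    /// exception message if there is one, report it, then abort.
    [[noreturn]] void terminateHandler()
    {
        if (auto eptr = std::current_exception())
        {
            try { std::rethrow_exception(eptr); }
            catch (const std::exception & e)
            {
                std::cerr << "Terminate called for uncaught exception: " << e.what() << '\n';
            }
            catch (...)
            {
                std::cerr << "Terminate called for uncaught non-std exception\n";
            }
        }
        else
            std::cerr << "Terminate called without an active exception\n";

        std::abort();
    }

    int main()
    {
        std::set_terminate(terminateHandler);
        throw std::runtime_error("boom"); /// triggers the handler
    }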
@@ -512,7 +555,7 @@ static bool tryCreateDirectories(Poco::Logger * logger, const std::string & path
     }
     catch (...)
     {
-        LOG_WARNING(logger, "{}: when creating {}, {}", __PRETTY_FUNCTION__, path, DB::getCurrentExceptionMessage(true));
+        LOG_WARNING(logger, "{}: when creating {}, {}", __PRETTY_FUNCTION__, path, getCurrentExceptionMessage(true));
     }
     return false;
 }
@@ -527,7 +570,7 @@ void BaseDaemon::reloadConfiguration()
      * (It's convenient to log in console when you start server without any command line parameters.)
      */
     config_path = config().getString("config-file", getDefaultConfigFileName());
-    DB::ConfigProcessor config_processor(config_path, false, true);
+    ConfigProcessor config_processor(config_path, false, true);
     config_processor.setConfigPath(fs::path(config_path).parent_path());
     loaded_config = config_processor.loadConfig(/* allow_zk_includes = */ true);

@@ -548,7 +591,7 @@ BaseDaemon::~BaseDaemon()
     /// Reset signals to SIG_DFL to avoid trying to write to the signal_pipe that will be closed after.
     for (int sig : handled_signals)
         if (SIG_ERR == signal(sig, SIG_DFL))
-            DB::throwFromErrno("Cannot set signal handler.", DB::ErrorCodes::CANNOT_SET_SIGNAL_HANDLER);
+            throwFromErrno("Cannot set signal handler.", ErrorCodes::CANNOT_SET_SIGNAL_HANDLER);
     signal_pipe.close();
 }

@@ -592,7 +635,7 @@ void BaseDaemon::closeFDs()
     /// Iterate directory separately from closing fds to avoid closing iterated directory fd.
     std::vector<int> fds;
     for (const auto & path : fs::directory_iterator(proc_path))
-        fds.push_back(DB::parse<int>(path.path().filename()));
+        fds.push_back(parse<int>(path.path().filename()));

     for (const auto & fd : fds)
     {
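The comment explains why the work is split into two passes: closing a descriptor while iterating /proc/self/fd would invalidate the fd backing the directory iterator itself. A self-contained sketch of the same two-pass pattern using only the standard library (Linux-specific path, error handling simplified, name invented):

    #include <filesystem>
    #include <string>
    #include <vector>
    #include <unistd.h>

    namespace fs = std::filesystem;

    /// Close all inherited descriptors above stderr. First collect, then close,
    /// so we never close the fd that backs the directory iterator.
    void closeInheritedFDs()
    {
        std::vector<int> fds;
        for (const auto & entry : fs::directory_iterator("/proc/self/fd"))
            fds.push_back(std::stoi(entry.path().filename().string()));

        for (int fd : fds)
            if (fd > STDERR_FILENO)
                ::close(fd);
    }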
@@ -662,7 +705,7 @@ void BaseDaemon::initialize(Application & self)
     }
     umask(umask_num);

-    DB::ConfigProcessor(config_path).savePreprocessedConfig(loaded_config, "");
+    ConfigProcessor(config_path).savePreprocessedConfig(loaded_config, "");

     /// Write core dump on crash.
     {
@@ -713,12 +756,12 @@ void BaseDaemon::initialize(Application & self)
     /// {
     ///     try
     ///     {
-    ///         DB::SomeApp app;
+    ///         SomeApp app;
     ///         return app.run(argc, argv);
     ///     }
     ///     catch (...)
     ///     {
-    ///         std::cerr << DB::getCurrentExceptionMessage(true) << "\n";
+    ///         std::cerr << getCurrentExceptionMessage(true) << "\n";
     ///         return 1;
     ///     }
     /// }
@@ -772,7 +815,7 @@ void BaseDaemon::initialize(Application & self)

     /// Create pid file.
     if (config().has("pid"))
-        pid_file.emplace(config().getString("pid"), DB::StatusFile::write_pid);
+        pid_file.emplace(config().getString("pid"), StatusFile::write_pid);

     if (is_daemon)
     {
@@ -799,7 +842,7 @@ void BaseDaemon::initialize(Application & self)
     initializeTerminationAndSignalProcessing();
     logRevision();

-    for (const auto & key : DB::getMultipleKeysFromConfig(config(), "", "graphite"))
+    for (const auto & key : getMultipleKeysFromConfig(config(), "", "graphite"))
     {
         graphite_writers.emplace(key, std::make_unique<GraphiteWriter>(key));
     }
@@ -887,7 +930,7 @@ void BaseDaemon::initializeTerminationAndSignalProcessing()
     signal_listener_thread.start(*signal_listener);

 #if defined(__ELF__) && !defined(OS_FREEBSD)
-    String build_id_hex = DB::SymbolIndex::instance()->getBuildIDHex();
+    String build_id_hex = SymbolIndex::instance()->getBuildIDHex();
     if (build_id_hex.empty())
         build_id = "";
     else
@@ -902,7 +945,7 @@
     std::string executable_path = getExecutablePath();

     if (!executable_path.empty())
-        stored_binary_hash = DB::Elf(executable_path).getStoredBinaryHash();
+        stored_binary_hash = Elf(executable_path).getStoredBinaryHash();
 #endif
 }

@@ -963,7 +1006,7 @@ void BaseDaemon::handleSignal(int signal_id)
         onInterruptSignals(signal_id);
     }
     else
-        throw DB::Exception::createDeprecated(std::string("Unsupported signal: ") + strsignal(signal_id), 0); // NOLINT(concurrency-mt-unsafe) // it is not thread-safe but ok in this context
+        throw Exception::createDeprecated(std::string("Unsupported signal: ") + strsignal(signal_id), 0); // NOLINT(concurrency-mt-unsafe) // it is not thread-safe but ok in this context
 }

 void BaseDaemon::onInterruptSignals(int signal_id)
@@ -1020,7 +1063,7 @@ void BaseDaemon::setupWatchdog()
     pid = fork();

     if (-1 == pid)
-        DB::throwFromErrno("Cannot fork", DB::ErrorCodes::SYSTEM_ERROR);
+        throwFromErrno("Cannot fork", ErrorCodes::SYSTEM_ERROR);

     if (0 == pid)
     {
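setupWatchdog() follows the classic supervisor pattern: after fork(), the parent stays behind as a lightweight watchdog and the child becomes the actual server, so the parent can restart it after a crash. A bare-bones sketch of that pattern under stated simplifications (no signal forwarding, no logging, names invented):

    #include <sys/wait.h>
    #include <unistd.h>
    #include <cstdio>
    #include <cstdlib>

    /// Watchdog loop: fork the real work into a child; if the child dies on a
    /// signal, fork a fresh one. The child path simply runs the server.
    void runUnderWatchdog(void (*server_main)())
    {
        while (true)
        {
            pid_t pid = fork();
            if (pid == -1)
            {
                perror("fork");
                exit(1);
            }

            if (pid == 0)
            {
                server_main(); /// child: become the server
                _exit(0);
            }

            int status = 0;
            waitpid(pid, &status, 0); /// parent: wait for the child
            if (WIFEXITED(status))
                break; /// clean exit: stop supervising
            /// otherwise the child crashed; loop and restart it
        }
    }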
@@ -1073,13 +1116,13 @@ void BaseDaemon::setupWatchdog()
             pf = new OwnJSONPatternFormatter(config());
         else
             pf = new OwnPatternFormatter;
-        Poco::AutoPtr<DB::OwnFormattingChannel> log = new DB::OwnFormattingChannel(pf, new Poco::ConsoleChannel(std::cerr));
+        Poco::AutoPtr<OwnFormattingChannel> log = new OwnFormattingChannel(pf, new Poco::ConsoleChannel(std::cerr));
         logger().setChannel(log);
     }

     /// Cuncurrent writing logs to the same file from two threads is questionable on its own,
     /// but rotating them from two threads is disastrous.
-    if (auto * channel = dynamic_cast<DB::OwnSplitChannel *>(logger().getChannel()))
+    if (auto * channel = dynamic_cast<OwnSplitChannel *>(logger().getChannel()))
     {
         channel->setChannelProperty("log", Poco::FileChannel::PROP_ROTATION, "never");
         channel->setChannelProperty("log", Poco::FileChannel::PROP_ROTATEONOPEN, "false");
@@ -1191,7 +1234,7 @@ void systemdNotify(const std::string_view & command)
     int s = socket(AF_UNIX, SOCK_DGRAM | SOCK_CLOEXEC, 0);

     if (s == -1)
-        DB::throwFromErrno("Can't create UNIX socket for systemd notify.", DB::ErrorCodes::SYSTEM_ERROR);
+        throwFromErrno("Can't create UNIX socket for systemd notify.", ErrorCodes::SYSTEM_ERROR);

     SCOPE_EXIT({ close(s); });

@@ -1202,7 +1245,7 @@ void systemdNotify(const std::string_view & command)
     addr.sun_family = AF_UNIX;

     if (len < 2 || len > sizeof(addr.sun_path) - 1)
-        throw DB::Exception(DB::ErrorCodes::SYSTEM_ERROR, "NOTIFY_SOCKET env var value \"{}\" is wrong.", path);
+        throw Exception(ErrorCodes::SYSTEM_ERROR, "NOTIFY_SOCKET env var value \"{}\" is wrong.", path);

     memcpy(addr.sun_path, path, len + 1); /// write last zero as well.

@@ -1214,7 +1257,7 @@ void systemdNotify(const std::string_view & command)
     else if (path[0] == '/')
         addrlen += 1; /// non-abstract-addresses should be zero terminated.
     else
-        throw DB::Exception(DB::ErrorCodes::SYSTEM_ERROR, "Wrong UNIX path \"{}\" in NOTIFY_SOCKET env var", path);
+        throw Exception(ErrorCodes::SYSTEM_ERROR, "Wrong UNIX path \"{}\" in NOTIFY_SOCKET env var", path);

     const struct sockaddr *sock_addr = reinterpret_cast <const struct sockaddr *>(&addr);

@@ -1227,7 +1270,7 @@ void systemdNotify(const std::string_view & command)
             if (errno == EINTR)
                 continue;
             else
-                DB::throwFromErrno("Failed to notify systemd, sendto returned error.", DB::ErrorCodes::SYSTEM_ERROR);
+                throwFromErrno("Failed to notify systemd, sendto returned error.", ErrorCodes::SYSTEM_ERROR);
         }
         else
             sent_bytes_total += sent_bytes;
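The hunks above implement the systemd notification protocol by hand: read $NOTIFY_SOCKET, treat a leading '@' as an abstract socket address, and send a short datagram such as READY=1. A minimal standalone sketch of the same protocol (simplified error handling, name invented):

    #include <cstddef>
    #include <cstdlib>
    #include <cstring>
    #include <string_view>
    #include <sys/socket.h>
    #include <sys/un.h>
    #include <unistd.h>

    /// Send one state string ("READY=1", "STOPPING=1", ...) to systemd,
    /// following the NOTIFY_SOCKET protocol. Returns false if not applicable.
    bool sdNotify(std::string_view state)
    {
        const char * path = std::getenv("NOTIFY_SOCKET");
        if (!path || !*path)
            return false; /// not running under systemd with Type=notify

        sockaddr_un addr{};
        addr.sun_family = AF_UNIX;
        size_t len = std::strlen(path);
        if (len >= sizeof(addr.sun_path))
            return false;
        std::memcpy(addr.sun_path, path, len);

        socklen_t addrlen = offsetof(sockaddr_un, sun_path) + len;
        if (addr.sun_path[0] == '@')
            addr.sun_path[0] = '\0'; /// abstract namespace: leading NUL byte

        int fd = socket(AF_UNIX, SOCK_DGRAM | SOCK_CLOEXEC, 0);
        if (fd == -1)
            return false;

        ssize_t sent = sendto(fd, state.data(), state.size(), 0,
                              reinterpret_cast<const sockaddr *>(&addr), addrlen);
        close(fd);
        return sent == static_cast<ssize_t>(state.size());
    }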
@@ -37,6 +37,8 @@ public:

     bool canBeUsedAsVersion() const override { return true; }

+    bool isSummable() const override { return false; }
+
 protected:
     SerializationPtr doGetDefaultSerialization() const override;
 };
@@ -532,11 +532,6 @@ inline bool isNotDecimalButComparableToDecimal(const DataTypePtr & data_type)
     return which.isInt() || which.isUInt() || which.isFloat();
 }

-inline bool isCompilableType(const DataTypePtr & data_type)
-{
-    return data_type->isValueRepresentedByNumber() && !isDecimal(data_type);
-}
-
 inline bool isBool(const DataTypePtr & data_type)
 {
     return data_type->getName() == "Bool";
200 src/DataTypes/Native.cpp (new file)
@@ -0,0 +1,200 @@
+#include <DataTypes/Native.h>
+
+#if USE_EMBEDDED_COMPILER
+# include <DataTypes/DataTypeNullable.h>
+# include <Columns/ColumnConst.h>
+# include <Columns/ColumnNullable.h>
+
+
+namespace DB
+{
+
+namespace ErrorCodes
+{
+    extern const int NOT_IMPLEMENTED;
+    extern const int LOGICAL_ERROR;
+}
+
+bool typeIsSigned(const IDataType & type)
+{
+    WhichDataType data_type(type);
+    return data_type.isNativeInt() || data_type.isFloat() || data_type.isEnum() || data_type.isDate32();
+}
+
+llvm::Type * toNullableType(llvm::IRBuilderBase & builder, llvm::Type * type)
+{
+    auto * is_null_type = builder.getInt1Ty();
+    return llvm::StructType::get(type, is_null_type);
+}
+
+bool canBeNativeType(const IDataType & type)
+{
+    WhichDataType data_type(type);
+
+    if (data_type.isNullable())
+    {
+        const auto & data_type_nullable = static_cast<const DataTypeNullable&>(type);
+        return canBeNativeType(*data_type_nullable.getNestedType());
+    }
+
+    return data_type.isNativeInt() || data_type.isNativeUInt() || data_type.isFloat() || data_type.isDate()
+        || data_type.isDate32() || data_type.isDateTime() || data_type.isEnum();
+}
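toNullableType above is the key representation choice: a Nullable value is compiled as a two-field LLVM struct of the nested value plus an i1 null flag. As a hedged illustration (helper name invented), building that struct type for Nullable(Int32) with the LLVM C++ API:

    #include <llvm/IR/IRBuilder.h>
    #include <llvm/IR/LLVMContext.h>

    /// Sketch: the type a Nullable(Int32) value lowers to, mirroring
    /// toNullableType: { i32 value, i1 is_null }.
    llvm::StructType * makeNullableInt32(llvm::LLVMContext & ctx)
    {
        llvm::IRBuilder<> builder(ctx);
        llvm::Type * value_type = builder.getInt32Ty();
        llvm::Type * is_null_type = builder.getInt1Ty();
        return llvm::StructType::get(value_type, is_null_type);
    }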
+
+bool canBeNativeType(const DataTypePtr & type)
+{
+    return canBeNativeType(*type);
+}
+
+llvm::Type * toNativeType(llvm::IRBuilderBase & builder, const IDataType & type)
+{
+    WhichDataType data_type(type);
+
+    if (data_type.isNullable())
+    {
+        const auto & data_type_nullable = static_cast<const DataTypeNullable&>(type);
+        auto * nested_type = toNativeType(builder, *data_type_nullable.getNestedType());
+        return toNullableType(builder, nested_type);
+    }
+
+    /// LLVM doesn't have unsigned types, it has unsigned instructions.
+    if (data_type.isInt8() || data_type.isUInt8())
+        return builder.getInt8Ty();
+    else if (data_type.isInt16() || data_type.isUInt16() || data_type.isDate())
+        return builder.getInt16Ty();
+    else if (data_type.isInt32() || data_type.isUInt32() || data_type.isDate32() || data_type.isDateTime())
+        return builder.getInt32Ty();
+    else if (data_type.isInt64() || data_type.isUInt64())
+        return builder.getInt64Ty();
+    else if (data_type.isFloat32())
+        return builder.getFloatTy();
+    else if (data_type.isFloat64())
+        return builder.getDoubleTy();
+    else if (data_type.isEnum8())
+        return builder.getInt8Ty();
+    else if (data_type.isEnum16())
+        return builder.getInt16Ty();
+
+    throw Exception(ErrorCodes::LOGICAL_ERROR, "Invalid cast to native type");
+}
+
+llvm::Type * toNativeType(llvm::IRBuilderBase & builder, const DataTypePtr & type)
+{
+    return toNativeType(builder, *type);
+}
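The mapping above follows the storage layout: Date is a UInt16 day number, DateTime a UInt32 unix timestamp, and Enum8/Enum16 are stored as their underlying integers, which is why they all collapse onto plain LLVM integer types. A hedged illustration (helper name invented) of what JIT-compiled code then sees for a Date value:

    #include <llvm/IR/IRBuilder.h>

    /// Sketch: a Date reaches JIT-compiled code as a plain i16 day count
    /// since 1970-01-01, so "date + 1" is a single integer add.
    llvm::Value * addOneDay(llvm::IRBuilder<> & b, llvm::Value * date_as_i16)
    {
        return b.CreateAdd(date_as_i16, llvm::ConstantInt::get(b.getInt16Ty(), 1));
    }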
+
+llvm::Value * nativeBoolCast(llvm::IRBuilderBase & b, const DataTypePtr & from_type, llvm::Value * value)
+{
+    if (from_type->isNullable())
+    {
+        auto * inner = nativeBoolCast(b, removeNullable(from_type), b.CreateExtractValue(value, {0}));
+        return b.CreateAnd(b.CreateNot(b.CreateExtractValue(value, {1})), inner);
+    }
+
+    auto * zero = llvm::Constant::getNullValue(value->getType());
+
+    if (value->getType()->isIntegerTy())
+        return b.CreateICmpNE(value, zero);
+    else if (value->getType()->isFloatingPointTy())
+        return b.CreateFCmpUNE(value, zero);
+
+    throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Cannot cast non-number {} to bool", from_type->getName());
+}
+
+llvm::Value * nativeBoolCast(llvm::IRBuilderBase & b, const ValueWithType & value_with_type)
+{
+    return nativeBoolCast(b, value_with_type.type, value_with_type.value);
+}
+
+llvm::Value * nativeCast(llvm::IRBuilderBase & b, const DataTypePtr & from_type, llvm::Value * value, const DataTypePtr & to_type)
+{
+    if (from_type->equals(*to_type))
+    {
+        return value;
+    }
+    else if (from_type->isNullable() && to_type->isNullable())
+    {
+        auto * inner = nativeCast(b, removeNullable(from_type), b.CreateExtractValue(value, {0}), to_type);
+        return b.CreateInsertValue(inner, b.CreateExtractValue(value, {1}), {1});
+    }
+    else if (from_type->isNullable())
+    {
+        return nativeCast(b, removeNullable(from_type), b.CreateExtractValue(value, {0}), to_type);
+    }
+    else if (to_type->isNullable())
+    {
+        auto * from_native_type = toNativeType(b, from_type);
+        auto * inner = nativeCast(b, from_type, value, removeNullable(to_type));
+        return b.CreateInsertValue(llvm::Constant::getNullValue(from_native_type), inner, {0});
+    }
+    else
+    {
+        auto * from_native_type = toNativeType(b, from_type);
+        auto * to_native_type = toNativeType(b, to_type);
+
+        if (from_native_type == to_native_type)
+            return value;
+        else if (from_native_type->isIntegerTy() && to_native_type->isFloatingPointTy())
+            return typeIsSigned(*from_type) ? b.CreateSIToFP(value, to_native_type) : b.CreateUIToFP(value, to_native_type);
+        else if (from_native_type->isFloatingPointTy() && to_native_type->isIntegerTy())
+            return typeIsSigned(*to_type) ? b.CreateFPToSI(value, to_native_type) : b.CreateFPToUI(value, to_native_type);
+        else if (from_native_type->isIntegerTy() && from_native_type->isIntegerTy())
+            return b.CreateIntCast(value, to_native_type, typeIsSigned(*from_type));
+        else if (to_native_type->isFloatingPointTy() && to_native_type->isFloatingPointTy())
+            return b.CreateFPCast(value, to_native_type);
+    }
+
+    throw Exception(ErrorCodes::LOGICAL_ERROR,
+        "Invalid cast to native value from type {} to type {}",
+        from_type->getName(),
+        to_type->getName());
+}
+
+llvm::Value * nativeCast(llvm::IRBuilderBase & b, const ValueWithType & value, const DataTypePtr & to_type)
+{
+    return nativeCast(b, value.type, value.value, to_type);
+}
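Because LLVM integers carry no signedness, nativeCast has to consult the ClickHouse type system to pick the right instruction: SIToFP versus UIToFP, FPToSI versus FPToUI, and the sign-extension flag of CreateIntCast. A hedged standalone illustration of that dispatch (LLVM C++ API, function name invented):

    #include <llvm/IR/IRBuilder.h>

    /// Sketch of the sign-aware dispatch inside nativeCast: LLVM's i32 backs
    /// both Int32 and UInt32, so signedness must come from the source type.
    llvm::Value * castIntToDouble(llvm::IRBuilder<> & b, llvm::Value * value, bool is_signed)
    {
        llvm::Type * to_type = b.getDoubleTy();
        return is_signed ? b.CreateSIToFP(value, to_type)   /// signed source
                         : b.CreateUIToFP(value, to_type);  /// unsigned source
    }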
+
+llvm::Constant * getColumnNativeValue(llvm::IRBuilderBase & builder, const DataTypePtr & column_type, const IColumn & column, size_t index)
+{
+    if (const auto * constant = typeid_cast<const ColumnConst *>(&column))
+        return getColumnNativeValue(builder, column_type, constant->getDataColumn(), 0);
+
+    auto * type = toNativeType(builder, column_type);
+
+    WhichDataType column_data_type(column_type);
+    if (column_data_type.isNullable())
+    {
+        const auto & nullable_data_type = assert_cast<const DataTypeNullable &>(*column_type);
+        const auto & nullable_column = assert_cast<const ColumnNullable &>(column);
+
+        auto * value = getColumnNativeValue(builder, nullable_data_type.getNestedType(), nullable_column.getNestedColumn(), index);
+        auto * is_null = llvm::ConstantInt::get(type->getContainedType(1), nullable_column.isNullAt(index));
+
+        return llvm::ConstantStruct::get(static_cast<llvm::StructType *>(type), value, is_null);
+    }
+    else if (column_data_type.isFloat32())
+    {
+        return llvm::ConstantFP::get(type, assert_cast<const ColumnVector<Float32> &>(column).getElement(index));
+    }
+    else if (column_data_type.isFloat64())
+    {
+        return llvm::ConstantFP::get(type, assert_cast<const ColumnVector<Float64> &>(column).getElement(index));
+    }
+    else if (column_data_type.isNativeUInt() || column_data_type.isDate() || column_data_type.isDateTime())
+    {
+        return llvm::ConstantInt::get(type, column.getUInt(index));
+    }
+    else if (column_data_type.isNativeInt() || column_data_type.isEnum() || column_data_type.isDate32())
+    {
+        return llvm::ConstantInt::get(type, column.getInt(index));
+    }
+
+    throw Exception(ErrorCodes::LOGICAL_ERROR,
+        "Cannot get native value for column with type {}",
+        column_type->getName());
+}
+
+}
+
+#endif
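getColumnNativeValue turns a single column cell into an llvm::Constant, recursing through ColumnConst and Nullable. The nullable branch mirrors the {value, is_null} struct layout; as a hedged LLVM-only illustration (no ClickHouse types, name invented), the constant for a NULL cell of Nullable(Int32) would be built like this:

    #include <llvm/IR/Constants.h>
    #include <llvm/IR/IRBuilder.h>
    #include <llvm/IR/LLVMContext.h>

    /// Sketch: the constant for a NULL cell of Nullable(Int32), matching the
    /// { i32, i1 } layout above: value 0 plus is_null = true.
    llvm::Constant * makeNullInt32(llvm::LLVMContext & ctx)
    {
        llvm::IRBuilder<> builder(ctx);
        auto * struct_type = llvm::StructType::get(builder.getInt32Ty(), builder.getInt1Ty());
        auto * value = llvm::ConstantInt::get(builder.getInt32Ty(), 0);
        auto * is_null = llvm::ConstantInt::get(builder.getInt1Ty(), 1);
        return llvm::ConstantStruct::get(struct_type, value, is_null);
    }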
|
@ -4,65 +4,53 @@
|
|||||||
|
|
||||||
#if USE_EMBEDDED_COMPILER
|
#if USE_EMBEDDED_COMPILER
|
||||||
# include <Common/Exception.h>
|
# include <Common/Exception.h>
|
||||||
|
# include <Core/ValueWithType.h>
|
||||||
# include <DataTypes/IDataType.h>
|
# include <DataTypes/IDataType.h>
|
||||||
# include <DataTypes/DataTypeNullable.h>
|
|
||||||
# include <Columns/ColumnConst.h>
|
|
||||||
# include <Columns/ColumnNullable.h>
|
|
||||||
# include <llvm/IR/IRBuilder.h>
|
# include <llvm/IR/IRBuilder.h>
|
||||||
|
|
||||||
|
|
||||||
namespace DB
|
namespace DB
|
||||||
{
|
{
|
||||||
|
|
||||||
namespace ErrorCodes
|
namespace ErrorCodes
|
||||||
{
|
{
|
||||||
extern const int NOT_IMPLEMENTED;
|
extern const int LOGICAL_ERROR;
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline bool typeIsSigned(const IDataType & type)
|
/// Returns true if type is signed, false otherwise
|
||||||
|
bool typeIsSigned(const IDataType & type);
|
||||||
|
|
||||||
|
/// Cast LLVM type to nullable LLVM type
|
||||||
|
llvm::Type * toNullableType(llvm::IRBuilderBase & builder, llvm::Type * type);
|
||||||
|
|
||||||
|
/// Returns true if type can be native LLVM type, false otherwise
|
||||||
|
bool canBeNativeType(const IDataType & type);
|
||||||
|
|
||||||
|
/// Returns true if type can be native LLVM type, false otherwise
|
||||||
|
bool canBeNativeType(const DataTypePtr & type);
|
||||||
|
|
||||||
|
template <typename Type>
|
||||||
|
static inline bool canBeNativeType()
|
||||||
{
|
{
|
||||||
WhichDataType data_type(type);
|
if constexpr (std::is_same_v<Type, Int8> || std::is_same_v<Type, UInt8>)
|
||||||
return data_type.isNativeInt() || data_type.isFloat() || data_type.isEnum();
|
return true;
|
||||||
|
else if constexpr (std::is_same_v<Type, Int16> || std::is_same_v<Type, UInt16>)
|
||||||
|
return true;
|
||||||
|
else if constexpr (std::is_same_v<Type, Int32> || std::is_same_v<Type, UInt32>)
|
||||||
|
return true;
|
||||||
|
else if constexpr (std::is_same_v<Type, Int64> || std::is_same_v<Type, UInt64>)
|
||||||
|
return true;
|
||||||
|
else if constexpr (std::is_same_v<Type, Float32> || std::is_same_v<Type, Float64>)
|
||||||
|
return true;
|
||||||
|
|
||||||
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline llvm::Type * toNullableType(llvm::IRBuilderBase & builder, llvm::Type * type)
|
/// Cast type to native LLVM type
|
||||||
{
|
llvm::Type * toNativeType(llvm::IRBuilderBase & builder, const IDataType & type);
|
||||||
auto * is_null_type = builder.getInt1Ty();
|
|
||||||
return llvm::StructType::get(type, is_null_type);
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline llvm::Type * toNativeType(llvm::IRBuilderBase & builder, const IDataType & type)
|
/// Cast type to native LLVM type
|
||||||
{
|
llvm::Type * toNativeType(llvm::IRBuilderBase & builder, const DataTypePtr & type);
|
||||||
WhichDataType data_type(type);
|
|
||||||
|
|
||||||
if (data_type.isNullable())
|
|
||||||
{
|
|
||||||
const auto & data_type_nullable = static_cast<const DataTypeNullable&>(type);
|
|
||||||
auto * wrapped = toNativeType(builder, *data_type_nullable.getNestedType());
|
|
||||||
auto * is_null_type = builder.getInt1Ty();
|
|
||||||
return wrapped ? llvm::StructType::get(wrapped, is_null_type) : nullptr;
|
|
||||||
}
|
|
||||||
|
|
||||||
/// LLVM doesn't have unsigned types, it has unsigned instructions.
|
|
||||||
if (data_type.isInt8() || data_type.isUInt8())
|
|
||||||
return builder.getInt8Ty();
|
|
||||||
else if (data_type.isInt16() || data_type.isUInt16() || data_type.isDate())
|
|
||||||
return builder.getInt16Ty();
|
|
||||||
else if (data_type.isInt32() || data_type.isUInt32() || data_type.isDate32() || data_type.isDateTime())
|
|
||||||
return builder.getInt32Ty();
|
|
||||||
else if (data_type.isInt64() || data_type.isUInt64())
|
|
||||||
return builder.getInt64Ty();
|
|
||||||
else if (data_type.isFloat32())
|
|
||||||
return builder.getFloatTy();
|
|
||||||
else if (data_type.isFloat64())
|
|
||||||
return builder.getDoubleTy();
|
|
||||||
else if (data_type.isEnum8())
|
|
||||||
return builder.getInt8Ty();
|
|
||||||
else if (data_type.isEnum16())
|
|
||||||
return builder.getInt16Ty();
|
|
||||||
|
|
||||||
return nullptr;
|
|
||||||
}
|
|
||||||
|
|
||||||
template <typename ToType>
|
template <typename ToType>
|
||||||
static inline llvm::Type * toNativeType(llvm::IRBuilderBase & builder)
|
static inline llvm::Type * toNativeType(llvm::IRBuilderBase & builder)
|
||||||
@@ -80,203 +68,43 @@ static inline llvm::Type * toNativeType(llvm::IRBuilderBase & builder)
     else if constexpr (std::is_same_v<ToType, Float64>)
         return builder.getDoubleTy();

-    return nullptr;
+    throw Exception(ErrorCodes::LOGICAL_ERROR, "Invalid cast to native type");
 }

-template <typename Type>
-static inline bool canBeNativeType()
+template <typename ToType>
+static inline DataTypePtr toNativeDataType()
 {
-    if constexpr (std::is_same_v<Type, Int8> || std::is_same_v<Type, UInt8>)
-        return true;
-    else if constexpr (std::is_same_v<Type, Int16> || std::is_same_v<Type, UInt16>)
-        return true;
-    else if constexpr (std::is_same_v<Type, Int32> || std::is_same_v<Type, UInt32>)
-        return true;
-    else if constexpr (std::is_same_v<Type, Int64> || std::is_same_v<Type, UInt64>)
-        return true;
-    else if constexpr (std::is_same_v<Type, Float32>)
-        return true;
-    else if constexpr (std::is_same_v<Type, Float64>)
-        return true;
+    if constexpr (std::is_same_v<ToType, Int8> || std::is_same_v<ToType, UInt8> ||
+        std::is_same_v<ToType, Int16> || std::is_same_v<ToType, UInt16> ||
+        std::is_same_v<ToType, Int32> || std::is_same_v<ToType, UInt32> ||
+        std::is_same_v<ToType, Int64> || std::is_same_v<ToType, UInt64> ||
+        std::is_same_v<ToType, Float32> || std::is_same_v<ToType, Float64>)
+        return std::make_shared<DataTypeNumber<ToType>>();

-    return false;
+    throw Exception(ErrorCodes::LOGICAL_ERROR, "Invalid cast to native data type");
 }

-static inline bool canBeNativeType(const IDataType & type)
-{
-    WhichDataType data_type(type);
-
-    if (data_type.isNullable())
-    {
-        const auto & data_type_nullable = static_cast<const DataTypeNullable&>(type);
-        return canBeNativeType(*data_type_nullable.getNestedType());
-    }
-
-    return data_type.isNativeInt() || data_type.isNativeUInt() || data_type.isFloat() || data_type.isDate()
-        || data_type.isDate32() || data_type.isDateTime() || data_type.isEnum();
-}
+/// Cast LLVM value with type to bool
+llvm::Value * nativeBoolCast(llvm::IRBuilderBase & b, const DataTypePtr & from_type, llvm::Value * value);

-static inline llvm::Type * toNativeType(llvm::IRBuilderBase & builder, const DataTypePtr & type)
-{
-    return toNativeType(builder, *type);
-}
+/// Cast LLVM value with type to bool
+llvm::Value * nativeBoolCast(llvm::IRBuilderBase & b, const ValueWithType & value_with_type);

-static inline llvm::Value * nativeBoolCast(llvm::IRBuilder<> & b, const DataTypePtr & from_type, llvm::Value * value)
-{
-    if (from_type->isNullable())
-    {
-        auto * inner = nativeBoolCast(b, removeNullable(from_type), b.CreateExtractValue(value, {0}));
-        return b.CreateAnd(b.CreateNot(b.CreateExtractValue(value, {1})), inner);
-    }
-    auto * zero = llvm::Constant::getNullValue(value->getType());
-
-    if (value->getType()->isIntegerTy())
-        return b.CreateICmpNE(value, zero);
-    if (value->getType()->isFloatingPointTy())
-        return b.CreateFCmpUNE(value, zero);
-
-    throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Cannot cast non-number {} to bool", from_type->getName());
-}
+/// Cast LLVM value with type to specified type
+llvm::Value * nativeCast(llvm::IRBuilderBase & b, const DataTypePtr & from_type, llvm::Value * value, const DataTypePtr & to_type);

-static inline llvm::Value * nativeCast(llvm::IRBuilder<> & b, const DataTypePtr & from, llvm::Value * value, llvm::Type * to_type)
-{
-    auto * from_type = value->getType();
-
-    if (from_type == to_type)
-        return value;
-    else if (from_type->isIntegerTy() && to_type->isFloatingPointTy())
-        return typeIsSigned(*from) ? b.CreateSIToFP(value, to_type) : b.CreateUIToFP(value, to_type);
-    else if (from_type->isFloatingPointTy() && to_type->isIntegerTy())
-        return typeIsSigned(*from) ? b.CreateFPToSI(value, to_type) : b.CreateFPToUI(value, to_type);
-    else if (from_type->isIntegerTy() && to_type->isIntegerTy())
-        return b.CreateIntCast(value, to_type, typeIsSigned(*from));
-    else if (from_type->isFloatingPointTy() && to_type->isFloatingPointTy())
-        return b.CreateFPCast(value, to_type);
-
-    throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Cannot cast {} to requested type", from->getName());
-}
+/// Cast LLVM value with type to specified type
+llvm::Value * nativeCast(llvm::IRBuilderBase & b, const ValueWithType & value, const DataTypePtr & to_type);

 template <typename FromType>
-static inline llvm::Value * nativeCast(llvm::IRBuilder<> & b, llvm::Value * value, llvm::Type * to_type)
+static inline llvm::Value * nativeCast(llvm::IRBuilderBase & b, llvm::Value * value, const DataTypePtr & to)
 {
-    auto * from_type = value->getType();
-
-    static constexpr bool from_type_is_signed = std::numeric_limits<FromType>::is_signed;
-
-    if (from_type == to_type)
-        return value;
-    else if (from_type->isIntegerTy() && to_type->isFloatingPointTy())
-        return from_type_is_signed ? b.CreateSIToFP(value, to_type) : b.CreateUIToFP(value, to_type);
-    else if (from_type->isFloatingPointTy() && to_type->isIntegerTy())
-        return from_type_is_signed ? b.CreateFPToSI(value, to_type) : b.CreateFPToUI(value, to_type);
-    else if (from_type->isIntegerTy() && to_type->isIntegerTy())
-        return b.CreateIntCast(value, to_type, from_type_is_signed);
-    else if (from_type->isFloatingPointTy() && to_type->isFloatingPointTy())
-        return b.CreateFPCast(value, to_type);
-
-    throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Cannot cast {} to requested type", TypeName<FromType>);
+    auto native_data_type = toNativeDataType<FromType>();
+    return nativeCast(b, native_data_type, value, to);
 }

-static inline llvm::Value * nativeCast(llvm::IRBuilder<> & b, const DataTypePtr & from, llvm::Value * value, const DataTypePtr & to)
-{
-    auto * n_to = toNativeType(b, to);
-
-    if (value->getType() == n_to)
-        return value;
-    else if (from->isNullable() && to->isNullable())
-    {
-        auto * inner = nativeCast(b, removeNullable(from), b.CreateExtractValue(value, {0}), to);
-        return b.CreateInsertValue(inner, b.CreateExtractValue(value, {1}), {1});
-    }
-    else if (from->isNullable())
-        return nativeCast(b, removeNullable(from), b.CreateExtractValue(value, {0}), to);
-    else if (to->isNullable())
-    {
-        auto * inner = nativeCast(b, from, value, removeNullable(to));
-        return b.CreateInsertValue(llvm::Constant::getNullValue(n_to), inner, {0});
-    }
-
-    return nativeCast(b, from, value, n_to);
-}
+/// Get column value for specified index as LLVM constant
+llvm::Constant * getColumnNativeValue(llvm::IRBuilderBase & builder, const DataTypePtr & column_type, const IColumn & column, size_t index);

-static inline std::pair<llvm::Value *, llvm::Value *> nativeCastToCommon(llvm::IRBuilder<> & b, const DataTypePtr & lhs_type, llvm::Value * lhs, const DataTypePtr & rhs_type, llvm::Value * rhs) /// NOLINT
-{
-    llvm::Type * common;
-
-    bool lhs_is_signed = typeIsSigned(*lhs_type);
-    bool rhs_is_signed = typeIsSigned(*rhs_type);
-
-    if (lhs->getType()->isIntegerTy() && rhs->getType()->isIntegerTy())
-    {
-        /// if one integer has a sign bit, make sure the other does as well. llvm generates optimal code
-        /// (e.g. uses overflow flag on x86) for (word size + 1)-bit integer operations.
-
-        size_t lhs_bit_width = lhs->getType()->getIntegerBitWidth() + (!lhs_is_signed && rhs_is_signed);
-        size_t rhs_bit_width = rhs->getType()->getIntegerBitWidth() + (!rhs_is_signed && lhs_is_signed);
-
-        size_t max_bit_width = std::max(lhs_bit_width, rhs_bit_width);
-        common = b.getIntNTy(static_cast<unsigned>(max_bit_width));
-    }
-    else
-    {
-        /// TODO: Check
-        /// (double, float) or (double, int_N where N <= double's mantissa width) -> double
-        common = b.getDoubleTy();
-    }
-
-    auto * cast_lhs_to_common = nativeCast(b, lhs_type, lhs, common);
-    auto * cast_rhs_to_common = nativeCast(b, rhs_type, rhs, common);
-
-    return std::make_pair(cast_lhs_to_common, cast_rhs_to_common);
-}
-
-static inline llvm::Constant * getColumnNativeValue(llvm::IRBuilderBase & builder, const DataTypePtr & column_type, const IColumn & column, size_t index)
-{
-    if (const auto * constant = typeid_cast<const ColumnConst *>(&column))
-    {
-        return getColumnNativeValue(builder, column_type, constant->getDataColumn(), 0);
-    }
-
-    WhichDataType column_data_type(column_type);
-
-    auto * type = toNativeType(builder, column_type);
-
-    if (!type || column.size() <= index)
-        return nullptr;
-
-    if (column_data_type.isNullable())
-    {
-        const auto & nullable_data_type = assert_cast<const DataTypeNullable &>(*column_type);
-        const auto & nullable_column = assert_cast<const ColumnNullable &>(column);
-
-        auto * value = getColumnNativeValue(builder, nullable_data_type.getNestedType(), nullable_column.getNestedColumn(), index);
-        auto * is_null = llvm::ConstantInt::get(type->getContainedType(1), nullable_column.isNullAt(index));
-
-        return value ? llvm::ConstantStruct::get(static_cast<llvm::StructType *>(type), value, is_null) : nullptr;
-    }
-    else if (column_data_type.isFloat32())
-    {
-        return llvm::ConstantFP::get(type, assert_cast<const ColumnVector<Float32> &>(column).getElement(index));
-    }
-    else if (column_data_type.isFloat64())
-    {
-        return llvm::ConstantFP::get(type, assert_cast<const ColumnVector<Float64> &>(column).getElement(index));
-    }
-    else if (column_data_type.isNativeUInt() || column_data_type.isDate() || column_data_type.isDateTime())
-    {
-        return llvm::ConstantInt::get(type, column.getUInt(index));
-    }
-    else if (column_data_type.isNativeInt() || column_data_type.isEnum() || column_data_type.isDate32())
-    {
-        return llvm::ConstantInt::get(type, column.getInt(index));
-    }
-
-    return nullptr;
-}
-
 }

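toNativeDataType<ToType>() bridges compile-time C++ types back into runtime DataTypePtr values so the templated nativeCast overload can delegate to the runtime one. As a hedged usage sketch (call site and helper name invented, header paths assumed from this diff):

    #include <DataTypes/Native.h>
    #include <DataTypes/DataTypesNumber.h>

    namespace DB
    {

    /// Sketch: cast a JIT value statically known to be UInt32 into Float64.
    /// toNativeDataType<UInt32>() yields DataTypeNumber<UInt32>, so nativeCast
    /// selects CreateUIToFP (unsigned source).
    llvm::Value * castUInt32ToFloat64(llvm::IRBuilderBase & b, llvm::Value * value)
    {
        return nativeCast<UInt32>(b, value, std::make_shared<DataTypeNumber<Float64>>());
    }

    }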
Some files were not shown because too many files have changed in this diff.