mirror of https://github.com/ClickHouse/ClickHouse.git
synced 2024-11-22 15:42:02 +00:00

Merge branch 'master' into Docs/ip_addresses

This commit is contained in: commit c856c4a7df

.github/workflows/woboq.yml (vendored): 1 change
@@ -12,6 +12,7 @@ jobs:
  # don't use dockerhub push because this image updates so rarely
  WoboqCodebrowser:
    runs-on: [self-hosted, style-checker]
    timeout-minutes: 420 # the task is pretty heavy, so there's an additional hour
    steps:
      - name: Set envs
        run: |
@@ -6,8 +6,10 @@ rules:
    level: warning
  indent-sequences: consistent
  line-length:
-    # there are some bash -c "", so this is OK
-    max: 300
+    # there are:
+    # - bash -c "", so this is OK
+    # - yaml in tests
+    max: 1000
    level: warning
  comments:
    min-spaces-from-content: 1
README.md: 11 changes
@@ -22,12 +22,13 @@ curl https://clickhouse.com/ | sh

## Upcoming Events

* [**v23.5 Release Webinar**](https://clickhouse.com/company/events/v23-5-release-webinar?utm_source=github&utm_medium=social&utm_campaign=release-webinar-2023-05) - Jun 8 - 23.5 is rapidly approaching. Original creator, co-founder, and CTO of ClickHouse Alexey Milovidov will walk us through the highlights of the release.
* [**ClickHouse Meetup in Bangalore**](https://www.meetup.com/clickhouse-bangalore-user-group/events/293740066/) - Jun 7
* [**ClickHouse Meetup in San Francisco**](https://www.meetup.com/clickhouse-silicon-valley-meetup-group/events/293426725/) - Jun 7
* [**v23.6 Release Webinar**](https://clickhouse.com/company/events/v23-6-release-call?utm_source=github&utm_medium=social&utm_campaign=release-webinar-2023-06) - Jun 29 - 23.6 is rapidly approaching. Original creator, co-founder, and CTO of ClickHouse Alexey Milovidov will walk us through the highlights of the release.
* [**ClickHouse Meetup in Paris**](https://www.meetup.com/clickhouse-france-user-group/events/294283460) - Jul 4
* [**ClickHouse Meetup in Boston**](https://www.meetup.com/clickhouse-boston-user-group/events/293913596) - Jul 18
* [**ClickHouse Meetup in NYC**](https://www.meetup.com/clickhouse-new-york-user-group/events/293913441) - Jul 19
* [**ClickHouse Meetup in Toronto**](https://www.meetup.com/clickhouse-toronto-user-group/events/294183127) - Jul 20

-Also, keep an eye out for upcoming meetups in Amsterdam, Boston, NYC, Beijing, and Toronto. Somewhere else you want us to be? Please feel free to reach out to tyler <at> clickhouse <dot> com.
+Also, keep an eye out for upcoming meetups around the world. Somewhere else you want us to be? Please feel free to reach out to tyler <at> clickhouse <dot> com.

## Recent Recordings

* **Recent Meetup Videos**: [Meetup Playlist](https://www.youtube.com/playlist?list=PL0Z2YDlm0b3iNDUzpY1S3L_iV4nARda_U) Whenever possible, recordings of the ClickHouse Community Meetups are edited and presented as individual talks. Currently featuring "Modern SQL in 2023", "Fast, Concurrent, and Consistent Asynchronous INSERTS in ClickHouse", and "Full-Text Indices: Design and Experiments"
@@ -2,6 +2,7 @@

#include <cstdint>
#include <string>
#include <array>

#if defined(__SSE2__)
#include <emmintrin.h>
@@ -11,3 +11,8 @@ constexpr double interpolateExponential(double min, double max, double ratio)
     assert(min > 0 && ratio >= 0 && ratio <= 1);
     return min * std::pow(max / min, ratio);
 }
+
+constexpr double interpolateLinear(double min, double max, double ratio)
+{
+    return std::lerp(min, max, ratio);
+}
@@ -4,7 +4,7 @@ if (SANITIZE OR NOT (
    ))
    if (ENABLE_JEMALLOC)
        message (${RECONFIGURE_MESSAGE_LEVEL}
-               "jemalloc is disabled implicitly: it doesn't work with sanitizers and can only be used with x86_64, aarch64, or ppc64le Linux or FreeBSD builds and RelWithDebInfo macOS builds.")
+               "jemalloc is disabled implicitly: it doesn't work with sanitizers and can only be used with x86_64, aarch64, or ppc64le Linux or FreeBSD builds and RelWithDebInfo macOS builds. Use -DENABLE_JEMALLOC=0")
    endif ()
    set (ENABLE_JEMALLOC OFF)
else ()
@@ -32,7 +32,7 @@ RUN arch=${TARGETARCH:-amd64} \
    esac

ARG REPOSITORY="https://s3.amazonaws.com/clickhouse-builds/22.4/31c367d3cd3aefd316778601ff6565119fe36682/package_release"
-ARG VERSION="23.5.2.7"
+ARG VERSION="23.5.3.24"
ARG PACKAGES="clickhouse-keeper"

# user/group precreated explicitly with fixed uid/gid on purpose.
@@ -33,7 +33,7 @@ RUN arch=${TARGETARCH:-amd64} \
# lts / testing / prestable / etc
ARG REPO_CHANNEL="stable"
ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
-ARG VERSION="23.5.2.7"
+ARG VERSION="23.5.3.24"
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"

# user/group precreated explicitly with fixed uid/gid on purpose.
@@ -22,7 +22,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list

ARG REPO_CHANNEL="stable"
ARG REPOSITORY="deb https://packages.clickhouse.com/deb ${REPO_CHANNEL} main"
-ARG VERSION="23.5.2.7"
+ARG VERSION="23.5.3.24"
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"

# set non-empty deb_location_url url to create a docker image
@@ -20,6 +20,7 @@ For more information and documentation see https://clickhouse.com/.

- The amd64 image requires support for [SSE3 instructions](https://en.wikipedia.org/wiki/SSE3). Virtually all x86 CPUs after 2005 support SSE3.
- The arm64 image requires support for the [ARMv8.2-A architecture](https://en.wikipedia.org/wiki/AArch64#ARMv8.2-A). Most ARM CPUs after 2017 support ARMv8.2-A. A notable exception is the Raspberry Pi 4 from 2019, whose CPU only supports ARMv8.0-A.
+- Since ClickHouse 23.3, the Ubuntu image uses `ubuntu:22.04` as its base image; it requires Docker version >= `20.10.10`, or running with `docker run --privileged` instead. Alternatively, try the ClickHouse Alpine image.

## How to use this image
@@ -12,10 +12,10 @@ RUN apt-get update --yes && \
# We need to get the repository's HEAD each time, so we invalidate the layers' cache
ARG CACHE_INVALIDATOR=0
RUN mkdir /sqlancer && \
-    wget -q -O- https://github.com/sqlancer/sqlancer/archive/master.tar.gz | \
+    wget -q -O- https://github.com/sqlancer/sqlancer/archive/main.tar.gz | \
    tar zx -C /sqlancer && \
-    cd /sqlancer/sqlancer-master && \
-    mvn package -DskipTests && \
+    cd /sqlancer/sqlancer-main && \
+    mvn --no-transfer-progress package -DskipTests && \
    rm -r /root/.m2

COPY run.sh /
@@ -16,7 +16,6 @@ def process_result(result_folder):
        "TLPGroupBy",
        "TLPHaving",
        "TLPWhere",
        "TLPWhereGroupBy",
        "NoREC",
    ]
    failed_tests = []
@@ -33,7 +33,7 @@ cd /workspace

for _ in $(seq 1 60); do if [[ $(wget -q 'localhost:8123' -O-) == 'Ok.' ]]; then break ; else sleep 1; fi ; done

-cd /sqlancer/sqlancer-master
+cd /sqlancer/sqlancer-main

TIMEOUT=300
NUM_QUERIES=1000
@@ -59,6 +59,8 @@ install_packages previous_release_package_folder
# available for dump via clickhouse-local
configure

# it contains some new settings, but we can safely remove it
rm /etc/clickhouse-server/config.d/merge_tree.xml
rm /etc/clickhouse-server/users.d/nonconst_timezone.xml

start
@@ -85,6 +87,8 @@ export USE_S3_STORAGE_FOR_MERGE_TREE=1
export ZOOKEEPER_FAULT_INJECTION=0
configure

# it contains some new settings, but we can safely remove it
rm /etc/clickhouse-server/config.d/merge_tree.xml
rm /etc/clickhouse-server/users.d/nonconst_timezone.xml

start
@@ -115,6 +119,13 @@ mv /var/log/clickhouse-server/clickhouse-server.log /var/log/clickhouse-server/c
install_packages package_folder
export ZOOKEEPER_FAULT_INJECTION=1
configure

# Just in case previous version left some garbage in zk
sudo cat /etc/clickhouse-server/config.d/lost_forever_check.xml \
    | sed "s|>1<|>0<|g" \
    > /etc/clickhouse-server/config.d/lost_forever_check.xml.tmp
sudo mv /etc/clickhouse-server/config.d/lost_forever_check.xml.tmp /etc/clickhouse-server/config.d/lost_forever_check.xml

start 500
clickhouse-client --query "SELECT 'Server successfully started', 'OK', NULL, ''" >> /test_output/test_results.tsv \
    || (rg --text "<Error>.*Application" /var/log/clickhouse-server/clickhouse-server.log > /test_output/application_errors.txt \
docs/changelogs/v22.8.19.10-lts.md: 19 changes (new file)
@@ -0,0 +1,19 @@
---
sidebar_position: 1
sidebar_label: 2023
---

# 2023 Changelog

### ClickHouse release v22.8.19.10-lts (989bc2fe8b0) FIXME as compared to v22.8.18.31-lts (4de7a95a544)

#### Bug Fix (user-visible misbehavior in an official stable release)

* Fix subtly broken copy-on-write of ColumnLowCardinality dictionary [#51064](https://github.com/ClickHouse/ClickHouse/pull/51064) ([Michael Kolupaev](https://github.com/al13n321)).
* Generate safe IVs [#51086](https://github.com/ClickHouse/ClickHouse/pull/51086) ([Salvatore Mesoraca](https://github.com/aiven-sal)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Fix a versions' tweak for tagged commits, improve version_helper [#51035](https://github.com/ClickHouse/ClickHouse/pull/51035) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Sqlancer has changed master to main [#51060](https://github.com/ClickHouse/ClickHouse/pull/51060) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
docs/changelogs/v23.3.4.17-lts.md: 22 changes (new file)
@@ -0,0 +1,22 @@
---
sidebar_position: 1
sidebar_label: 2023
---

# 2023 Changelog

### ClickHouse release v23.3.4.17-lts (2c99b73ff40) FIXME as compared to v23.3.3.52-lts (cb963c474db)

#### Bug Fix (user-visible misbehavior in an official stable release)

* Fix crash when Pool::Entry::disconnect() is called [#50334](https://github.com/ClickHouse/ClickHouse/pull/50334) ([Val Doroshchuk](https://github.com/valbok)).
* Avoid storing logs in Keeper containing unknown operation [#50751](https://github.com/ClickHouse/ClickHouse/pull/50751) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix subtly broken copy-on-write of ColumnLowCardinality dictionary [#51064](https://github.com/ClickHouse/ClickHouse/pull/51064) ([Michael Kolupaev](https://github.com/al13n321)).
* Generate safe IVs [#51086](https://github.com/ClickHouse/ClickHouse/pull/51086) ([Salvatore Mesoraca](https://github.com/aiven-sal)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Don't mark a part as broken on `Poco::TimeoutException` [#50811](https://github.com/ClickHouse/ClickHouse/pull/50811) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix a versions' tweak for tagged commits, improve version_helper [#51035](https://github.com/ClickHouse/ClickHouse/pull/51035) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Sqlancer has changed master to main [#51060](https://github.com/ClickHouse/ClickHouse/pull/51060) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
docs/changelogs/v23.4.4.16-stable.md: 22 changes (new file)
@@ -0,0 +1,22 @@
---
sidebar_position: 1
sidebar_label: 2023
---

# 2023 Changelog

### ClickHouse release v23.4.4.16-stable (747ba4fc6a0) FIXME as compared to v23.4.3.48-stable (d9199f8d3cc)

#### Bug Fix (user-visible misbehavior in an official stable release)

* Fix crash when Pool::Entry::disconnect() is called [#50334](https://github.com/ClickHouse/ClickHouse/pull/50334) ([Val Doroshchuk](https://github.com/valbok)).
* Fix iceberg V2 optional metadata parsing [#50974](https://github.com/ClickHouse/ClickHouse/pull/50974) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix subtly broken copy-on-write of ColumnLowCardinality dictionary [#51064](https://github.com/ClickHouse/ClickHouse/pull/51064) ([Michael Kolupaev](https://github.com/al13n321)).
* Generate safe IVs [#51086](https://github.com/ClickHouse/ClickHouse/pull/51086) ([Salvatore Mesoraca](https://github.com/aiven-sal)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Don't mark a part as broken on `Poco::TimeoutException` [#50811](https://github.com/ClickHouse/ClickHouse/pull/50811) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix a versions' tweak for tagged commits, improve version_helper [#51035](https://github.com/ClickHouse/ClickHouse/pull/51035) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Sqlancer has changed master to main [#51060](https://github.com/ClickHouse/ClickHouse/pull/51060) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
docs/changelogs/v23.5.3.24-stable.md: 26 changes (new file)
@@ -0,0 +1,26 @@
---
sidebar_position: 1
sidebar_label: 2023
---

# 2023 Changelog

### ClickHouse release v23.5.3.24-stable (76f54616d3b) FIXME as compared to v23.5.2.7-stable (5751aa1ab9f)

#### Bug Fix (user-visible misbehavior in an official stable release)

* Fix Log family table return wrong rows count after truncate [#50585](https://github.com/ClickHouse/ClickHouse/pull/50585) ([flynn](https://github.com/ucasfl)).
* Fix bug in `uniqExact` parallel merging [#50590](https://github.com/ClickHouse/ClickHouse/pull/50590) ([Nikita Taranov](https://github.com/nickitat)).
* Revert recent grace hash join changes [#50699](https://github.com/ClickHouse/ClickHouse/pull/50699) ([vdimir](https://github.com/vdimir)).
* Avoid storing logs in Keeper containing unknown operation [#50751](https://github.com/ClickHouse/ClickHouse/pull/50751) ([Antonio Andelic](https://github.com/antonio2368)).
* Add compat setting for non-const timezones [#50834](https://github.com/ClickHouse/ClickHouse/pull/50834) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix iceberg V2 optional metadata parsing [#50974](https://github.com/ClickHouse/ClickHouse/pull/50974) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix subtly broken copy-on-write of ColumnLowCardinality dictionary [#51064](https://github.com/ClickHouse/ClickHouse/pull/51064) ([Michael Kolupaev](https://github.com/al13n321)).
* Generate safe IVs [#51086](https://github.com/ClickHouse/ClickHouse/pull/51086) ([Salvatore Mesoraca](https://github.com/aiven-sal)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Don't mark a part as broken on `Poco::TimeoutException` [#50811](https://github.com/ClickHouse/ClickHouse/pull/50811) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix a versions' tweak for tagged commits, improve version_helper [#51035](https://github.com/ClickHouse/ClickHouse/pull/51035) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Sqlancer has changed master to main [#51060](https://github.com/ClickHouse/ClickHouse/pull/51060) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
@@ -233,6 +233,12 @@ libhdfs3 support HDFS namenode HA.
- `_path` — Path to the file.
- `_file` — Name of the file.

## Storage Settings {#storage-settings}

- [hdfs_truncate_on_insert](/docs/en/operations/settings/settings.md#hdfs-truncate-on-insert) - allows to truncate file before insert into it. Disabled by default.
- [hdfs_create_multiple_files](/docs/en/operations/settings/settings.md#hdfs_allow_create_multiple_files) - allows to create a new file on each insert if format has suffix. Disabled by default.
- [hdfs_skip_empty_files](/docs/en/operations/settings/settings.md#hdfs_skip_empty_files) - allows to skip empty files while reading. Disabled by default.
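As a sketch of how such settings are applied per query, here is a hypothetical HDFS-backed table; the URI, table, and column names are illustrative only, not part of this diff:

```sql
-- Hypothetical HDFS-engine table; the URI is illustrative.
CREATE TABLE hdfs_engine_table (name String, value UInt32)
ENGINE = HDFS('hdfs://hdfs1:9000/other_storage', 'TSV');

-- Overwrite the target file on insert instead of throwing
-- an exception when it already exists.
INSERT INTO hdfs_engine_table SETTINGS hdfs_truncate_on_insert = 1
VALUES ('one', 1), ('two', 2);
```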
**See Also**

- [Virtual columns](../../../engines/table-engines/index.md#table_engines-virtual_columns)
@@ -35,6 +35,10 @@ The table structure can differ from the original MySQL table structure:

- Column types may differ from those in the original MySQL table. ClickHouse tries to [cast](../../../engines/database-engines/mysql.md#data_types-support) values to the ClickHouse data types.
- The [external_table_functions_use_nulls](../../../operations/settings/settings.md#external-table-functions-use-nulls) setting defines how to handle Nullable columns. Default value: 1. If 0, the table function does not make Nullable columns and inserts default values instead of nulls. This is also applicable for NULL values inside arrays.

+:::note
+The MySQL Table Engine is currently not available on the ClickHouse builds for macOS ([issue](https://github.com/ClickHouse/ClickHouse/issues/21191))
+:::
+
**Engine Parameters**

- `host:port` — MySQL server address.
@@ -1,5 +1,5 @@
---
-slug: /en/sql-reference/table-functions/redis
+slug: /en/engines/table-engines/integrations/redis
sidebar_position: 43
sidebar_label: Redis
---
@@ -34,7 +34,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name
- `primary` must be specified; it supports only one column in the primary key. The primary key will be serialized in binary as a Redis key.

- Columns other than the primary key will be serialized in binary as the Redis value, in the corresponding order.

- Queries with key equality or `IN` filtering will be optimized into a multi-key lookup from Redis. Queries without a filter on the key trigger a full table scan, which is a heavy operation.

## Usage Example {#usage-example}
@@ -127,6 +127,12 @@ CREATE TABLE table_with_asterisk (name String, value UInt32)
ENGINE = S3('https://clickhouse-public-datasets.s3.amazonaws.com/my-bucket/{some,another}_folder/*', 'CSV');
```

## Storage Settings {#storage-settings}

- [s3_truncate_on_insert](/docs/en/operations/settings/settings.md#s3-truncate-on-insert) - allows to truncate file before insert into it. Disabled by default.
- [s3_create_multiple_files](/docs/en/operations/settings/settings.md#s3_allow_create_multiple_files) - allows to create a new file on each insert if format has suffix. Disabled by default.
- [s3_skip_empty_files](/docs/en/operations/settings/settings.md#s3_skip_empty_files) - allows to skip empty files while reading. Disabled by default.
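A minimal sketch of applying one of these settings, reusing the `table_with_asterisk` table from the example above (the setting choice is illustrative):

```sql
-- Skip zero-byte objects matched by the glob instead of failing on them.
SELECT count(*) FROM table_with_asterisk
SETTINGS s3_skip_empty_files = 1;
```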
## S3-related Settings {#settings}

The following settings can be set before query execution or placed into the configuration file.
@@ -853,7 +853,7 @@ Tags:
- `max_data_part_size_bytes` — the maximum size of a part that can be stored on any of the volume's disks. If the size of a merged part is estimated to be bigger than `max_data_part_size_bytes`, then this part will be written to the next volume. Basically, this feature allows keeping new/small parts on a hot (SSD) volume and moving them to a cold (HDD) volume when they reach a large size. Do not use this setting if your policy has only one volume.
- `move_factor` — when the amount of available space gets lower than this factor, data automatically starts to move to the next volume, if any (by default, 0.1). ClickHouse sorts existing parts by size from largest to smallest (in descending order) and selects parts with a total size sufficient to meet the `move_factor` condition. If the total size of all parts is insufficient, all parts will be moved.
- `prefer_not_to_merge` — Disables merging of data parts on this volume. When this setting is enabled, merging data on this volume is not allowed. This allows controlling how ClickHouse works with slow disks.
-- `perform_ttl_move_on_insert` — Disables TTL move on data part INSERT. By default, if we insert a data part that has already expired by the TTL move rule, it immediately goes to the volume/disk declared in the move rule. This can significantly slow down inserts when the destination volume/disk is slow (e.g. S3).
+- `perform_ttl_move_on_insert` — Disables TTL move on data part INSERT. By default (if enabled), if we insert a data part that has already expired by the TTL move rule, it immediately goes to the volume/disk declared in the move rule. This can significantly slow down inserts when the destination volume/disk is slow (e.g. S3). If disabled, the already-expired data part is written to a default volume and then immediately moved to the TTL volume.
- `load_balancing` - Policy for disk balancing, `round_robin` or `least_used`.

Configuration examples:
@@ -92,3 +92,11 @@ $ echo -e "1,2\n3,4" | clickhouse-local -q "CREATE TABLE table (a Int64, b Int64

`PARTITION BY` — Optional. It is possible to create separate files by partitioning the data on a partition key. In most cases, you don't need a partition key, and if it is needed you generally don't need a partition key more granular than by month. Partitioning does not speed up queries (in contrast to the ORDER BY expression). You should never use too granular partitioning. Don't partition your data by client identifiers or names (instead, make client identifier or name the first column in the ORDER BY expression).

For partitioning by month, use the `toYYYYMM(date_column)` expression, where `date_column` is a column with a date of the type [Date](/docs/en/sql-reference/data-types/date.md). The partition names here have the `"YYYYMM"` format.

## Settings {#settings}

- [engine_file_empty_if_not_exists](/docs/en/operations/settings/settings.md#engine-file-emptyif-not-exists) - allows to select empty data from a file that doesn't exist. Disabled by default.
- [engine_file_truncate_on_insert](/docs/en/operations/settings/settings.md#engine-file-truncate-on-insert) - allows to truncate file before insert into it. Disabled by default.
- [engine_file_allow_create_multiple_files](/docs/en/operations/settings/settings.md#engine_file_allow_create_multiple_files) - allows to create a new file on each insert if format has suffix. Disabled by default.
- [engine_file_skip_empty_files](/docs/en/operations/settings/settings.md#engine_file_skip_empty_files) - allows to skip empty files while reading. Disabled by default.
- [storage_file_read_method](/docs/en/operations/settings/settings.md#engine-file-emptyif-not-exists) - method of reading data from storage file, one of: `read`, `pread`, `mmap`. The mmap method does not apply to clickhouse-server (it's intended for clickhouse-local). Default value: `pread` for clickhouse-server, `mmap` for clickhouse-local.
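A minimal sketch of one of these settings in use, assuming a hypothetical File-engine table:

```sql
-- Hypothetical table backed by the File engine.
CREATE TABLE file_engine_table (a Int64, b Int64) ENGINE = File(CSV);

-- Return an empty result instead of throwing if the backing file
-- does not exist yet.
SELECT * FROM file_engine_table
SETTINGS engine_file_empty_if_not_exists = 1;
```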
@@ -102,3 +102,7 @@ SELECT * FROM url_engine_table

`PARTITION BY` — Optional. It is possible to create separate files by partitioning the data on a partition key. In most cases, you don't need a partition key, and if it is needed you generally don't need a partition key more granular than by month. Partitioning does not speed up queries (in contrast to the ORDER BY expression). You should never use too granular partitioning. Don't partition your data by client identifiers or names (instead, make client identifier or name the first column in the ORDER BY expression).

For partitioning by month, use the `toYYYYMM(date_column)` expression, where `date_column` is a column with a date of the type [Date](/docs/en/sql-reference/data-types/date.md). The partition names here have the `"YYYYMM"` format.

## Storage Settings {#storage-settings}

- [engine_url_skip_empty_files](/docs/en/operations/settings/settings.md#engine_url_skip_empty_files) - allows to skip empty files while reading. Disabled by default.
@@ -470,6 +470,7 @@ The CSV format supports the output of totals and extremes the same way as `TabSe
- [input_format_csv_detect_header](/docs/en/operations/settings/settings-formats.md/#input_format_csv_detect_header) - automatically detect header with names and types in CSV format. Default value - `true`.
- [input_format_csv_skip_trailing_empty_lines](/docs/en/operations/settings/settings-formats.md/#input_format_csv_skip_trailing_empty_lines) - skip trailing empty lines at the end of data. Default value - `false`.
- [input_format_csv_trim_whitespaces](/docs/en/operations/settings/settings-formats.md/#input_format_csv_trim_whitespaces) - trim spaces and tabs in non-quoted CSV strings. Default value - `true`.
+- [input_format_csv_allow_whitespace_or_tab_as_delimiter](/docs/en/operations/settings/settings-formats.md/#input_format_csv_allow_whitespace_or_tab_as_delimiter) - Allow to use whitespace or tab as field delimiter in CSV strings. Default value - `false`.

## CSVWithNames {#csvwithnames}
@@ -1877,13 +1878,13 @@ The table below shows supported data types and how they match ClickHouse [data t
| `string (uuid)` \** | [UUID](/docs/en/sql-reference/data-types/uuid.md) | `string (uuid)` \** |
| `fixed(16)` | [Int128/UInt128](/docs/en/sql-reference/data-types/int-uint.md) | `fixed(16)` |
| `fixed(32)` | [Int256/UInt256](/docs/en/sql-reference/data-types/int-uint.md) | `fixed(32)` |
| `record` | [Tuple](/docs/en/sql-reference/data-types/tuple.md) | `record` |

\* `bytes` is default, controlled by [output_format_avro_string_column_pattern](/docs/en/operations/settings/settings-formats.md/#output_format_avro_string_column_pattern)
\** [Avro logical types](https://avro.apache.org/docs/current/spec.html#Logical+Types)

Unsupported Avro data types: `record` (non-root), `map`

Unsupported Avro logical data types: `time-millis`, `time-micros`, `duration`

### Inserting Data {#inserting-data-1}
@@ -1922,7 +1923,26 @@ Output Avro file compression and sync interval can be configured with [output_fo

Using the ClickHouse [DESCRIBE](/docs/en/sql-reference/statements/describe-table) function, you can quickly view the inferred format of an Avro file like the following example. This example includes the URL of a publicly accessible Avro file in the ClickHouse S3 public bucket:

```
DESCRIBE url('https://clickhouse-public-datasets.s3.eu-central-1.amazonaws.com/hits.avro','Avro');
```
```
┌─name───────────────────────┬─type────────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┐
│ WatchID │ Int64 │ │ │ │ │ │
│ JavaEnable │ Int32 │ │ │ │ │ │
│ Title │ String │ │ │ │ │ │
│ GoodEvent │ Int32 │ │ │ │ │ │
│ EventTime │ Int32 │ │ │ │ │ │
│ EventDate │ Date32 │ │ │ │ │ │
│ CounterID │ Int32 │ │ │ │ │ │
│ ClientIP │ Int32 │ │ │ │ │ │
│ ClientIP6 │ FixedString(16) │ │ │ │ │ │
│ RegionID │ Int32 │ │ │ │ │ │
...
│ IslandID │ FixedString(16) │ │ │ │ │ │
│ RequestNum │ Int32 │ │ │ │ │ │
│ RequestTry │ Int32 │ │ │ │ │ │
└────────────────────────────┴─────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘
```

## AvroConfluent {#data-format-avro-confluent}
@@ -83,6 +83,7 @@ The BACKUP and RESTORE statements take a list of DATABASE and TABLE names, a des
- [`compression_method`](/docs/en/sql-reference/statements/create/table.md/#column-compression-codecs) and compression_level
- `password` for the file on disk
- `base_backup`: the destination of the previous backup of this source. For example, `Disk('backups', '1.zip')`
+- `structure_only`: if enabled, allows to only backup or restore the CREATE statements without the data of tables

### Usage examples
@@ -398,4 +399,4 @@ To disallow concurrent backup/restore, you can use these settings respectively.
```

The default value for both is true, so by default concurrent backup/restores are allowed.
-When these settings are false on a cluster, only 1 backup/restore is allowed to run on a cluster at a time.
+When these settings are false on a cluster, only 1 backup/restore is allowed to run on a cluster at a time.
@@ -50,7 +50,7 @@ To manage named collections with DDL a user must have the `named_control_collect
```

:::tip
-In the above example the `passowrd_sha256_hex` value is the hexadecimal representation of the SHA256 hash of the password. This configuration for the user `default` has the attribute `replace=true` as in the default configuration has a plain text `password` set, and it is not possible to have both plain text and sha256 hex passwords set for a user.
+In the above example the `password_sha256_hex` value is the hexadecimal representation of the SHA256 hash of the password. This configuration for the user `default` has the attribute `replace=true`, as the default configuration has a plain text `password` set, and it is not possible to have both plain text and sha256 hex passwords set for a user.
:::

## Storing named collections in configuration files
@@ -1,16 +0,0 @@
----
-slug: /en/operations/server-configuration-parameters/
-sidebar_position: 54
-sidebar_label: Server Configuration Parameters
-pagination_next: en/operations/server-configuration-parameters/settings
----
-
-# Server Configuration Parameters
-
-This section contains descriptions of server settings that cannot be changed at the session or query level.
-
-These settings are stored in the `config.xml` file on the ClickHouse server.
-
-Other settings are described in the "[Settings](../../operations/settings/index.md#session-settings-intro)" section.
-
-Before studying the settings, read the [Configuration files](../../operations/configuration-files.md#configuration_files) section and note the use of substitutions (the `incl` and `optional` attributes).
@@ -7,6 +7,14 @@ description: This section contains descriptions of server settings that cannot b

# Server Settings

+This section contains descriptions of server settings that cannot be changed at the session or query level.
+
+These settings are stored in the `config.xml` file on the ClickHouse server.
+
+Other settings are described in the "[Settings](../../operations/settings/index.md#session-settings-intro)" section.
+
+Before studying the settings, read the [Configuration files](../../operations/configuration-files.md#configuration_files) section and note the use of substitutions (the `incl` and `optional` attributes).
+
## allow_use_jemalloc_memory

Allows to use jemalloc memory.
@@ -202,7 +210,7 @@ Default: 15

## dns_max_consecutive_failures

-Max connection failures before dropping host from ClickHouse DNS cache
+Max consecutive resolving failures before dropping a host from ClickHouse DNS cache

Type: UInt32
@@ -932,6 +932,38 @@ Result

" string "
```

### input_format_csv_allow_whitespace_or_tab_as_delimiter {#input_format_csv_allow_whitespace_or_tab_as_delimiter}

Allow to use whitespace or tab as field delimiter in CSV strings.

Default value: `false`.

**Examples**

Query

```bash
echo 'a b' | ./clickhouse local -q "select * from table FORMAT CSV" --input-format="CSV" --input_format_csv_allow_whitespace_or_tab_as_delimiter=true --format_csv_delimiter=' '
```

Result

```text
a b
```

Query

```bash
echo 'a b' | ./clickhouse local -q "select * from table FORMAT CSV" --input-format="CSV" --input_format_csv_allow_whitespace_or_tab_as_delimiter=true --format_csv_delimiter='\t'
```

Result

```text
a b
```

## Values format settings {#values-format-settings}

### input_format_values_interpret_expressions {#input_format_values_interpret_expressions}
@@ -2941,7 +2941,7 @@ Default value: `0`.

## mutations_sync {#mutations_sync}

-Allows to execute `ALTER TABLE ... UPDATE|DELETE` queries ([mutations](../../sql-reference/statements/alter/index.md#mutations)) synchronously.
+Allows to execute `ALTER TABLE ... UPDATE|DELETE|MATERIALIZE INDEX|MATERIALIZE PROJECTION|MATERIALIZE COLUMN` queries ([mutations](../../sql-reference/statements/alter/index.md#mutations)) synchronously.

Possible values:
@@ -3328,7 +3328,35 @@ Possible values:

Default value: `0`.

-## s3_truncate_on_insert
+## engine_file_allow_create_multiple_files {#engine_file_allow_create_multiple_files}

Enables or disables creating a new file on each insert in file engine tables if the format has the suffix (`JSON`, `ORC`, `Parquet`, etc.). If enabled, on each insert a new file will be created with a name following this pattern:

`data.Parquet` -> `data.1.Parquet` -> `data.2.Parquet`, etc.

Possible values:
- 0 — `INSERT` query appends new data to the end of the file.
- 1 — `INSERT` query creates a new file.

Default value: `0`.
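A short sketch of the behavior described above, with a hypothetical Parquet-backed File-engine table:

```sql
-- Hypothetical table whose format has a suffix.
CREATE TABLE export_data (id UInt64) ENGINE = File(Parquet);

SET engine_file_allow_create_multiple_files = 1;
INSERT INTO export_data VALUES (1); -- writes data.Parquet
INSERT INTO export_data VALUES (2); -- writes data.1.Parquet
```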
## engine_file_skip_empty_files {#engine_file_skip_empty_files}

Enables or disables skipping empty files in [File](../../engines/table-engines/special/file.md) engine tables.

Possible values:
- 0 — `SELECT` throws an exception if empty file is not compatible with requested format.
- 1 — `SELECT` returns empty result for empty file.

Default value: `0`.

## storage_file_read_method {#storage_file_read_method}

Method of reading data from storage file, one of: `read`, `pread`, `mmap`. The mmap method does not apply to clickhouse-server (it's intended for clickhouse-local).

Default value: `pread` for clickhouse-server, `mmap` for clickhouse-local.

## s3_truncate_on_insert {#s3_truncate_on_insert}

Enables or disables truncate before inserts in s3 engine tables. If disabled, an exception will be thrown on insert attempts if an S3 object already exists.
@@ -3338,7 +3366,29 @@ Possible values:

Default value: `0`.

-## hdfs_truncate_on_insert
+## s3_create_new_file_on_insert {#s3_create_new_file_on_insert}

Enables or disables creating a new file on each insert in s3 engine tables. If enabled, on each insert a new S3 object will be created with the key, similar to this pattern:

initial: `data.Parquet.gz` -> `data.1.Parquet.gz` -> `data.2.Parquet.gz`, etc.

Possible values:
- 0 — `INSERT` query appends new data to the end of the file.
- 1 — `INSERT` query creates a new file.

Default value: `0`.

## s3_skip_empty_files {#s3_skip_empty_files}

Enables or disables skipping empty files in [S3](../../engines/table-engines/integrations/s3.md) engine tables.

Possible values:
- 0 — `SELECT` throws an exception if empty file is not compatible with requested format.
- 1 — `SELECT` returns empty result for empty file.

Default value: `0`.

## hdfs_truncate_on_insert {#hdfs_truncate_on_insert}

Enables or disables truncation before an insert in hdfs engine tables. If disabled, an exception will be thrown on an attempt to insert if a file in HDFS already exists.
@@ -3348,31 +3398,7 @@ Possible values:

Default value: `0`.

-## engine_file_allow_create_multiple_files
-
-Enables or disables creating a new file on each insert in file engine tables if the format has the suffix (`JSON`, `ORC`, `Parquet`, etc.). If enabled, on each insert a new file will be created with a name following this pattern:
-
-`data.Parquet` -> `data.1.Parquet` -> `data.2.Parquet`, etc.
-
-Possible values:
-- 0 — `INSERT` query appends new data to the end of the file.
-- 1 — `INSERT` query replaces existing content of the file with the new data.
-
-Default value: `0`.
-
-## s3_create_new_file_on_insert
-
-Enables or disables creating a new file on each insert in s3 engine tables. If enabled, on each insert a new S3 object will be created with the key, similar to this pattern:
-
-initial: `data.Parquet.gz` -> `data.1.Parquet.gz` -> `data.2.Parquet.gz`, etc.
-
-Possible values:
-- 0 — `INSERT` query appends new data to the end of the file.
-- 1 — `INSERT` query replaces existing content of the file with the new data.
-
-Default value: `0`.
-
-## hdfs_create_new_file_on_insert
+## hdfs_create_new_file_on_insert {#hdfs_create_new_file_on_insert}

Enables or disables creating a new file on each insert in HDFS engine tables. If enabled, on each insert a new HDFS file will be created with the name, similar to this pattern:
@@ -3380,7 +3406,27 @@ initial: `data.Parquet.gz` -> `data.1.Parquet.gz` -> `data.2.Parquet.gz`, etc.

Possible values:
- 0 — `INSERT` query appends new data to the end of the file.
-- 1 — `INSERT` query replaces existing content of the file with the new data.
+- 1 — `INSERT` query creates a new file.

Default value: `0`.

## hdfs_skip_empty_files {#hdfs_skip_empty_files}

Enables or disables skipping empty files in [HDFS](../../engines/table-engines/integrations/hdfs.md) engine tables.

Possible values:
- 0 — `SELECT` throws an exception if empty file is not compatible with requested format.
- 1 — `SELECT` returns empty result for empty file.

Default value: `0`.

## engine_url_skip_empty_files {#engine_url_skip_empty_files}

Enables or disables skipping empty files in [URL](../../engines/table-engines/special/url.md) engine tables.

Possible values:
- 0 — `SELECT` throws an exception if empty file is not compatible with requested format.
- 1 — `SELECT` returns empty result for empty file.

Default value: `0`.
@@ -11,7 +11,8 @@ Columns:
- `host` ([String](../../sql-reference/data-types/string.md)) — The hostname/IP of the ZooKeeper node that ClickHouse connected to.
- `port` ([String](../../sql-reference/data-types/string.md)) — The port of the ZooKeeper node that ClickHouse connected to.
- `index` ([UInt8](../../sql-reference/data-types/int-uint.md)) — The index of the ZooKeeper node that ClickHouse connected to. The index is from ZooKeeper config.
-- `connected_time` ([String](../../sql-reference/data-types/string.md)) — When the connection was established
+- `connected_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — When the connection was established
+- `session_uptime_elapsed_seconds` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Seconds elapsed since the connection was established
- `is_expired` ([UInt8](../../sql-reference/data-types/int-uint.md)) — Is the current connection expired.
- `keeper_api_version` ([String](../../sql-reference/data-types/string.md)) — Keeper API version.
- `client_id` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Session id of the connection.
@@ -23,7 +24,7 @@ SELECT * FROM system.zookeeper_connection;
```

``` text
-┌─name──────────────┬─host─────────┬─port─┬─index─┬──────connected_time─┬─is_expired─┬─keeper_api_version─┬──────────client_id─┐
-│ default_zookeeper │ 127.0.0.1 │ 2181 │ 0 │ 2023-05-19 14:30:16 │ 0 │ 0 │ 216349144108826660 │
-└───────────────────┴──────────────┴──────┴───────┴─────────────────────┴────────────┴────────────────────┴────────────────────┘
+┌─name────┬─host──────┬─port─┬─index─┬──────connected_time─┬─session_uptime_elapsed_seconds─┬─is_expired─┬─keeper_api_version─┬─client_id─┐
+│ default │ 127.0.0.1 │ 9181 │ 0 │ 2023-06-15 14:36:01 │ 3058 │ 0 │ 3 │ 5 │
+└─────────┴───────────┴──────┴───────┴─────────────────────┴────────────────────────────────┴────────────┴────────────────────┴───────────┘
```
@@ -32,7 +32,7 @@ For example, Decimal32(4) can contain numbers from -99999.9999 to 99999.9999 wit

Internally data is represented as normal signed integers with respective bit width. Real value ranges that can be stored in memory are a bit larger than specified above, which are checked only on conversion from a string.

-Because modern CPUs do not support 128-bit integers natively, operations on Decimal128 are emulated. Because of this Decimal128 works significantly slower than Decimal32/Decimal64.
+Because modern CPUs do not support 128-bit and 256-bit integers natively, operations on Decimal128 and Decimal256 are emulated. Thus, Decimal128 and Decimal256 work significantly slower than Decimal32/Decimal64.

## Operations and Result Type
@@ -59,6 +59,10 @@ Some functions on Decimal return result as Float64 (for example, var or stddev).

During calculations on Decimal, integer overflows might happen. Excessive digits in a fraction are discarded (not rounded). Excessive digits in the integer part will lead to an exception.

+:::warning
+Overflow check is not implemented for Decimal128 and Decimal256. In case of overflow an incorrect result is returned, and no exception is thrown.
+:::
+
``` sql
SELECT toDecimal32(2, 4) AS x, x / 3
```
@@ -1509,10 +1509,12 @@ parseDateTimeBestEffort(time_string [, time_zone])
- A string containing 9..10 digit [unix timestamp](https://en.wikipedia.org/wiki/Unix_time).
- A string with a date and a time component: `YYYYMMDDhhmmss`, `DD/MM/YYYY hh:mm:ss`, `DD-MM-YY hh:mm`, `YYYY-MM-DD hh:mm:ss`, etc.
- A string with a date, but no time component: `YYYY`, `YYYYMM`, `YYYY*MM`, `DD/MM/YYYY`, `DD-MM-YY` etc.
-- A string with a day and time: `DD`, `DD hh`, `DD hh:mm`. In this case `YYYY-MM` are substituted as `2000-01`.
+- A string with a day and time: `DD`, `DD hh`, `DD hh:mm`. In this case `MM` is substituted by `01`.
- A string that includes the date and time along with time zone offset information: `YYYY-MM-DD hh:mm:ss ±h:mm`, etc. For example, `2020-12-12 17:36:00 -5:00`.
+- A [syslog timestamp](https://datatracker.ietf.org/doc/html/rfc3164#section-4.1.2): `Mmm dd hh:mm:ss`. For example, `Jun 9 14:20:32`.

For all of the formats with separator the function parses month names expressed by their full name or by the first three letters of a month name. Examples: `24/DEC/18`, `24-Dec-18`, `01-September-2018`.
+If the year is not specified, it is considered to be equal to the current year. If the resulting DateTime happens to be in the future (even by a second after the current moment), then the current year is substituted by the previous year.

**Returned value**
@@ -1583,23 +1585,46 @@ Result:

Query:

``` sql
-SELECT parseDateTimeBestEffort('10 20:19');
+SELECT toYear(now()) as year, parseDateTimeBestEffort('10 20:19');
```

Result:

```response
-┌─parseDateTimeBestEffort('10 20:19')─┐
-│ 2000-01-10 20:19:00 │
-└─────────────────────────────────────┘
+┌─year─┬─parseDateTimeBestEffort('10 20:19')─┐
+│ 2023 │ 2023-01-10 20:19:00 │
+└──────┴─────────────────────────────────────┘
```

Query:

``` sql
WITH
    now() AS ts_now,
    formatDateTime(ts_around, '%b %e %T') AS syslog_arg
SELECT
    ts_now,
    syslog_arg,
    parseDateTimeBestEffort(syslog_arg)
FROM (SELECT arrayJoin([ts_now - 30, ts_now + 30]) AS ts_around);
```

Result:

```response
┌──────────────ts_now─┬─syslog_arg──────┬─parseDateTimeBestEffort(syslog_arg)─┐
│ 2023-06-30 23:59:30 │ Jun 30 23:59:00 │ 2023-06-30 23:59:00 │
│ 2023-06-30 23:59:30 │ Jul 1 00:00:00 │ 2022-07-01 00:00:00 │
└─────────────────────┴─────────────────┴─────────────────────────────────────┘
```

**See Also**

-- [RFC 1123](https://tools.ietf.org/html/rfc1123)
+- [RFC 1123](https://datatracker.ietf.org/doc/html/rfc1123)
- [toDate](#todate)
- [toDateTime](#todatetime)
- [ISO 8601 announcement by @xkcd](https://xkcd.com/1179/)
+- [RFC 3164](https://datatracker.ietf.org/doc/html/rfc3164#section-4.1.2)

## parseDateTimeBestEffortUS
@@ -232,6 +232,7 @@ ALTER TABLE table_with_ttl MODIFY COLUMN column_ttl REMOVE TTL;

Materializes or updates a column with an expression for a default value (`DEFAULT` or `MATERIALIZED`).
It is used when adding or updating a column with a complicated expression, because evaluating such an expression directly at `SELECT` execution turns out to be expensive.
+Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations).

Syntax:
@@ -60,7 +60,7 @@ You can specify how long (in seconds) to wait for inactive replicas to execute a
For all `ALTER` queries, if `alter_sync = 2` and some replicas are not active for more than the time specified in the `replication_wait_for_inactive_replica_timeout` setting, then an exception `UNFINISHED` is thrown.
:::

-For `ALTER TABLE ... UPDATE|DELETE` queries the synchronicity is defined by the [mutations_sync](/docs/en/operations/settings/settings.md/#mutations_sync) setting.
+For `ALTER TABLE ... UPDATE|DELETE|MATERIALIZE INDEX|MATERIALIZE PROJECTION|MATERIALIZE COLUMN` queries the synchronicity is defined by the [mutations_sync](/docs/en/operations/settings/settings.md/#mutations_sync) setting.

## Related content
@@ -142,19 +142,19 @@ The following operations with [projections](/docs/en/engines/table-engines/merge

## ADD PROJECTION

-`ALTER TABLE [db].name ADD PROJECTION [IF NOT EXISTS] name ( SELECT <COLUMN LIST EXPR> [GROUP BY] [ORDER BY] )` - Adds projection description to tables metadata.
+`ALTER TABLE [db.]name [ON CLUSTER cluster] ADD PROJECTION [IF NOT EXISTS] name ( SELECT <COLUMN LIST EXPR> [GROUP BY] [ORDER BY] )` - Adds projection description to tables metadata.

## DROP PROJECTION

-`ALTER TABLE [db].name DROP PROJECTION [IF EXISTS] name` - Removes projection description from tables metadata and deletes projection files from disk. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations).
+`ALTER TABLE [db.]name [ON CLUSTER cluster] DROP PROJECTION [IF EXISTS] name` - Removes projection description from tables metadata and deletes projection files from disk. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations).

## MATERIALIZE PROJECTION

-`ALTER TABLE [db.]table MATERIALIZE PROJECTION name IN PARTITION partition_name` - The query rebuilds the projection `name` in the partition `partition_name`. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations).
+`ALTER TABLE [db.]table [ON CLUSTER cluster] MATERIALIZE PROJECTION [IF EXISTS] name [IN PARTITION partition_name]` - The query rebuilds the projection `name` in the partition `partition_name`. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations).

## CLEAR PROJECTION

-`ALTER TABLE [db.]table CLEAR PROJECTION [IF EXISTS] name IN PARTITION partition_name` - Deletes projection files from disk without removing description. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations).
+`ALTER TABLE [db.]table [ON CLUSTER cluster] CLEAR PROJECTION [IF EXISTS] name [IN PARTITION partition_name]` - Deletes projection files from disk without removing description. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations).

The commands `ADD`, `DROP` and `CLEAR` are lightweight in the sense that they only change metadata or remove files.
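A brief sketch of these commands chained together (table, projection, and partition names are hypothetical):

```sql
-- Add a projection, build it for one partition, then remove it again.
ALTER TABLE visits ADD PROJECTION IF NOT EXISTS user_agg
(
    SELECT UserID, count() GROUP BY UserID
);

ALTER TABLE visits MATERIALIZE PROJECTION user_agg IN PARTITION 201907;

ALTER TABLE visits CLEAR PROJECTION user_agg IN PARTITION 201907;
ALTER TABLE visits DROP PROJECTION IF EXISTS user_agg;
```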
@@ -10,15 +10,25 @@ sidebar_label: INDEX

The following operations are available:

-- `ALTER TABLE [db].table_name [ON CLUSTER cluster] ADD INDEX name expression TYPE type [GRANULARITY value] [FIRST|AFTER name]` - Adds index description to tables metadata.
-- `ALTER TABLE [db].table_name [ON CLUSTER cluster] DROP INDEX name` - Removes index description from tables metadata and deletes index files from disk. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations).
-- `ALTER TABLE [db.]table_name [ON CLUSTER cluster] MATERIALIZE INDEX name [IN PARTITION partition_name]` - Rebuilds the secondary index `name` for the specified `partition_name`. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations). If `IN PARTITION` part is omitted then it rebuilds the index for the whole table data.
-
-The first two commands are lightweight in a sense that they only change metadata or remove files.
-
-Also, they are replicated, syncing indices metadata via ZooKeeper.
+## ADD INDEX
+
+`ALTER TABLE [db.]table_name [ON CLUSTER cluster] ADD INDEX [IF NOT EXISTS] name expression TYPE type [GRANULARITY value] [FIRST|AFTER name]` - Adds index description to tables metadata.
+
+## DROP INDEX
+
+`ALTER TABLE [db.]table_name [ON CLUSTER cluster] DROP INDEX [IF EXISTS] name` - Removes index description from tables metadata and deletes index files from disk. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations).
+
+## MATERIALIZE INDEX
+
+`ALTER TABLE [db.]table_name [ON CLUSTER cluster] MATERIALIZE INDEX [IF EXISTS] name [IN PARTITION partition_name]` - Rebuilds the secondary index `name` for the specified `partition_name`. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations). If `IN PARTITION` part is omitted then it rebuilds the index for the whole table data.
+
+## CLEAR INDEX
+
+`ALTER TABLE [db.]table_name [ON CLUSTER cluster] CLEAR INDEX [IF EXISTS] name [IN PARTITION partition_name]` - Deletes the secondary index files from disk without removing description. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations).
+
+The commands `ADD`, `DROP`, and `CLEAR` are lightweight in the sense that they only change metadata or remove files.
+Also, they are replicated, syncing indices metadata via ClickHouse Keeper or ZooKeeper.
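A brief sketch of the commands above (table name, index name, and indexed expression are hypothetical):

```sql
-- Add a skipping index, rebuild it for existing data, then drop it.
ALTER TABLE events ADD INDEX IF NOT EXISTS idx_url url TYPE bloom_filter GRANULARITY 4;
ALTER TABLE events MATERIALIZE INDEX idx_url;
ALTER TABLE events DROP INDEX IF EXISTS idx_url;
```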
:::note
Index manipulation is supported only for tables with [`*MergeTree`](/docs/en/engines/table-engines/mergetree-family/mergetree.md) engine (including [replicated](/docs/en/engines/table-engines/mergetree-family/replication.md) variants).
@@ -82,6 +82,35 @@ LIFETIME(MIN 0 MAX 1000)
LAYOUT(FLAT())
```

:::note
When using the SQL console in [ClickHouse Cloud](https://clickhouse.com), you must specify a user (`default` or any other user with the role `default_role`) and password when creating a dictionary.
:::

```sql
CREATE USER IF NOT EXISTS clickhouse_admin
IDENTIFIED WITH sha256_password BY 'passworD43$x';

GRANT default_role TO clickhouse_admin;

CREATE DATABASE foo_db;

CREATE TABLE foo_db.source_table (
    id UInt64,
    value String
) ENGINE = MergeTree
PRIMARY KEY id;

CREATE DICTIONARY foo_db.id_value_dictionary
(
    id UInt64,
    value String
)
PRIMARY KEY id
SOURCE(CLICKHOUSE(TABLE 'source_table' USER 'clickhouse_admin' PASSWORD 'passworD43$x' DB 'foo_db' ))
LAYOUT(FLAT())
LIFETIME(MIN 0 MAX 1000);
```

### Create a dictionary from a table in a remote ClickHouse service

Input table (in the remote ClickHouse service) `source_table`:
@@ -380,11 +380,15 @@ High compression levels are useful for asymmetric scenarios, like compress once,

`DEFLATE_QPL` — [Deflate compression algorithm](https://github.com/intel/qpl) implemented by Intel® Query Processing Library. Some limitations apply:

-- DEFLATE_QPL is experimental and can only be used after setting configuration parameter `allow_experimental_codecs=1`.
+- DEFLATE_QPL is disabled by default and can only be used after setting configuration parameter `enable_deflate_qpl_codec = 1`.
- DEFLATE_QPL requires a ClickHouse build compiled with SSE 4.2 instructions (by default, this is the case). Refer to [Build Clickhouse with DEFLATE_QPL](/docs/en/development/building_and_benchmarking_deflate_qpl.md/#Build-Clickhouse-with-DEFLATE_QPL) for more details.
- DEFLATE_QPL works best if the system has an Intel® IAA (In-Memory Analytics Accelerator) offloading device. Refer to [Accelerator Configuration](https://intel.github.io/qpl/documentation/get_started_docs/installation.html#accelerator-configuration) and [Benchmark with DEFLATE_QPL](/docs/en/development/building_and_benchmarking_deflate_qpl.md/#Run-Benchmark-with-DEFLATE_QPL) for more details.
- DEFLATE_QPL-compressed data can only be transferred between ClickHouse nodes compiled with SSE 4.2 enabled.

:::note
DEFLATE_QPL is not available in ClickHouse Cloud.
:::
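As a sketch, and only under the limitations listed above, the codec is enabled for the session and then attached to a column (table and column names are hypothetical):

```sql
SET enable_deflate_qpl_codec = 1;

CREATE TABLE qpl_example
(
    id UInt64,
    payload String CODEC(DEFLATE_QPL)
)
ENGINE = MergeTree
ORDER BY id;
```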
### Specialized Codecs

These codecs are designed to make compression more effective by using specific features of the data. Some of these codecs do not compress data themselves. Instead, they prepare the data for a common-purpose codec, which compresses it better than without this preparation.
@@ -55,6 +55,9 @@ With the described implementation now we can see what can negatively affect 'DEL
- Table having a very large number of data parts
- Having a lot of data in Compact parts—in a Compact part, all columns are stored in one file.

+:::note
+Currently, lightweight delete does not work for tables with projections, as rows in a projection may be affected and require the projection to be rebuilt. Rebuilding the projection makes the deletion not lightweight, so this is not supported.
+:::
+
## Related content
@@ -10,7 +10,7 @@ sidebar_label: SET
SET param = value
```

-Assigns `value` to the `param` [setting](../../operations/settings/index.md) for the current session. You cannot change [server settings](../../operations/server-configuration-parameters/index.md) this way.
+Assigns `value` to the `param` [setting](../../operations/settings/index.md) for the current session. You cannot change [server settings](../../operations/server-configuration-parameters/settings.md) this way.

You can also set all the values from the specified settings profile in a single query.
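A minimal sketch (the `web` profile is a placeholder for a profile defined in the server configuration):

```sql
-- Change a single setting for the current session.
SET max_threads = 8;

-- Apply every value from a settings profile in one statement.
SET profile = 'web';
```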
@@ -196,6 +196,16 @@ SELECT count(*) FROM file('big_dir/**/file002', 'CSV', 'name String, value UInt3
- `_path` — Path to the file.
- `_file` — Name of the file.

## Settings

- [engine_file_empty_if_not_exists](/docs/en/operations/settings/settings.md#engine-file-emptyif-not-exists) - allows to select empty data from a file that doesn't exist. Disabled by default.
- [engine_file_truncate_on_insert](/docs/en/operations/settings/settings.md#engine-file-truncate-on-insert) - allows to truncate file before insert into it. Disabled by default.
- [engine_file_allow_create_multiple_files](/docs/en/operations/settings/settings.md#engine_file_allow_create_multiple_files) - allows to create a new file on each insert if format has suffix. Disabled by default.
- [engine_file_skip_empty_files](/docs/en/operations/settings/settings.md#engine_file_skip_empty_files) - allows to skip empty files while reading. Disabled by default.
- [storage_file_read_method](/docs/en/operations/settings/settings.md#engine-file-emptyif-not-exists) - method of reading data from storage file, one of: read, pread, mmap (only for clickhouse-local). Default value: `pread` for clickhouse-server, `mmap` for clickhouse-local.
||||
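A sketch of one of these settings in action (the path is hypothetical):

```sql
SET engine_file_skip_empty_files = 1;
-- hypothetical directory of CSV files; empty ones are now skipped
SELECT count(*) FROM file('big_dir/*.csv', 'CSV', 'name String, value UInt32');
```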
**See Also**

- [Virtual columns](/docs/en/engines/table-engines/index.md#table_engines-virtual_columns)

@ -1,7 +1,7 @@
---
slug: /en/sql-reference/table-functions/gcs
sidebar_position: 45
sidebar_label: s3
sidebar_label: gcs
keywords: [gcs, bucket]
---

@ -97,6 +97,12 @@ FROM hdfs('hdfs://hdfs1:9000/big_dir/file{0..9}{0..9}{0..9}', 'CSV', 'name Strin

- `_path` — Path to the file.
- `_file` — Name of the file.

## Storage Settings {#storage-settings}

- [hdfs_truncate_on_insert](/docs/en/operations/settings/settings.md#hdfs-truncate-on-insert) - allows truncating the file before inserting into it. Disabled by default.
- [hdfs_create_multiple_files](/docs/en/operations/settings/settings.md#hdfs_allow_create_multiple_files) - allows creating a new file on each insert if the format has a suffix. Disabled by default.
- [hdfs_skip_empty_files](/docs/en/operations/settings/settings.md#hdfs_skip_empty_files) - allows skipping empty files while reading. Disabled by default.

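A sketch combining one of these settings with the `hdfs` function (the URI follows the example above):

```sql
SET hdfs_skip_empty_files = 1;
-- empty files under the hypothetical directory are now skipped
SELECT count(*) FROM hdfs('hdfs://hdfs1:9000/big_dir/*', 'CSV', 'name String, value UInt32');
```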
**See Also**

- [Virtual columns](../../engines/table-engines/index.md#table_engines-virtual_columns)

@ -1,10 +1,10 @@
---
slug: /en/sql-reference/table-functions/redis
sidebar_position: 10
sidebar_label: Redis
sidebar_position: 43
sidebar_label: redis
---

# Redis
# redis

This table function allows integrating ClickHouse with [Redis](https://redis.io/).

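A minimal sketch of a call, assuming a Redis server reachable at `redis1:6379` whose values fit the declared structure (host, key column, and layout are all hypothetical):

```sql
-- hypothetical Redis endpoint and row layout
SELECT * FROM redis('redis1:6379', 'key', 'key String, v1 String, v2 UInt32');
```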
@ -202,6 +202,12 @@ FROM s3(
LIMIT 5;
```

## Storage Settings {#storage-settings}

- [s3_truncate_on_insert](/docs/en/operations/settings/settings.md#s3-truncate-on-insert) - allows truncating the file before inserting into it. Disabled by default.
- [s3_create_multiple_files](/docs/en/operations/settings/settings.md#s3_allow_create_multiple_files) - allows creating a new file on each insert if the format has a suffix. Disabled by default.
- [s3_skip_empty_files](/docs/en/operations/settings/settings.md#s3_skip_empty_files) - allows skipping empty files while reading. Disabled by default.

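A sketch of how `s3_truncate_on_insert` changes INSERT behavior (bucket URL and schema are hypothetical):

```sql
SET s3_truncate_on_insert = 1;
-- hypothetical bucket; the object is truncated instead of appended to
INSERT INTO FUNCTION s3('https://my-bucket.s3.amazonaws.com/data.csv', 'CSV', 'name String, value UInt32')
VALUES ('a', 1);
```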
**See Also**

- [S3 engine](../../engines/table-engines/integrations/s3.md)

@ -53,6 +53,10 @@ Character `|` inside patterns is used to specify failover addresses. They are it

- `_path` — Path to the `URL`.
- `_file` — Resource name of the `URL`.

## Storage Settings {#storage-settings}

- [engine_url_skip_empty_files](/docs/en/operations/settings/settings.md#engine_url_skip_empty_files) - allows skipping empty files while reading. Disabled by default.

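A sketch with the single `url` setting above (the endpoint is hypothetical):

```sql
SET engine_url_skip_empty_files = 1;
-- hypothetical endpoint; an empty response body is now skipped
SELECT * FROM url('https://example.com/data.csv', 'CSV', 'name String, value UInt32');
```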
**See Also**

- [Virtual columns](/docs/en/engines/table-engines/index.md#table_engines-virtual_columns)

@ -31,7 +31,7 @@ sidebar_label: Decimal

## Internal Representation {#vnutrennee-predstavlenie}

Internally, the data is represented as signed integers of the corresponding bit width. The real value ranges that can be stored in memory are slightly larger than the declared ones. The declared Decimal ranges are checked only when a number is parsed from its string representation.
Because modern CPUs do not support 128-bit integers natively, operations on Decimal128 are emulated in software. Decimal128 is significantly slower than Decimal32/Decimal64.
Because modern CPUs do not support 128-bit and 256-bit integers natively, operations on Decimal128 and Decimal256 are emulated in software. These types are significantly slower than Decimal32/Decimal64.

## Operations and Result Types {#operatsii-i-tipy-rezultata}

@ -59,6 +59,10 @@ sidebar_label: Decimal

Operations on the Decimal type can cause integer overflow. Excess digits in the fractional part are discarded (not rounded). Excess digits in the integer part raise an exception.

:::warning
Overflow checks are not implemented for Decimal128 and Decimal256. In case of overflow, an incorrect result is returned and no exception is thrown.
:::

```sql
SELECT toDecimal32(2, 4) AS x, x / 3
```

@ -1223,10 +1223,12 @@ parseDateTimeBestEffort(time_string[, time_zone])

- [Unix timestamp](https://ru.wikipedia.org/wiki/Unix-время) as a string, 9 or 10 characters long.
- A string with a date and time: `YYYYMMDDhhmmss`, `DD/MM/YYYY hh:mm:ss`, `DD-MM-YY hh:mm`, `YYYY-MM-DD hh:mm:ss`, etc.
- A string with a date but no time: `YYYY`, `YYYYMM`, `YYYY*MM`, `DD/MM/YYYY`, `DD-MM-YY`, etc.
- A string with a time and a day of the month: `DD`, `DD hh`, `DD hh:mm`. In this case `YYYY-MM` is taken to be `2000-01`.
- A string with a time and a day of the month: `DD`, `DD hh`, `DD hh:mm`. In this case `MM` is taken to be `01`.
- A string with a date, a time, and time zone information: `YYYY-MM-DD hh:mm:ss ±h:mm`, etc. For example, `2020-12-12 17:36:00 -5:00`.
- A string with a date and time in [syslog timestamp](https://datatracker.ietf.org/doc/html/rfc3164#section-4.1.2) format: `Mmm dd hh:mm:ss`. For example, `Jun 9 14:20:32`.

For all delimiter-separated formats, the function recognizes month names given either as the full English month name or as the first three letters of the month name. Examples: `24/DEC/18`, `24-Dec-18`, `01-September-2018`.
If the year is not specified, it is taken to be the current year. If the resulting moment happens to be in the future (even one second ahead of the current moment), the current year is replaced with the previous one.

**Returned value**

@ -1297,23 +1299,46 @@ AS parseDateTimeBestEffort;
Query:

```sql
SELECT parseDateTimeBestEffort('10 20:19');
SELECT toYear(now()) as year, parseDateTimeBestEffort('10 20:19');
```

Result:

```text
┌─parseDateTimeBestEffort('10 20:19')─┐
│                 2000-01-10 20:19:00 │
└─────────────────────────────────────┘
┌─year─┬─parseDateTimeBestEffort('10 20:19')─┐
│ 2023 │                 2023-01-10 20:19:00 │
└──────┴─────────────────────────────────────┘
```

Query:

```sql
WITH
    now() AS ts_now,
    formatDateTime(ts_around, '%b %e %T') AS syslog_arg
SELECT
    ts_now,
    syslog_arg,
    parseDateTimeBestEffort(syslog_arg)
FROM (SELECT arrayJoin([ts_now - 30, ts_now + 30]) AS ts_around);
```

Result:

```text
┌──────────────ts_now─┬─syslog_arg──────┬─parseDateTimeBestEffort(syslog_arg)─┐
│ 2023-06-30 23:59:30 │ Jun 30 23:59:00 │                 2023-06-30 23:59:00 │
│ 2023-06-30 23:59:30 │ Jul  1 00:00:00 │                 2022-07-01 00:00:00 │
└─────────────────────┴─────────────────┴─────────────────────────────────────┘
```

**See Also**

- [Information about the ISO 8601 format from @xkcd](https://xkcd.com/1179/)
- [RFC 1123](https://tools.ietf.org/html/rfc1123)
- [RFC 1123](https://datatracker.ietf.org/doc/html/rfc1123)
- [toDate](#todate)
- [toDateTime](#todatetime)
- [RFC 3164](https://datatracker.ietf.org/doc/html/rfc3164#section-4.1.2)

## parseDateTimeBestEffortUS {#parsedatetimebesteffortUS}

@ -1,6 +1,6 @@
---
slug: /ru/whats-new/changelog/2017
sidebar_position: 6
sidebar_position: 60
sidebar_label: 2017
title: 2017 Changelog
---

@ -1,6 +1,6 @@
---
slug: /ru/whats-new/changelog/2018
sidebar_position: 5
sidebar_position: 50
sidebar_label: 2018
title: 2018 Changelog
---

@ -1,6 +1,6 @@
---
slug: /ru/whats-new/changelog/2019
sidebar_position: 4
sidebar_position: 40
sidebar_label: 2019
title: 2019 Changelog
---

@ -1,6 +1,6 @@
---
slug: /ru/whats-new/changelog/2020
sidebar_position: 3
sidebar_position: 30
sidebar_label: 2020
title: 2020 Changelog
---

@ -1,6 +1,6 @@
---
slug: /ru/whats-new/changelog/2021
sidebar_position: 2
sidebar_position: 20
sidebar_label: 2021
title: 2021 Changelog
---

10
docs/ru/whats-new/changelog/2022.mdx
Normal file
@ -0,0 +1,10 @@
---
slug: /ru/whats-new/changelog/2022
sidebar_position: 10
sidebar_label: 2022
title: 2022 Changelog
---

import Changelog from '@site/docs/en/whats-new/changelog/2022.md';

<Changelog />
@ -2,5 +2,5 @@ label: 'Changelog'
collapsible: true
collapsed: true
link:
  type: doc
  id: ru/whats-new/changelog/index
  type: generated-index
  title: Changelog

@ -1,7 +1,7 @@
---
sidebar_position: 1
sidebar_label: 2022
title: 2022 Changelog
sidebar_label: 2023
title: 2023 Changelog
slug: /ru/whats-new/changelog/index
---

@ -6,4 +6,4 @@ sidebar_label: Changelog

# Changelog

You can view the latest Changelog at [https://clickhouse.com/docs/en/whats-new/changelog/](https://clickhouse.com/docs/en/whats-new/changelog/)
You can view the latest Changelog at [https://clickhouse.com/docs/en/whats-new/changelog/](/docs/en/whats-new/changelog/index.md)

@ -409,8 +409,15 @@ if (ENABLE_CLICKHOUSE_KEEPER_CONVERTER)
    list(APPEND CLICKHOUSE_BUNDLE clickhouse-keeper-converter)
endif ()
if (ENABLE_CLICKHOUSE_KEEPER_CLIENT)
    add_custom_target (clickhouse-keeper-client ALL COMMAND ${CMAKE_COMMAND} -E create_symlink clickhouse clickhouse-keeper-client DEPENDS clickhouse)
    install (FILES "${CMAKE_CURRENT_BINARY_DIR}/clickhouse-keeper-client" DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse)
    if (NOT BUILD_STANDALONE_KEEPER)
        add_custom_target (clickhouse-keeper-client ALL COMMAND ${CMAKE_COMMAND} -E create_symlink clickhouse clickhouse-keeper-client DEPENDS clickhouse)
        install (FILES "${CMAKE_CURRENT_BINARY_DIR}/clickhouse-keeper-client" DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse)
    # symlink to standalone keeper binary
    else ()
        add_custom_target (clickhouse-keeper-client ALL COMMAND ${CMAKE_COMMAND} -E create_symlink clickhouse-keeper clickhouse-keeper-client DEPENDS clickhouse-keeper)
        install (FILES "${CMAKE_CURRENT_BINARY_DIR}/clickhouse-keeper-client" DESTINATION "${CMAKE_INSTALL_BINDIR}" COMPONENT clickhouse-keeper)
    endif ()

    list(APPEND CLICKHOUSE_BUNDLE clickhouse-keeper-client)
endif ()
if (ENABLE_CLICKHOUSE_DISKS)

@ -977,13 +977,7 @@ void Client::addOptions(OptionsDescription & options_description)
    ("connection", po::value<std::string>(), "connection to use (from the client config), by default connection name is hostname")
    ("secure,s", "Use TLS connection")
    ("user,u", po::value<std::string>()->default_value("default"), "user")
    /** If "--password [value]" is used but the value is omitted, the bad argument exception will be thrown.
      * implicit_value is used to avoid this exception (to allow user to type just "--password")
      * Since currently boost provides no way to check if a value has been set implicitly for an option,
      * the "\n" is used to distinguish this case because there is hardly a chance a user would use "\n"
      * as the password.
      */
    ("password", po::value<std::string>()->implicit_value("\n", ""), "password")
    ("password", po::value<std::string>(), "password")
    ("ask-password", "ask-password")
    ("quota_key", po::value<std::string>(), "A string to differentiate quotas when the user have keyed quotas configured on server")

@ -1397,6 +1391,14 @@ void Client::readArguments(
            arg = argv[arg_num];
            addMultiquery(arg, common_arguments);
        }
        else if (arg == "--password" && ((arg_num + 1) >= argc || std::string_view(argv[arg_num + 1]).starts_with('-')))
        {
            common_arguments.emplace_back(arg);
            /// No password was provided by user. Add '\n' as implicit password,
            /// which encodes that client should ask user for the password.
            /// '\n' is used because there is hardly a chance that a user would use '\n' as a password.
            common_arguments.emplace_back("\n");
        }
        else
            common_arguments.emplace_back(arg);
    }

@ -2,6 +2,8 @@

#include <base/types.h>

#include <vector>

namespace DB
{

|
||||
|
||||
options.addOption(
|
||||
Poco::Util::Option("host", "h", "server hostname. default `localhost`")
|
||||
.argument("host")
|
||||
.argument("<host>")
|
||||
.binding("host"));
|
||||
|
||||
options.addOption(
|
||||
Poco::Util::Option("port", "p", "server port. default `2181`")
|
||||
.argument("port")
|
||||
.argument("<port>")
|
||||
.binding("port"));
|
||||
|
||||
options.addOption(
|
||||
Poco::Util::Option("query", "q", "will execute given query, then exit.")
|
||||
.argument("query")
|
||||
.argument("<query>")
|
||||
.binding("query"));
|
||||
|
||||
options.addOption(
|
||||
Poco::Util::Option("connection-timeout", "", "set connection timeout in seconds. default 10s.")
|
||||
.argument("connection-timeout")
|
||||
.argument("<seconds>")
|
||||
.binding("connection-timeout"));
|
||||
|
||||
options.addOption(
|
||||
Poco::Util::Option("session-timeout", "", "set session timeout in seconds. default 10s.")
|
||||
.argument("session-timeout")
|
||||
.argument("<seconds>")
|
||||
.binding("session-timeout"));
|
||||
|
||||
options.addOption(
|
||||
Poco::Util::Option("operation-timeout", "", "set operation timeout in seconds. default 10s.")
|
||||
.argument("operation-timeout")
|
||||
.argument("<seconds>")
|
||||
.binding("operation-timeout"));
|
||||
|
||||
options.addOption(
|
||||
Poco::Util::Option("history-file", "", "set path of history file. default `~/.keeper-client-history`")
|
||||
.argument("history-file")
|
||||
.argument("<file>")
|
||||
.binding("history-file"));
|
||||
|
||||
options.addOption(
|
||||
Poco::Util::Option("log-level", "", "set log level")
|
||||
.argument("log-level")
|
||||
.argument("<level>")
|
||||
.binding("log-level"));
|
||||
}
|
||||
|
||||
|
@ -112,6 +112,18 @@ if (BUILD_STANDALONE_KEEPER)
|
||||
clickhouse-keeper.cpp
|
||||
)
|
||||
|
||||
# List of resources for clickhouse-keeper client
|
||||
if (ENABLE_CLICKHOUSE_KEEPER_CLIENT)
|
||||
list(APPEND CLICKHOUSE_KEEPER_STANDALONE_SOURCES
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/../../programs/keeper-client/KeeperClient.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/../../programs/keeper-client/Commands.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/../../programs/keeper-client/Parser.cpp
|
||||
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Client/LineReader.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Client/ReplxxLineReader.cpp
|
||||
)
|
||||
endif()
|
||||
|
||||
clickhouse_add_executable(clickhouse-keeper ${CLICKHOUSE_KEEPER_STANDALONE_SOURCES})
|
||||
|
||||
# Remove some redundant dependencies
|
||||
@ -122,6 +134,10 @@ if (BUILD_STANDALONE_KEEPER)
|
||||
target_include_directories(clickhouse-keeper PUBLIC "${CMAKE_CURRENT_BINARY_DIR}/../../src/Core/include") # uses some includes from core
|
||||
target_include_directories(clickhouse-keeper PUBLIC "${CMAKE_CURRENT_BINARY_DIR}/../../src") # uses some includes from common
|
||||
|
||||
if (ENABLE_CLICKHOUSE_KEEPER_CLIENT AND TARGET ch_rust::skim)
|
||||
target_link_libraries(clickhouse-keeper PRIVATE ch_rust::skim)
|
||||
endif()
|
||||
|
||||
target_link_libraries(clickhouse-keeper
|
||||
PRIVATE
|
||||
ch_contrib::abseil_swiss_tables
|
||||
|
@ -34,6 +34,8 @@
|
||||
#include "Core/Defines.h"
|
||||
#include "config.h"
|
||||
#include "config_version.h"
|
||||
#include "config_tools.h"
|
||||
|
||||
|
||||
#if USE_SSL
|
||||
# include <Poco/Net/Context.h>
|
||||
@ -131,7 +133,10 @@ int Keeper::run()
|
||||
if (config().hasOption("help"))
|
||||
{
|
||||
Poco::Util::HelpFormatter help_formatter(Keeper::options());
|
||||
auto header_str = fmt::format("{} [OPTION] [-- [ARG]...]\n"
|
||||
auto header_str = fmt::format("{0} [OPTION] [-- [ARG]...]\n"
|
||||
#if ENABLE_CLICKHOUSE_KEEPER_CLIENT
|
||||
"{0} client [OPTION]\n"
|
||||
#endif
|
||||
"positional arguments can be used to rewrite config.xml properties, for example, --http_port=8010",
|
||||
commandName());
|
||||
help_formatter.setHeader(header_str);
|
||||
|
@ -1,6 +1,30 @@
|
||||
#include <Common/StringUtils/StringUtils.h>
|
||||
#include "config_tools.h"
|
||||
|
||||
|
||||
int mainEntryClickHouseKeeper(int argc, char ** argv);
|
||||
|
||||
#if ENABLE_CLICKHOUSE_KEEPER_CLIENT
|
||||
int mainEntryClickHouseKeeperClient(int argc, char ** argv);
|
||||
#endif
|
||||
|
||||
int main(int argc_, char ** argv_)
|
||||
{
|
||||
#if ENABLE_CLICKHOUSE_KEEPER_CLIENT
|
||||
|
||||
if (argc_ >= 2)
|
||||
{
|
||||
/// 'clickhouse-keeper --client ...' and 'clickhouse-keeper client ...' are OK
|
||||
if (strcmp(argv_[1], "--client") == 0 || strcmp(argv_[1], "client") == 0)
|
||||
{
|
||||
argv_[1] = argv_[0];
|
||||
return mainEntryClickHouseKeeperClient(--argc_, argv_ + 1);
|
||||
}
|
||||
}
|
||||
|
||||
if (argc_ > 0 && (strcmp(argv_[0], "clickhouse-keeper-client") == 0 || endsWith(argv_[0], "/clickhouse-keeper-client")))
|
||||
return mainEntryClickHouseKeeperClient(argc_, argv_);
|
||||
#endif
|
||||
|
||||
return mainEntryClickHouseKeeper(argc_, argv_);
|
||||
}
|
||||
|
@ -1705,7 +1705,6 @@ try
|
||||
#endif
|
||||
|
||||
/// Must be done after initialization of `servers`, because async_metrics will access `servers` variable from its thread.
|
||||
|
||||
async_metrics.start();
|
||||
|
||||
{
|
||||
|
@ -4,6 +4,7 @@
|
||||
#include <bitset>
|
||||
#include <cstring>
|
||||
#include <vector>
|
||||
#include <unordered_map>
|
||||
|
||||
|
||||
namespace DB
|
||||
|
@ -333,7 +333,7 @@ void ContextAccess::calculateAccessRights() const
|
||||
boost::algorithm::join(roles_info->getCurrentRolesNames(), ", "),
|
||||
boost::algorithm::join(roles_info->getEnabledRolesNames(), ", "));
|
||||
}
|
||||
LOG_TRACE(trace_log, "Settings: readonly={}, allow_ddl={}, allow_introspection_functions={}", params.readonly, params.allow_ddl, params.allow_introspection);
|
||||
LOG_TRACE(trace_log, "Settings: readonly = {}, allow_ddl = {}, allow_introspection_functions = {}", params.readonly, params.allow_ddl, params.allow_introspection);
|
||||
LOG_TRACE(trace_log, "List of all grants: {}", access->toString());
|
||||
LOG_TRACE(trace_log, "List of all grants including implicit: {}", access_with_implicit->toString());
|
||||
}
|
||||
|
@ -5,6 +5,7 @@
#include <Access/GSSAcceptor.h>
#include <base/defines.h>
#include <base/types.h>
#include <base/extended_types.h>

#include <chrono>
#include <map>

@ -42,7 +43,7 @@ public:
private:
    struct LDAPCacheEntry
    {
        std::size_t last_successful_params_hash = 0;
        UInt128 last_successful_params_hash = 0;
        std::chrono::steady_clock::time_point last_successful_authentication_timestamp;
        LDAPClient::SearchResultsList last_successful_role_search_results;
    };

@ -146,8 +146,8 @@ public:
        for (const auto & argument : this->argument_types)
            can_be_compiled &= canBeNativeType(*argument);

        auto return_type = this->getResultType();
        can_be_compiled &= canBeNativeType(*return_type);
        const auto & result_type = this->getResultType();
        can_be_compiled &= canBeNativeType(*result_type);

        return can_be_compiled;
    }

@ -198,8 +198,8 @@ public:
        auto * denominator_ptr = b.CreateConstGEP1_32(b.getInt8Ty(), aggregate_data_ptr, denominator_offset);
        auto * denominator_value = b.CreateLoad(denominator_type, denominator_ptr);

        auto * double_numerator = nativeCast<Numerator>(b, numerator_value, b.getDoubleTy());
        auto * double_denominator = nativeCast<Denominator>(b, denominator_value, b.getDoubleTy());
        auto * double_numerator = nativeCast<Numerator>(b, numerator_value, this->getResultType());
        auto * double_denominator = nativeCast<Denominator>(b, denominator_value, this->getResultType());

        return b.CreateFDiv(double_numerator, double_denominator);
    }

@ -308,7 +308,7 @@ public:

#if USE_EMBEDDED_COMPILER

    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes & arguments_types, const std::vector<llvm::Value *> & argument_values) const override
    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const ValuesWithType & arguments) const override
    {
        llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);

@ -316,7 +316,7 @@ public:

        auto * numerator_ptr = aggregate_data_ptr;
        auto * numerator_value = b.CreateLoad(numerator_type, numerator_ptr);
        auto * value_cast_to_numerator = nativeCast(b, arguments_types[0], argument_values[0], numerator_type);
        auto * value_cast_to_numerator = nativeCast(b, arguments[0], toNativeDataType<Numerator>());
        auto * numerator_result_value = numerator_type->isIntegerTy() ? b.CreateAdd(numerator_value, value_cast_to_numerator) : b.CreateFAdd(numerator_value, value_cast_to_numerator);
        b.CreateStore(numerator_result_value, numerator_ptr);

@ -30,7 +30,7 @@ public:

    using Numerator = typename Base::Numerator;
    using Denominator = typename Base::Denominator;
    using Fraction = typename Base::Fraction;
    using Fraction = typename Base::Fraction;

    void NO_SANITIZE_UNDEFINED add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena *) const override
    {

@ -55,7 +55,7 @@ public:
        return can_be_compiled;
    }

    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes & arguments_types, const std::vector<llvm::Value *> & argument_values) const override
    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const ValuesWithType & arguments) const override
    {
        llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);

@ -63,8 +63,9 @@ public:
        auto * numerator_ptr = aggregate_data_ptr;
        auto * numerator_value = b.CreateLoad(numerator_type, numerator_ptr);

        auto * argument = nativeCast(b, arguments_types[0], argument_values[0], numerator_type);
        auto * weight = nativeCast(b, arguments_types[1], argument_values[1], numerator_type);
        auto numerator_data_type = toNativeDataType<Numerator>();
        auto * argument = nativeCast(b, arguments[0], numerator_data_type);
        auto * weight = nativeCast(b, arguments[1], numerator_data_type);

        llvm::Value * value_weight_multiplication = argument->getType()->isIntegerTy() ? b.CreateMul(argument, weight) : b.CreateFMul(argument, weight);
        auto * numerator_result_value = numerator_type->isIntegerTy() ? b.CreateAdd(numerator_value, value_weight_multiplication) : b.CreateFAdd(numerator_value, value_weight_multiplication);

@ -75,7 +76,7 @@ public:
        static constexpr size_t denominator_offset = offsetof(Fraction, denominator);
        auto * denominator_ptr = b.CreateConstInBoundsGEP1_64(b.getInt8Ty(), aggregate_data_ptr, denominator_offset);

        auto * weight_cast_to_denominator = nativeCast(b, arguments_types[1], argument_values[1], denominator_type);
        auto * weight_cast_to_denominator = nativeCast(b, arguments[1], toNativeDataType<Denominator>());

        auto * denominator_value = b.CreateLoad(denominator_type, denominator_ptr);
        auto * denominator_value_updated = denominator_type->isIntegerTy() ? b.CreateAdd(denominator_value, weight_cast_to_denominator) : b.CreateFAdd(denominator_value, weight_cast_to_denominator);

@ -148,7 +148,7 @@ public:
        Data::compileCreate(builder, value_ptr);
    }

    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes &, const std::vector<llvm::Value *> & argument_values) const override
    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const ValuesWithType & arguments) const override
    {
        llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);

@ -157,8 +157,7 @@ public:
        auto * value_ptr = aggregate_data_ptr;
        auto * value = b.CreateLoad(return_type, value_ptr);

        const auto & argument_value = argument_values[0];
        auto * result_value = Data::compileUpdate(builder, value, argument_value);
        auto * result_value = Data::compileUpdate(builder, value, arguments[0].value);

        b.CreateStore(result_value, value_ptr);
    }

@ -165,7 +165,7 @@ public:
        b.CreateMemSet(aggregate_data_ptr, llvm::ConstantInt::get(b.getInt8Ty(), 0), sizeof(AggregateFunctionCountData), llvm::assumeAligned(this->alignOfData()));
    }

    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes &, const std::vector<llvm::Value *> &) const override
    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const ValuesWithType &) const override
    {
        llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);

@ -309,13 +309,13 @@ public:
        b.CreateMemSet(aggregate_data_ptr, llvm::ConstantInt::get(b.getInt8Ty(), 0), sizeof(AggregateFunctionCountData), llvm::assumeAligned(this->alignOfData()));
    }

    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes &, const std::vector<llvm::Value *> & values) const override
    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const ValuesWithType & arguments) const override
    {
        llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);

        auto * return_type = toNativeType(b, this->getResultType());

        auto * is_null_value = b.CreateExtractValue(values[0], {1});
        auto * is_null_value = b.CreateExtractValue(arguments[0].value, {1});
        auto * increment_value = b.CreateSelect(is_null_value, llvm::ConstantInt::get(return_type, 0), llvm::ConstantInt::get(return_type, 1));

        auto * count_value_ptr = aggregate_data_ptr;

@ -188,18 +188,18 @@ public:
        return canBeNativeType(*this->argument_types.back()) && this->nested_function->isCompilable();
    }

    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes & arguments_types, const std::vector<llvm::Value *> & argument_values) const override
    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const ValuesWithType & arguments) const override
    {
        llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);

        const auto & nullable_type = arguments_types[0];
        const auto & nullable_value = argument_values[0];
        const auto & nullable_type = arguments[0].type;
        const auto & nullable_value = arguments[0].value;

        auto * wrapped_value = b.CreateExtractValue(nullable_value, {0});
        auto * is_null_value = b.CreateExtractValue(nullable_value, {1});

        const auto & predicate_type = arguments_types[argument_values.size() - 1];
        auto * predicate_value = argument_values[argument_values.size() - 1];
        const auto & predicate_type = arguments.back().type;
        auto * predicate_value = arguments.back().value;
        auto * is_predicate_true = nativeBoolCast(b, predicate_type, predicate_value);

        auto * head = b.GetInsertBlock();

@ -219,7 +219,7 @@ public:
        b.CreateStore(llvm::ConstantInt::get(b.getInt8Ty(), 1), aggregate_data_ptr);

        auto * aggregate_data_ptr_with_prefix_size_offset = b.CreateConstInBoundsGEP1_64(b.getInt8Ty(), aggregate_data_ptr, this->prefix_size);
        this->nested_function->compileAdd(b, aggregate_data_ptr_with_prefix_size_offset, { removeNullable(nullable_type) }, { wrapped_value });
        this->nested_function->compileAdd(b, aggregate_data_ptr_with_prefix_size_offset, { ValueWithType(wrapped_value, removeNullable(nullable_type)) });
        b.CreateBr(join_block);

        b.SetInsertPoint(join_block);

@ -370,38 +370,31 @@ public:
        return canBeNativeType(*this->argument_types.back()) && this->nested_function->isCompilable();
    }

    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes & arguments_types, const std::vector<llvm::Value *> & argument_values) const override
    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const ValuesWithType & arguments) const override
    {
        /// TODO: Check

        llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);

        size_t arguments_size = arguments_types.size();
        size_t arguments_size = arguments.size();

        ValuesWithType wrapped_arguments;
        wrapped_arguments.reserve(arguments_size);

        DataTypes non_nullable_types;
        std::vector<llvm::Value * > wrapped_values;
        std::vector<llvm::Value * > is_null_values;

        non_nullable_types.resize(arguments_size);
        wrapped_values.resize(arguments_size);
        is_null_values.resize(arguments_size);

        for (size_t i = 0; i < arguments_size; ++i)
        {
            const auto & argument_value = argument_values[i];
            const auto & argument_value = arguments[i].value;
            const auto & argument_type = arguments[i].type;

            if (is_nullable[i])
            {
                auto * wrapped_value = b.CreateExtractValue(argument_value, {0});
                is_null_values[i] = b.CreateExtractValue(argument_value, {1});

                wrapped_values[i] = wrapped_value;
                non_nullable_types[i] = removeNullable(arguments_types[i]);
                is_null_values.emplace_back(b.CreateExtractValue(argument_value, {1}));
                wrapped_arguments.emplace_back(wrapped_value, removeNullable(argument_type));
            }
            else
            {
                wrapped_values[i] = argument_value;
                non_nullable_types[i] = arguments_types[i];
                wrapped_arguments.emplace_back(argument_value, argument_type);
            }
        }

@ -415,9 +408,6 @@ public:

        for (auto * is_null_value : is_null_values)
        {
            if (!is_null_value)
                continue;

            auto * values_have_null = b.CreateLoad(b.getInt1Ty(), values_have_null_ptr);
            b.CreateStore(b.CreateOr(values_have_null, is_null_value), values_have_null_ptr);
        }

@ -426,8 +416,8 @@ public:

        b.SetInsertPoint(join_block_after_null_checks);

        const auto & predicate_type = arguments_types[argument_values.size() - 1];
        auto * predicate_value = argument_values[argument_values.size() - 1];
        const auto & predicate_type = arguments.back().type;
        auto * predicate_value = arguments.back().value;
        auto * is_predicate_true = nativeBoolCast(b, predicate_type, predicate_value);

        auto * if_true = llvm::BasicBlock::Create(head->getContext(), "if_true", head->getParent());

@ -444,7 +434,7 @@ public:
        b.CreateStore(llvm::ConstantInt::get(b.getInt8Ty(), 1), aggregate_data_ptr);

        auto * aggregate_data_ptr_with_prefix_size_offset = b.CreateConstInBoundsGEP1_64(b.getInt8Ty(), aggregate_data_ptr, this->prefix_size);
        this->nested_function->compileAdd(b, aggregate_data_ptr_with_prefix_size_offset, non_nullable_types, wrapped_values);
        this->nested_function->compileAdd(b, aggregate_data_ptr_with_prefix_size_offset, wrapped_arguments);
        b.CreateBr(join_block);

        b.SetInsertPoint(join_block);

@ -223,12 +223,12 @@ public:
        nested_func->compileCreate(builder, aggregate_data_ptr);
    }

    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes & arguments_types, const std::vector<llvm::Value *> & argument_values) const override
    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const ValuesWithType & arguments) const override
    {
        llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);

        const auto & predicate_type = arguments_types[argument_values.size() - 1];
        auto * predicate_value = argument_values[argument_values.size() - 1];
        const auto & predicate_type = arguments.back().type;
        auto * predicate_value = arguments.back().value;

        auto * head = b.GetInsertBlock();

@ -242,21 +242,9 @@ public:

        b.SetInsertPoint(if_true);

        size_t arguments_size_without_predicate = arguments_types.size() - 1;

        DataTypes argument_types_without_predicate;
        std::vector<llvm::Value *> argument_values_without_predicate;

        argument_types_without_predicate.resize(arguments_size_without_predicate);
        argument_values_without_predicate.resize(arguments_size_without_predicate);

        for (size_t i = 0; i < arguments_size_without_predicate; ++i)
        {
            argument_types_without_predicate[i] = arguments_types[i];
            argument_values_without_predicate[i] = argument_values[i];
        }

        nested_func->compileAdd(builder, aggregate_data_ptr, argument_types_without_predicate, argument_values_without_predicate);
        ValuesWithType arguments_without_predicate = arguments;
        arguments_without_predicate.pop_back();
        nested_func->compileAdd(builder, aggregate_data_ptr, arguments_without_predicate);

        b.CreateBr(join_block);

@ -1459,11 +1459,11 @@ public:
        b.CreateMemSet(aggregate_data_ptr, llvm::ConstantInt::get(b.getInt8Ty(), 0), this->sizeOfData(), llvm::assumeAligned(this->alignOfData()));
    }

    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes &, const std::vector<llvm::Value *> & argument_values) const override
    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const ValuesWithType & arguments) const override
    {
        if constexpr (Data::is_compilable)
        {
            Data::compileChangeIfBetter(builder, aggregate_data_ptr, argument_values[0]);
            Data::compileChangeIfBetter(builder, aggregate_data_ptr, arguments[0].value);
        }
        else
        {

@ -378,12 +378,12 @@ public:

#if USE_EMBEDDED_COMPILER

    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes & arguments_types, const std::vector<llvm::Value *> & argument_values) const override
    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const ValuesWithType & arguments) const override
    {
        llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);

        const auto & nullable_type = arguments_types[0];
        const auto & nullable_value = argument_values[0];
        const auto & nullable_type = arguments[0].type;
        const auto & nullable_value = arguments[0].value;

        auto * wrapped_value = b.CreateExtractValue(nullable_value, {0});
        auto * is_null_value = b.CreateExtractValue(nullable_value, {1});

@ -405,7 +405,7 @@ public:
        b.CreateStore(llvm::ConstantInt::get(b.getInt8Ty(), 1), aggregate_data_ptr);

        auto * aggregate_data_ptr_with_prefix_size_offset = b.CreateConstInBoundsGEP1_64(b.getInt8Ty(), aggregate_data_ptr, this->prefix_size);
        this->nested_function->compileAdd(b, aggregate_data_ptr_with_prefix_size_offset, { removeNullable(nullable_type) }, { wrapped_value });
        this->nested_function->compileAdd(b, aggregate_data_ptr_with_prefix_size_offset, { ValueWithType(wrapped_value, removeNullable(nullable_type)) });
        b.CreateBr(join_block);

        b.SetInsertPoint(join_block);

@ -568,36 +568,32 @@ public:

#if USE_EMBEDDED_COMPILER

    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes & arguments_types, const std::vector<llvm::Value *> & argument_values) const override
    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const ValuesWithType & arguments) const override
    {
        llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);

        size_t arguments_size = arguments_types.size();
        size_t arguments_size = arguments.size();

        DataTypes non_nullable_types;
        std::vector<llvm::Value * > wrapped_values;
        std::vector<llvm::Value * > is_null_values;
        ValuesWithType wrapped_arguments;
        wrapped_arguments.reserve(arguments_size);

        non_nullable_types.resize(arguments_size);
        wrapped_values.resize(arguments_size);
        is_null_values.resize(arguments_size);
        std::vector<llvm::Value *> is_null_values;
        is_null_values.reserve(arguments_size);

        for (size_t i = 0; i < arguments_size; ++i)
        {
            const auto & argument_value = argument_values[i];
            const auto & argument_value = arguments[i].value;
            const auto & argument_type = arguments[i].type;

            if (is_nullable[i])
            {
                auto * wrapped_value = b.CreateExtractValue(argument_value, {0});
                is_null_values[i] = b.CreateExtractValue(argument_value, {1});

                wrapped_values[i] = wrapped_value;
                non_nullable_types[i] = removeNullable(arguments_types[i]);
                is_null_values.emplace_back(b.CreateExtractValue(argument_value, {1}));
                wrapped_arguments.emplace_back(wrapped_value, removeNullable(argument_type));
            }
            else
            {
                wrapped_values[i] = argument_value;
                non_nullable_types[i] = arguments_types[i];
                wrapped_arguments.emplace_back(argument_value, argument_type);
            }
        }

@ -612,9 +608,6 @@ public:

        for (auto * is_null_value : is_null_values)
        {
            if (!is_null_value)
                continue;

            auto * values_have_null = b.CreateLoad(b.getInt1Ty(), values_have_null_ptr);
            b.CreateStore(b.CreateOr(values_have_null, is_null_value), values_have_null_ptr);
        }

@ -630,7 +623,7 @@ public:
        b.CreateStore(llvm::ConstantInt::get(b.getInt8Ty(), 1), aggregate_data_ptr);

        auto * aggregate_data_ptr_with_prefix_size_offset = b.CreateConstInBoundsGEP1_64(b.getInt8Ty(), aggregate_data_ptr, this->prefix_size);
        this->nested_function->compileAdd(b, aggregate_data_ptr_with_prefix_size_offset, arguments_types, wrapped_values);
        this->nested_function->compileAdd(b, aggregate_data_ptr_with_prefix_size_offset, wrapped_arguments);
        b.CreateBr(join_block);

        b.SetInsertPoint(join_block);

@ -588,7 +588,7 @@ public:
        b.CreateStore(llvm::Constant::getNullValue(return_type), aggregate_sum_ptr);
    }

    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes & arguments_types, const std::vector<llvm::Value *> & argument_values) const override
    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const ValuesWithType & arguments) const override
    {
        llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);

@ -597,10 +597,7 @@ public:
        auto * sum_value_ptr = aggregate_data_ptr;
        auto * sum_value = b.CreateLoad(return_type, sum_value_ptr);

        const auto & argument_type = arguments_types[0];
        const auto & argument_value = argument_values[0];

        auto * value_cast_to_result = nativeCast(b, argument_type, argument_value, return_type);
        auto * value_cast_to_result = nativeCast(b, arguments[0], this->getResultType());
        auto * sum_result_value = sum_value->getType()->isIntegerTy() ? b.CreateAdd(sum_value, value_cast_to_result) : b.CreateFAdd(sum_value, value_cast_to_result);

        b.CreateStore(sum_result_value, sum_value_ptr);

@ -6,7 +6,7 @@
#include <Core/Block.h>
#include <Core/ColumnNumbers.h>
#include <Core/Field.h>
#include <Core/ValuesWithType.h>
#include <Interpreters/Context_fwd.h>
#include <base/types.h>
#include <Common/Exception.h>

@ -389,7 +390,7 @@ public:
    }

    /// compileAdd should generate code for updating aggregate function state stored in aggregate_data_ptr
    virtual void compileAdd(llvm::IRBuilderBase & /*builder*/, llvm::Value * /*aggregate_data_ptr*/, const DataTypes & /*arguments_types*/, const std::vector<llvm::Value *> & /*arguments_values*/) const
    virtual void compileAdd(llvm::IRBuilderBase & /*builder*/, llvm::Value * /*aggregate_data_ptr*/, const ValuesWithType & /*arguments*/) const
    {
        throw Exception(ErrorCodes::NOT_IMPLEMENTED, "{} is not JIT-compilable", getName());
    }

@ -185,11 +185,10 @@ void BackupCoordinationReplicatedTables::addPartNames(PartNamesForTableReplica &
            const String & other_replica_name = **other.replica_names.begin();
            throw Exception(
                ErrorCodes::CANNOT_BACKUP_TABLE,
                "Table {} on replica {} has part {} which is different from the part on replica {}. Must be the same",
                table_name_for_logs,
                replica_name,
                part_name,
                other_replica_name);
                "Table {} on replica {} has part {} different from the part on replica {} "
                "(checksum '{}' on replica {} != checksum '{}' on replica {})",
                table_name_for_logs, replica_name, part_name, other_replica_name,
                getHexUIntLowercase(checksum), replica_name, getHexUIntLowercase(other.checksum), other_replica_name);
        }
    }

@ -85,6 +85,9 @@ void BackupCoordinationStageSync::setError(const String & current_host, const Ex
    writeException(exception, buf, true);
    zookeeper->createIfNotExists(zookeeper_path + "/error", buf.str());

    /// When backup/restore fails, it removes the nodes from Zookeeper.
    /// Sometimes it fails to remove all nodes. It's possible that it removes /error node, but fails to remove /stage node,
    /// so the following line tries to preserve the error status.
    auto code = zookeeper->trySet(zookeeper_path, Stage::ERROR);
    if (code != Coordination::Error::ZOK)
        throw zkutil::KeeperException(code, zookeeper_path);

@ -152,8 +152,7 @@ namespace
        }
        catch (...)
        {
            if (coordination)
                coordination->setError(Exception(getCurrentExceptionMessageAndPattern(true, true), getCurrentExceptionCode()));
            sendExceptionToCoordination(coordination, Exception(getCurrentExceptionMessageAndPattern(true, true), getCurrentExceptionCode()));
        }
    }

@ -1165,11 +1165,20 @@ void ClientBase::onProfileEvents(Block & block)
/// Flush all buffers.
void ClientBase::resetOutput()
{
    /// Order is important: format, compression, file

    if (output_format)
        output_format->finalize();
    output_format.reset();

    logs_out_stream.reset();

    if (out_file_buf)
    {
        out_file_buf->finalize();
        out_file_buf.reset();
    }

    if (pager_cmd)
    {
        pager_cmd->in.close();

@ -1177,15 +1186,9 @@ void ClientBase::resetOutput()
    }
    pager_cmd = nullptr;

    if (out_file_buf)
    {
        out_file_buf->next();
        out_file_buf.reset();
    }

    if (out_logs_buf)
    {
        out_logs_buf->next();
        out_logs_buf->finalize();
        out_logs_buf.reset();
    }

@ -588,7 +588,7 @@ void Connection::sendQuery(
        if (method == "ZSTD")
            level = settings->network_zstd_compression_level;

        CompressionCodecFactory::instance().validateCodec(method, level, !settings->allow_suspicious_codecs, settings->allow_experimental_codecs);
        CompressionCodecFactory::instance().validateCodec(method, level, !settings->allow_suspicious_codecs, settings->allow_experimental_codecs, settings->enable_deflate_qpl_codec);
        compression_codec = CompressionCodecFactory::instance().get(method, level);
    }
    else

@ -18,7 +18,7 @@ ConnectionPoolPtr ConnectionPoolFactory::get(
    String client_name,
    Protocol::Compression compression,
    Protocol::Secure secure,
    Int64 priority)
    Priority priority)
{
    Key key{
        max_connections, host, port, default_database, user, password, quota_key, cluster, cluster_secret, client_name, compression, secure, priority};

@ -74,7 +74,7 @@ size_t ConnectionPoolFactory::KeyHash::operator()(const ConnectionPoolFactory::K
    hash_combine(seed, hash_value(k.client_name));
    hash_combine(seed, hash_value(k.compression));
    hash_combine(seed, hash_value(k.secure));
    hash_combine(seed, hash_value(k.priority));
    hash_combine(seed, hash_value(k.priority.value));
    return seed;
}

@ -1,6 +1,7 @@
#pragma once

#include <Common/PoolBase.h>
#include <Common/Priority.h>
#include <Client/Connection.h>
#include <IO/ConnectionTimeouts.h>
#include <Core/Settings.h>

@ -34,7 +35,7 @@ public:
        const Settings * settings = nullptr,
        bool force_connected = true) = 0;

    virtual Int64 getPriority() const { return 1; }
    virtual Priority getPriority() const { return Priority{1}; }
};

using ConnectionPoolPtr = std::shared_ptr<IConnectionPool>;

@ -60,7 +61,7 @@ public:
        const String & client_name_,
        Protocol::Compression compression_,
        Protocol::Secure secure_,
        Int64 priority_ = 1)
        Priority priority_ = Priority{1})
        : Base(max_connections_,
            &Poco::Logger::get("ConnectionPool (" + host_ + ":" + toString(port_) + ")")),
        host(host_),

@ -103,7 +104,7 @@ public:
        return host + ":" + toString(port);
    }

    Int64 getPriority() const override
    Priority getPriority() const override
    {
        return priority;
    }

@ -134,7 +135,7 @@ private:
    String client_name;
    Protocol::Compression compression; /// Whether to compress data when interacting with the server.
    Protocol::Secure secure;           /// Whether to encrypt data when interacting with the server.
    Int64 priority;                    /// priority from <remote_servers>
    Priority priority;                 /// priority from <remote_servers>
};

/**

@ -157,7 +158,7 @@ public:
        String client_name;
        Protocol::Compression compression;
        Protocol::Secure secure;
        Int64 priority;
        Priority priority;
    };

    struct KeyHash

@ -180,7 +181,7 @@ public:
        String client_name,
        Protocol::Compression compression,
        Protocol::Secure secure,
        Int64 priority);
        Priority priority);
private:
    mutable std::mutex mutex;
    using ConnectionPoolWeakPtr = std::weak_ptr<IConnectionPool>;

@ -71,7 +71,7 @@ IConnectionPool::Entry ConnectionPoolWithFailover::get(const ConnectionTimeouts
    return Base::get(max_ignored_errors, fallback_to_stale_replicas, try_get_entry, get_priority);
}

Int64 ConnectionPoolWithFailover::getPriority() const
Priority ConnectionPoolWithFailover::getPriority() const
{
    return (*std::max_element(nested_pools.begin(), nested_pools.end(), [](const auto & a, const auto & b)
    {

@ -48,7 +48,7 @@ public:
        const Settings * settings,
        bool force_connected) override; /// From IConnectionPool

    Int64 getPriority() const override; /// From IConnectionPool
    Priority getPriority() const override; /// From IConnectionPool

    /** Allocates up to the specified number of connections to work.
      * Connections provide access to different replicas of one shard.

@ -528,6 +528,7 @@ StringRef ColumnAggregateFunction::serializeValueIntoArena(size_t n, Arena & are
{
    WriteBufferFromArena out(arena, begin);
    func->serialize(data[n], out, version);
    out.finalize();
    return out.complete();
}

@ -151,13 +151,13 @@ public:

    ColumnPtr compress() const override;

    void forEachSubcolumn(ColumnCallback callback) const override
    void forEachSubcolumn(MutableColumnCallback callback) override
    {
        callback(offsets);
        callback(data);
    }

    void forEachSubcolumnRecursively(RecursiveColumnCallback callback) const override
    void forEachSubcolumnRecursively(RecursiveMutableColumnCallback callback) override
    {
        callback(*offsets);
        offsets->forEachSubcolumnRecursively(callback);

@ -230,12 +230,12 @@ public:
        data->getExtremes(min, max);
    }

    void forEachSubcolumn(ColumnCallback callback) const override
    void forEachSubcolumn(MutableColumnCallback callback) override
    {
        callback(data);
    }

    void forEachSubcolumnRecursively(RecursiveColumnCallback callback) const override
    void forEachSubcolumnRecursively(RecursiveMutableColumnCallback callback) override
    {
        callback(*data);
        data->forEachSubcolumnRecursively(callback);

@ -166,7 +166,7 @@ public:
    size_t byteSizeAt(size_t n) const override { return getDictionary().byteSizeAt(getIndexes().getUInt(n)); }
    size_t allocatedBytes() const override { return idx.getPositions()->allocatedBytes() + getDictionary().allocatedBytes(); }

    void forEachSubcolumn(ColumnCallback callback) const override
    void forEachSubcolumn(MutableColumnCallback callback) override
    {
        callback(idx.getPositionsPtr());

@ -175,7 +175,7 @@ public:
        callback(dictionary.getColumnUniquePtr());
    }

    void forEachSubcolumnRecursively(RecursiveColumnCallback callback) const override
    void forEachSubcolumnRecursively(RecursiveMutableColumnCallback callback) override
    {
        callback(*idx.getPositionsPtr());
        idx.getPositionsPtr()->forEachSubcolumnRecursively(callback);

@ -340,7 +340,7 @@ private:
        explicit Dictionary(MutableColumnPtr && column_unique, bool is_shared);
        explicit Dictionary(ColumnPtr column_unique, bool is_shared);

        const ColumnPtr & getColumnUniquePtr() const { return column_unique; }
        const WrappedPtr & getColumnUniquePtr() const { return column_unique; }
        WrappedPtr & getColumnUniquePtr() { return column_unique; }

        const IColumnUnique & getColumnUnique() const { return static_cast<const IColumnUnique &>(*column_unique); }

@ -273,12 +273,12 @@ void ColumnMap::getExtremes(Field & min, Field & max) const
    max = std::move(map_max_value);
}

void ColumnMap::forEachSubcolumn(ColumnCallback callback) const
void ColumnMap::forEachSubcolumn(MutableColumnCallback callback)
{
    callback(nested);
}

void ColumnMap::forEachSubcolumnRecursively(RecursiveColumnCallback callback) const
void ColumnMap::forEachSubcolumnRecursively(RecursiveMutableColumnCallback callback)
{
    callback(*nested);
    nested->forEachSubcolumnRecursively(callback);

@ -88,8 +88,8 @@ public:
    size_t byteSizeAt(size_t n) const override;
    size_t allocatedBytes() const override;
    void protect() override;
    void forEachSubcolumn(ColumnCallback callback) const override;
    void forEachSubcolumnRecursively(RecursiveColumnCallback callback) const override;
    void forEachSubcolumn(MutableColumnCallback callback) override;
    void forEachSubcolumnRecursively(RecursiveMutableColumnCallback callback) override;
    bool structureEquals(const IColumn & rhs) const override;
    double getRatioOfDefaultRows(double sample_ratio) const override;
    UInt64 getNumberOfDefaultRows() const override;

@ -130,13 +130,13 @@ public:

    ColumnPtr compress() const override;

    void forEachSubcolumn(ColumnCallback callback) const override
    void forEachSubcolumn(MutableColumnCallback callback) override
    {
        callback(nested_column);
        callback(null_map);
    }

    void forEachSubcolumnRecursively(RecursiveColumnCallback callback) const override
    void forEachSubcolumnRecursively(RecursiveMutableColumnCallback callback) override
    {
        callback(*nested_column);
        nested_column->forEachSubcolumnRecursively(callback);

@ -664,18 +664,18 @@ size_t ColumnObject::allocatedBytes() const
    return res;
}

void ColumnObject::forEachSubcolumn(ColumnCallback callback) const
void ColumnObject::forEachSubcolumn(MutableColumnCallback callback)
{
    for (const auto & entry : subcolumns)
        for (const auto & part : entry->data.data)
    for (auto & entry : subcolumns)
        for (auto & part : entry->data.data)
            callback(part);
}

void ColumnObject::forEachSubcolumnRecursively(RecursiveColumnCallback callback) const
void ColumnObject::forEachSubcolumnRecursively(RecursiveMutableColumnCallback callback)
{
    for (const auto & entry : subcolumns)
    for (auto & entry : subcolumns)
    {
        for (const auto & part : entry->data.data)
        for (auto & part : entry->data.data)
        {
            callback(*part);
            part->forEachSubcolumnRecursively(callback);

@ -206,8 +206,8 @@ public:
    size_t size() const override;
    size_t byteSize() const override;
    size_t allocatedBytes() const override;
    void forEachSubcolumn(ColumnCallback callback) const override;
    void forEachSubcolumnRecursively(RecursiveColumnCallback callback) const override;
    void forEachSubcolumn(MutableColumnCallback callback) override;
    void forEachSubcolumnRecursively(RecursiveMutableColumnCallback callback) override;
    void insert(const Field & field) override;
    void insertDefault() override;
    void insertFrom(const IColumn & src, size_t n) override;

@ -751,13 +751,13 @@ bool ColumnSparse::structureEquals(const IColumn & rhs) const
    return false;
}

void ColumnSparse::forEachSubcolumn(ColumnCallback callback) const
void ColumnSparse::forEachSubcolumn(MutableColumnCallback callback)
{
    callback(values);
    callback(offsets);
}

void ColumnSparse::forEachSubcolumnRecursively(RecursiveColumnCallback callback) const
void ColumnSparse::forEachSubcolumnRecursively(RecursiveMutableColumnCallback callback)
{
    callback(*values);
    values->forEachSubcolumnRecursively(callback);
Some files were not shown because too many files have changed in this diff.