Merge remote-tracking branch 'upstream/master' into min-file-segment-size

This commit is contained in:
kssenii 2023-06-20 13:31:44 +02:00
commit 5c4c2a3982
264 changed files with 3687 additions and 1130 deletions

View File

@ -12,6 +12,7 @@ jobs:
# don't use dockerhub push because this image updates so rarely
WoboqCodebrowser:
runs-on: [self-hosted, style-checker]
timeout-minutes: 420 # the task is pretty heavy, so there's an additional hour
steps:
- name: Set envs
run: |

2
.gitmodules vendored
View File

@ -19,7 +19,7 @@
url = https://github.com/google/googletest
[submodule "contrib/capnproto"]
path = contrib/capnproto
url = https://github.com/capnproto/capnproto
url = https://github.com/ClickHouse/capnproto
[submodule "contrib/double-conversion"]
path = contrib/double-conversion
url = https://github.com/google/double-conversion

View File

@ -6,8 +6,10 @@ rules:
level: warning
indent-sequences: consistent
line-length:
# there are some bash -c "", so this is OK
max: 300
# there are:
# - bash -c "", so this is OK
# - yaml in tests
max: 1000
level: warning
comments:
min-spaces-from-content: 1

View File

@ -4,7 +4,7 @@ if (SANITIZE OR NOT (
))
if (ENABLE_JEMALLOC)
message (${RECONFIGURE_MESSAGE_LEVEL}
"jemalloc is disabled implicitly: it doesn't work with sanitizers and can only be used with x86_64, aarch64, or ppc64le Linux or FreeBSD builds and RelWithDebInfo macOS builds.")
"jemalloc is disabled implicitly: it doesn't work with sanitizers and can only be used with x86_64, aarch64, or ppc64le Linux or FreeBSD builds and RelWithDebInfo macOS builds. Use -DENABLE_JEMALLOC=0")
endif ()
set (ENABLE_JEMALLOC OFF)
else ()

View File

@ -32,7 +32,7 @@ RUN arch=${TARGETARCH:-amd64} \
esac
ARG REPOSITORY="https://s3.amazonaws.com/clickhouse-builds/22.4/31c367d3cd3aefd316778601ff6565119fe36682/package_release"
ARG VERSION="23.5.2.7"
ARG VERSION="23.5.3.24"
ARG PACKAGES="clickhouse-keeper"
# user/group precreated explicitly with fixed uid/gid on purpose.

View File

@ -33,7 +33,7 @@ RUN arch=${TARGETARCH:-amd64} \
# lts / testing / prestable / etc
ARG REPO_CHANNEL="stable"
ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
ARG VERSION="23.5.2.7"
ARG VERSION="23.5.3.24"
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
# user/group precreated explicitly with fixed uid/gid on purpose.

View File

@ -22,7 +22,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list
ARG REPO_CHANNEL="stable"
ARG REPOSITORY="deb https://packages.clickhouse.com/deb ${REPO_CHANNEL} main"
ARG VERSION="23.5.2.7"
ARG VERSION="23.5.3.24"
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
# set non-empty deb_location_url url to create a docker image

View File

@ -20,6 +20,7 @@ For more information and documentation see https://clickhouse.com/.
- The amd64 image requires support for [SSE3 instructions](https://en.wikipedia.org/wiki/SSE3). Virtually all x86 CPUs after 2005 support SSE3.
- The arm64 image requires support for the [ARMv8.2-A architecture](https://en.wikipedia.org/wiki/AArch64#ARMv8.2-A). Most ARM CPUs after 2017 support ARMv8.2-A. A notable exception is Raspberry Pi 4 from 2019 whose CPU only supports ARMv8.0-A.
- Since ClickHouse 23.3, the Ubuntu image uses `ubuntu:22.04` as its base image; it therefore requires Docker version >= `20.10.10`, or running with `docker run --privileged` instead. Alternatively, try the ClickHouse Alpine image.
## How to use this image

View File

@ -12,10 +12,10 @@ RUN apt-get update --yes && \
# We need to get the repository's HEAD each time, so we invalidate the layers' cache
ARG CACHE_INVALIDATOR=0
RUN mkdir /sqlancer && \
wget -q -O- https://github.com/sqlancer/sqlancer/archive/master.tar.gz | \
wget -q -O- https://github.com/sqlancer/sqlancer/archive/main.tar.gz | \
tar zx -C /sqlancer && \
cd /sqlancer/sqlancer-master && \
mvn package -DskipTests && \
cd /sqlancer/sqlancer-main && \
mvn --no-transfer-progress package -DskipTests && \
rm -r /root/.m2
COPY run.sh /

View File

@ -16,7 +16,6 @@ def process_result(result_folder):
"TLPGroupBy",
"TLPHaving",
"TLPWhere",
"TLPWhereGroupBy",
"NoREC",
]
failed_tests = []

View File

@ -33,7 +33,7 @@ cd /workspace
for _ in $(seq 1 60); do if [[ $(wget -q 'localhost:8123' -O-) == 'Ok.' ]]; then break ; else sleep 1; fi ; done
cd /sqlancer/sqlancer-master
cd /sqlancer/sqlancer-main
TIMEOUT=300
NUM_QUERIES=1000

View File

@ -59,8 +59,7 @@ install_packages previous_release_package_folder
# available for dump via clickhouse-local
configure
# it contains some new settings, but we can safely remove it
rm /etc/clickhouse-server/config.d/merge_tree.xml
rm /etc/clickhouse-server/users.d/nonconst_timezone.xml
start
stop
@ -86,8 +85,7 @@ export USE_S3_STORAGE_FOR_MERGE_TREE=1
export ZOOKEEPER_FAULT_INJECTION=0
configure
# it contains some new settings, but we can safely remove it
rm /etc/clickhouse-server/config.d/merge_tree.xml
rm /etc/clickhouse-server/users.d/nonconst_timezone.xml
start

View File

@ -0,0 +1,19 @@
---
sidebar_position: 1
sidebar_label: 2023
---
# 2023 Changelog
### ClickHouse release v22.8.19.10-lts (989bc2fe8b0) FIXME as compared to v22.8.18.31-lts (4de7a95a544)
#### Bug Fix (user-visible misbehavior in an official stable release)
* Fix subtly broken copy-on-write of ColumnLowCardinality dictionary [#51064](https://github.com/ClickHouse/ClickHouse/pull/51064) ([Michael Kolupaev](https://github.com/al13n321)).
* Generate safe IVs [#51086](https://github.com/ClickHouse/ClickHouse/pull/51086) ([Salvatore Mesoraca](https://github.com/aiven-sal)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Fix a versions' tweak for tagged commits, improve version_helper [#51035](https://github.com/ClickHouse/ClickHouse/pull/51035) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Sqlancer has changed master to main [#51060](https://github.com/ClickHouse/ClickHouse/pull/51060) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

View File

@ -0,0 +1,22 @@
---
sidebar_position: 1
sidebar_label: 2023
---
# 2023 Changelog
### ClickHouse release v23.3.4.17-lts (2c99b73ff40) FIXME as compared to v23.3.3.52-lts (cb963c474db)
#### Bug Fix (user-visible misbehavior in an official stable release)
* Fix crash when Pool::Entry::disconnect() is called [#50334](https://github.com/ClickHouse/ClickHouse/pull/50334) ([Val Doroshchuk](https://github.com/valbok)).
* Avoid storing logs in Keeper containing unknown operation [#50751](https://github.com/ClickHouse/ClickHouse/pull/50751) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix subtly broken copy-on-write of ColumnLowCardinality dictionary [#51064](https://github.com/ClickHouse/ClickHouse/pull/51064) ([Michael Kolupaev](https://github.com/al13n321)).
* Generate safe IVs [#51086](https://github.com/ClickHouse/ClickHouse/pull/51086) ([Salvatore Mesoraca](https://github.com/aiven-sal)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Don't mark a part as broken on `Poco::TimeoutException` [#50811](https://github.com/ClickHouse/ClickHouse/pull/50811) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix a versions' tweak for tagged commits, improve version_helper [#51035](https://github.com/ClickHouse/ClickHouse/pull/51035) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Sqlancer has changed master to main [#51060](https://github.com/ClickHouse/ClickHouse/pull/51060) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

View File

@ -0,0 +1,22 @@
---
sidebar_position: 1
sidebar_label: 2023
---
# 2023 Changelog
### ClickHouse release v23.4.4.16-stable (747ba4fc6a0) FIXME as compared to v23.4.3.48-stable (d9199f8d3cc)
#### Bug Fix (user-visible misbehavior in an official stable release)
* Fix crash when Pool::Entry::disconnect() is called [#50334](https://github.com/ClickHouse/ClickHouse/pull/50334) ([Val Doroshchuk](https://github.com/valbok)).
* Fix iceberg V2 optional metadata parsing [#50974](https://github.com/ClickHouse/ClickHouse/pull/50974) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix subtly broken copy-on-write of ColumnLowCardinality dictionary [#51064](https://github.com/ClickHouse/ClickHouse/pull/51064) ([Michael Kolupaev](https://github.com/al13n321)).
* Generate safe IVs [#51086](https://github.com/ClickHouse/ClickHouse/pull/51086) ([Salvatore Mesoraca](https://github.com/aiven-sal)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Don't mark a part as broken on `Poco::TimeoutException` [#50811](https://github.com/ClickHouse/ClickHouse/pull/50811) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix a versions' tweak for tagged commits, improve version_helper [#51035](https://github.com/ClickHouse/ClickHouse/pull/51035) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Sqlancer has changed master to main [#51060](https://github.com/ClickHouse/ClickHouse/pull/51060) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

View File

@ -0,0 +1,26 @@
---
sidebar_position: 1
sidebar_label: 2023
---
# 2023 Changelog
### ClickHouse release v23.5.3.24-stable (76f54616d3b) FIXME as compared to v23.5.2.7-stable (5751aa1ab9f)
#### Bug Fix (user-visible misbehavior in an official stable release)
* Fix Log family table return wrong rows count after truncate [#50585](https://github.com/ClickHouse/ClickHouse/pull/50585) ([flynn](https://github.com/ucasfl)).
* Fix bug in `uniqExact` parallel merging [#50590](https://github.com/ClickHouse/ClickHouse/pull/50590) ([Nikita Taranov](https://github.com/nickitat)).
* Revert recent grace hash join changes [#50699](https://github.com/ClickHouse/ClickHouse/pull/50699) ([vdimir](https://github.com/vdimir)).
* Avoid storing logs in Keeper containing unknown operation [#50751](https://github.com/ClickHouse/ClickHouse/pull/50751) ([Antonio Andelic](https://github.com/antonio2368)).
* Add compat setting for non-const timezones [#50834](https://github.com/ClickHouse/ClickHouse/pull/50834) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix iceberg V2 optional metadata parsing [#50974](https://github.com/ClickHouse/ClickHouse/pull/50974) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix subtly broken copy-on-write of ColumnLowCardinality dictionary [#51064](https://github.com/ClickHouse/ClickHouse/pull/51064) ([Michael Kolupaev](https://github.com/al13n321)).
* Generate safe IVs [#51086](https://github.com/ClickHouse/ClickHouse/pull/51086) ([Salvatore Mesoraca](https://github.com/aiven-sal)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Don't mark a part as broken on `Poco::TimeoutException` [#50811](https://github.com/ClickHouse/ClickHouse/pull/50811) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix a versions' tweak for tagged commits, improve version_helper [#51035](https://github.com/ClickHouse/ClickHouse/pull/51035) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Sqlancer has changed master to main [#51060](https://github.com/ClickHouse/ClickHouse/pull/51060) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

View File

@ -233,6 +233,12 @@ libhdfs3 support HDFS namenode HA.
- `_path` — Path to the file.
- `_file` — Name of the file.
## Storage Settings {#storage-settings}
- [hdfs_truncate_on_insert](/docs/en/operations/settings/settings.md#hdfs-truncate-on-insert) - allows truncating the file before inserting into it. Disabled by default.
- [hdfs_create_multiple_files](/docs/en/operations/settings/settings.md#hdfs_allow_create_multiple_files) - allows creating a new file on each insert if the format has a suffix. Disabled by default.
- [hdfs_skip_empty_files](/docs/en/operations/settings/settings.md#hdfs_skip_empty_files) - allows skipping empty files while reading. Disabled by default. See the example below.
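As a quick illustration of applying these settings per query (a minimal sketch; the table name and HDFS URI are assumptions, not part of the original examples):
```sql
CREATE TABLE hdfs_csv (x UInt32)
ENGINE = HDFS('hdfs://hdfs1:9000/data.csv', 'CSV');

-- Overwrite the existing file instead of throwing an exception:
INSERT INTO hdfs_csv SETTINGS hdfs_truncate_on_insert = 1 VALUES (1), (2);

-- Return an empty result for empty files instead of throwing:
SELECT * FROM hdfs_csv SETTINGS hdfs_skip_empty_files = 1;
```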
**See Also**
- [Virtual columns](../../../engines/table-engines/index.md#table_engines-virtual_columns)

View File

@ -35,6 +35,10 @@ The table structure can differ from the original MySQL table structure:
- Column types may differ from those in the original MySQL table. ClickHouse tries to [cast](../../../engines/database-engines/mysql.md#data_types-support) values to the ClickHouse data types.
- The [external_table_functions_use_nulls](../../../operations/settings/settings.md#external-table-functions-use-nulls) setting defines how to handle Nullable columns. Default value: 1. If 0, the table function does not make Nullable columns and inserts default values instead of nulls. This is also applicable for NULL values inside arrays.
:::note
The MySQL table engine is currently not available in the ClickHouse builds for macOS ([issue](https://github.com/ClickHouse/ClickHouse/issues/21191)).
:::
**Engine Parameters**
- `host:port` — MySQL server address.

View File

@ -136,7 +136,7 @@ postgresql> SELECT * FROM test;
### Creating Table in ClickHouse, and connecting to PostgreSQL table created above
This example uses the [PostgreSQL table engine](/docs/en/engines/table-engines/integrations/postgresql.md) to connect the ClickHouse table to the PostgreSQL table:
This example uses the [PostgreSQL table engine](/docs/en/engines/table-engines/integrations/postgresql.md) to connect the ClickHouse table to the PostgreSQL table and to run both SELECT and INSERT statements against the PostgreSQL database:
``` sql
CREATE TABLE default.postgresql_table
@ -150,10 +150,21 @@ ENGINE = PostgreSQL('localhost:5432', 'public', 'test', 'postges_user', 'postgre
### Inserting initial data from PostgreSQL table into ClickHouse table, using a SELECT query
The [postgresql table function](/docs/en/sql-reference/table-functions/postgresql.md) copies the data from PostgreSQL to ClickHouse, which is often used for improving the query performance of the data by querying or performing analytics in ClickHouse rather than in PostgreSQL, or can also be used for migrating data from PostgreSQL to ClickHouse:
The [postgresql table function](/docs/en/sql-reference/table-functions/postgresql.md) copies the data from PostgreSQL to ClickHouse. This is often done to improve query performance by querying or running analytics in ClickHouse rather than in PostgreSQL, and it can also be used to migrate data from PostgreSQL to ClickHouse. Since we will be copying the data from PostgreSQL to ClickHouse, we will use a MergeTree table engine in ClickHouse and call it postgresql_copy:
``` sql
INSERT INTO default.postgresql_table
CREATE TABLE default.postgresql_copy
(
`float_nullable` Nullable(Float32),
`str` String,
`int_id` Int32
)
ENGINE = MergeTree
ORDER BY (int_id);
```
``` sql
INSERT INTO default.postgresql_copy
SELECT * FROM postgresql('localhost:5432', 'public', 'test', 'postges_user', 'postgres_password');
```
@ -164,13 +175,13 @@ If then performing ongoing synchronization between the PostgreSQL table and Clic
This would require keeping track of the max ID or timestamp previously added, such as the following:
``` sql
SELECT max(`int_id`) AS maxIntID FROM default.postgresql_table;
SELECT max(`int_id`) AS maxIntID FROM default.postgresql_copy;
```
Then insert the values from the PostgreSQL table that are greater than the max:
``` sql
INSERT INTO default.postgresql_table
INSERT INTO default.postgresql_copy
SELECT * FROM postgresql('localhost:5432', 'public', 'test', 'postges_user', 'postgres_password')
WHERE int_id > maxIntID;
```
@ -178,7 +189,7 @@ WHERE int_id > maxIntID;
### Selecting data from the resulting ClickHouse table
``` sql
SELECT * FROM postgresql_table WHERE str IN ('test');
SELECT * FROM postgresql_copy WHERE str IN ('test');
```
``` text

View File

@ -1,5 +1,5 @@
---
slug: /en/sql-reference/table-functions/redis
slug: /en/engines/table-engines/integrations/redis
sidebar_position: 43
sidebar_label: Redis
---
@ -34,7 +34,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name
- `primary` must be specified; only one column is supported in the primary key. The primary key is serialized in binary as the Redis key.
- Columns other than the primary key are serialized in binary as the Redis value, in the corresponding order.
- Queries with key equality or `IN` filtering are optimized into a multi-key lookup from Redis. Queries without a filter on the key result in a full table scan, which is a heavy operation.
## Usage Example {#usage-example}

View File

@ -127,6 +127,12 @@ CREATE TABLE table_with_asterisk (name String, value UInt32)
ENGINE = S3('https://clickhouse-public-datasets.s3.amazonaws.com/my-bucket/{some,another}_folder/*', 'CSV');
```
## Storage Settings {#storage-settings}
- [s3_truncate_on_insert](/docs/en/operations/settings/settings.md#s3-truncate-on-insert) - allows truncating the file before inserting into it. Disabled by default.
- [s3_create_multiple_files](/docs/en/operations/settings/settings.md#s3_allow_create_multiple_files) - allows creating a new file on each insert if the format has a suffix. Disabled by default.
- [s3_skip_empty_files](/docs/en/operations/settings/settings.md#s3_skip_empty_files) - allows skipping empty files while reading. Disabled by default. See the example below.
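For example, applying these settings per query might look like this (a minimal sketch; the bucket URL and table name are assumptions):
```sql
CREATE TABLE s3_csv (x UInt32)
ENGINE = S3('https://my-bucket.s3.amazonaws.com/data.csv', 'CSV');

-- Overwrite the existing object instead of throwing an exception:
INSERT INTO s3_csv SETTINGS s3_truncate_on_insert = 1 VALUES (1), (2);

-- Return an empty result for empty objects instead of throwing:
SELECT * FROM s3_csv SETTINGS s3_skip_empty_files = 1;
```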
## S3-related Settings {#settings}
The following settings can be set before query execution or placed into configuration file.

View File

@ -853,7 +853,7 @@ Tags:
- `max_data_part_size_bytes` — the maximum size of a part that can be stored on any of the volume's disks. If the size of a merged part is estimated to be bigger than `max_data_part_size_bytes`, the part is written to the next volume. Basically, this feature allows keeping new/small parts on a hot (SSD) volume and moving them to a cold (HDD) volume when they reach a large size. Do not use this setting if your policy has only one volume.
- `move_factor` — when the amount of available space gets lower than this factor, data automatically starts to move to the next volume, if any (by default, 0.1). ClickHouse sorts existing parts by size from largest to smallest (in descending order) and selects parts whose total size is sufficient to meet the `move_factor` condition. If the total size of all parts is insufficient, all parts will be moved.
- `prefer_not_to_merge` — Disables merging of data parts on this volume. When this setting is enabled, merging data on this volume is not allowed. This allows controlling how ClickHouse works with slow disks.
- `perform_ttl_move_on_insert` — Disables TTL moves on data part INSERT. By default, if we insert a data part that has already expired according to the TTL move rule, it immediately goes to the volume/disk declared in the move rule. This can significantly slow down inserts when the destination volume/disk is slow (e.g. S3).
- `perform_ttl_move_on_insert` — Disables TTL moves on data part INSERT. By default (if enabled), if we insert a data part that has already expired according to the TTL move rule, it immediately goes to the volume/disk declared in the move rule. This can significantly slow down inserts when the destination volume/disk is slow (e.g. S3). If disabled, an already expired data part is written to the default volume and then immediately moved to the TTL volume. See the sketch after this list.
- `load_balancing` - Policy for disk balancing, `round_robin` or `least_used`.
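As a sketch of how a table ties into such a policy (the policy name `hot_cold` and volume name `cold` are assumptions that would come from the storage configuration), a TTL move rule of the kind governed by `perform_ttl_move_on_insert` for already expired parts could look like this:
```sql
CREATE TABLE ttl_moves
(
    d DateTime,
    x UInt64
)
ENGINE = MergeTree
ORDER BY d
TTL d + INTERVAL 1 MONTH TO VOLUME 'cold'
SETTINGS storage_policy = 'hot_cold';
```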
Configuration examples:

View File

@ -92,3 +92,11 @@ $ echo -e "1,2\n3,4" | clickhouse-local -q "CREATE TABLE table (a Int64, b Int64
`PARTITION BY` — Optional. It is possible to create separate files by partitioning the data on a partition key. In most cases, you don't need a partition key, and if it is needed you generally don't need a partition key more granular than by month. Partitioning does not speed up queries (in contrast to the ORDER BY expression). You should never use too granular partitioning. Don't partition your data by client identifiers or names (instead, make client identifier or name the first column in the ORDER BY expression).
For partitioning by month, use the `toYYYYMM(date_column)` expression, where `date_column` is a column with a date of the type [Date](/docs/en/sql-reference/data-types/date.md). The partition names here have the `"YYYYMM"` format.
## Settings {#settings}
- [engine_file_empty_if_not_exists](/docs/en/operations/settings/settings.md#engine-file-emptyif-not-exists) - allows selecting empty data from a file that doesn't exist. Disabled by default.
- [engine_file_truncate_on_insert](/docs/en/operations/settings/settings.md#engine-file-truncate-on-insert) - allows truncating the file before inserting into it. Disabled by default.
- [engine_file_allow_create_multiple_files](/docs/en/operations/settings/settings.md#engine_file_allow_create_multiple_files) - allows creating a new file on each insert if the format has a suffix. Disabled by default.
- [engine_file_skip_empty_files](/docs/en/operations/settings/settings.md#engine_file_skip_empty_files) - allows skipping empty files while reading. Disabled by default.
- [storage_file_read_method](/docs/en/operations/settings/settings.md#storage_file_read_method) - method of reading data from storage file, one of: `read`, `pread`, `mmap`. The mmap method does not apply to clickhouse-server (it's intended for clickhouse-local). Default value: `pread` for clickhouse-server, `mmap` for clickhouse-local.
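For example, some of these settings can be applied per query (a minimal sketch; the table name is an assumption):
```sql
CREATE TABLE file_csv (x UInt32) ENGINE = File(CSV);

INSERT INTO file_csv VALUES (1), (2);

-- Replace the file contents instead of appending:
INSERT INTO file_csv SETTINGS engine_file_truncate_on_insert = 1 VALUES (3);

-- Return an empty result instead of throwing if the underlying file does not exist:
SELECT * FROM file_csv SETTINGS engine_file_empty_if_not_exists = 1;
```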

View File

@ -102,3 +102,7 @@ SELECT * FROM url_engine_table
`PARTITION BY` — Optional. It is possible to create separate files by partitioning the data on a partition key. In most cases, you don't need a partition key, and if it is needed you generally don't need a partition key more granular than by month. Partitioning does not speed up queries (in contrast to the ORDER BY expression). You should never use too granular partitioning. Don't partition your data by client identifiers or names (instead, make client identifier or name the first column in the ORDER BY expression).
For partitioning by month, use the `toYYYYMM(date_column)` expression, where `date_column` is a column with a date of the type [Date](/docs/en/sql-reference/data-types/date.md). The partition names here have the `"YYYYMM"` format.
## Storage Settings {#storage-settings}
- [engine_url_skip_empty_files](/docs/en/operations/settings/settings.md#engine_url_skip_empty_files) - allows skipping empty files while reading. Disabled by default.

View File

@ -470,6 +470,7 @@ The CSV format supports the output of totals and extremes the same way as `TabSe
- [input_format_csv_detect_header](/docs/en/operations/settings/settings-formats.md/#input_format_csv_detect_header) - automatically detect header with names and types in CSV format. Default value - `true`.
- [input_format_csv_skip_trailing_empty_lines](/docs/en/operations/settings/settings-formats.md/#input_format_csv_skip_trailing_empty_lines) - skip trailing empty lines at the end of data. Default value - `false`.
- [input_format_csv_trim_whitespaces](/docs/en/operations/settings/settings-formats.md/#input_format_csv_trim_whitespaces) - trim spaces and tabs in non-quoted CSV strings. Default value - `true`.
- [input_format_csv_allow_whitespace_or_tab_as_delimiter](/docs/en/operations/settings/settings-formats.md/#input_format_csv_allow_whitespace_or_tab_as_delimiter) - allow using whitespace or tab as the field delimiter in CSV strings. Default value - `false`.
## CSVWithNames {#csvwithnames}
@ -1877,13 +1878,13 @@ The table below shows supported data types and how they match ClickHouse [data t
| `string (uuid)` \** | [UUID](/docs/en/sql-reference/data-types/uuid.md) | `string (uuid)` \** |
| `fixed(16)` | [Int128/UInt128](/docs/en/sql-reference/data-types/int-uint.md) | `fixed(16)` |
| `fixed(32)` | [Int256/UInt256](/docs/en/sql-reference/data-types/int-uint.md) | `fixed(32)` |
| `record` | [Tuple](/docs/en/sql-reference/data-types/tuple.md) | `record` |
\* `bytes` is default, controlled by [output_format_avro_string_column_pattern](/docs/en/operations/settings/settings-formats.md/#output_format_avro_string_column_pattern)
\** [Avro logical types](https://avro.apache.org/docs/current/spec.html#Logical+Types)
Unsupported Avro data types: `record` (non-root), `map`
Unsupported Avro logical data types: `time-millis`, `time-micros`, `duration`
### Inserting Data {#inserting-data-1}
@ -1922,7 +1923,26 @@ Output Avro file compression and sync interval can be configured with [output_fo
Using the ClickHouse [DESCRIBE](/docs/en/sql-reference/statements/describe-table) function, you can quickly view the inferred format of an Avro file like the following example. This example includes the URL of a publicly accessible Avro file in the ClickHouse S3 public bucket:
``` DESCRIBE url('https://clickhouse-public-datasets.s3.eu-central-1.amazonaws.com/hits.avro','Avro');
```
DESCRIBE url('https://clickhouse-public-datasets.s3.eu-central-1.amazonaws.com/hits.avro','Avro');
```
```
┌─name───────────────────────┬─type────────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┐
│ WatchID │ Int64 │ │ │ │ │ │
│ JavaEnable │ Int32 │ │ │ │ │ │
│ Title │ String │ │ │ │ │ │
│ GoodEvent │ Int32 │ │ │ │ │ │
│ EventTime │ Int32 │ │ │ │ │ │
│ EventDate │ Date32 │ │ │ │ │ │
│ CounterID │ Int32 │ │ │ │ │ │
│ ClientIP │ Int32 │ │ │ │ │ │
│ ClientIP6 │ FixedString(16) │ │ │ │ │ │
│ RegionID │ Int32 │ │ │ │ │ │
...
│ IslandID │ FixedString(16) │ │ │ │ │ │
│ RequestNum │ Int32 │ │ │ │ │ │
│ RequestTry │ Int32 │ │ │ │ │ │
└────────────────────────────┴─────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘
```
## AvroConfluent {#data-format-avro-confluent}

View File

@ -83,6 +83,7 @@ The BACKUP and RESTORE statements take a list of DATABASE and TABLE names, a des
- [`compression_method`](/docs/en/sql-reference/statements/create/table.md/#column-compression-codecs) and compression_level
- `password` for the file on disk
- `base_backup`: the destination of the previous backup of this source. For example, `Disk('backups', '1.zip')`
- `structure_only`: if enabled, only the CREATE statements are backed up or restored, without the table data (see the example below)
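A schema-only backup and restore could be expressed like this (a minimal sketch; the table name and destination are assumptions):
```sql
BACKUP TABLE test.table TO Disk('backups', 'schema_only.zip') SETTINGS structure_only = 1;
RESTORE TABLE test.table FROM Disk('backups', 'schema_only.zip') SETTINGS structure_only = 1;
```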
### Usage examples
@ -398,4 +399,4 @@ To disallow concurrent backup/restore, you can use these settings respectively.
```
The default value for both is true, so by default concurrent backup/restores are allowed.
When these settings are false on a cluster, only 1 backup/restore is allowed to run on a cluster at a time.
When these settings are false on a cluster, only 1 backup/restore is allowed to run on a cluster at a time.

View File

@ -202,7 +202,7 @@ Default: 15
## dns_max_consecutive_failures
Max connection failures before dropping host from ClickHouse DNS cache
Max consecutive resolving failures before dropping a host from ClickHouse DNS cache
Type: UInt32

View File

@ -932,6 +932,38 @@ Result
" string "
```
### input_format_csv_allow_whitespace_or_tab_as_delimiter {#input_format_csv_allow_whitespace_or_tab_as_delimiter}
Allows using whitespace or tab as the field delimiter in CSV strings.
Default value: `false`.
**Examples**
Query
```bash
echo 'a b' | ./clickhouse local -q "select * from table FORMAT CSV" --input-format="CSV" --input_format_csv_allow_whitespace_or_tab_as_delimiter=true --format_csv_delimiter=' '
```
Result
```text
a b
```
Query
```bash
echo 'a b' | ./clickhouse local -q "select * from table FORMAT CSV" --input-format="CSV" --input_format_csv_allow_whitespace_or_tab_as_delimiter=true --format_csv_delimiter='\t'
```
Result
```text
a b
```
## Values format settings {#values-format-settings}
### input_format_values_interpret_expressions {#input_format_values_interpret_expressions}

View File

@ -3328,7 +3328,35 @@ Possible values:
Default value: `0`.
## s3_truncate_on_insert
## engine_file_allow_create_multiple_files {#engine_file_allow_create_multiple_files}
Enables or disables creating a new file on each insert in file engine tables if the format has the suffix (`JSON`, `ORC`, `Parquet`, etc.). If enabled, on each insert a new file will be created with a name following this pattern:
`data.Parquet` -> `data.1.Parquet` -> `data.2.Parquet`, etc.
Possible values:
- 0 — `INSERT` query appends new data to the end of the file.
- 1 — `INSERT` query creates a new file.
Default value: `0`.
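A minimal sketch of the behavior described above (the table name is an assumption):
```sql
CREATE TABLE file_parquet (x UInt32) ENGINE = File(Parquet);

INSERT INTO file_parquet VALUES (1);  -- writes data.Parquet

-- With the setting enabled, the next insert creates data.1.Parquet instead:
INSERT INTO file_parquet SETTINGS engine_file_allow_create_multiple_files = 1 VALUES (2);
```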
## engine_file_skip_empty_files {#engine_file_skip_empty_files}
Enables or disables skipping empty files in [File](../../engines/table-engines/special/file.md) engine tables.
Possible values:
- 0 — `SELECT` throws an exception if empty file is not compatible with requested format.
- 1 — `SELECT` returns empty result for empty file.
Default value: `0`.
## storage_file_read_method {#storage_file_read_method}
Method of reading data from storage file, one of: `read`, `pread`, `mmap`. The mmap method does not apply to clickhouse-server (it's intended for clickhouse-local).
Default value: `pread` for clickhouse-server, `mmap` for clickhouse-local.
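The read method can also be chosen explicitly per query (a minimal sketch; the file name is an assumption and must exist in the server's `user_files` directory):
```sql
-- Use plain read() calls instead of the default pread for this query:
SELECT count()
FROM file('data.csv', 'CSV', 'x UInt32')
SETTINGS storage_file_read_method = 'read';
```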
## s3_truncate_on_insert {#s3_truncate_on_insert}
Enables or disables truncation before inserts in S3 engine tables. If disabled, an exception will be thrown on insert attempts if an S3 object already exists.
@ -3338,7 +3366,29 @@ Possible values:
Default value: `0`.
## hdfs_truncate_on_insert
## s3_create_new_file_on_insert {#s3_create_new_file_on_insert}
Enables or disables creating a new file on each insert in s3 engine tables. If enabled, on each insert a new S3 object will be created with the key, similar to this pattern:
initial: `data.Parquet.gz` -> `data.1.Parquet.gz` -> `data.2.Parquet.gz`, etc.
Possible values:
- 0 — `INSERT` query appends new data to the end of the file.
- 1 — `INSERT` query creates a new file.
Default value: `0`.
## s3_skip_empty_files {#s3_skip_empty_files}
Enables or disables skipping empty files in [S3](../../engines/table-engines/integrations/s3.md) engine tables.
Possible values:
- 0 — `SELECT` throws an exception if empty file is not compatible with requested format.
- 1 — `SELECT` returns empty result for empty file.
Default value: `0`.
## hdfs_truncate_on_insert {#hdfs_truncate_on_insert}
Enables or disables truncation before an insert in hdfs engine tables. If disabled, an exception will be thrown on an attempt to insert if a file in HDFS already exists.
@ -3348,31 +3398,7 @@ Possible values:
Default value: `0`.
## engine_file_allow_create_multiple_files
Enables or disables creating a new file on each insert in file engine tables if the format has the suffix (`JSON`, `ORC`, `Parquet`, etc.). If enabled, on each insert a new file will be created with a name following this pattern:
`data.Parquet` -> `data.1.Parquet` -> `data.2.Parquet`, etc.
Possible values:
- 0 — `INSERT` query appends new data to the end of the file.
- 1 — `INSERT` query replaces existing content of the file with the new data.
Default value: `0`.
## s3_create_new_file_on_insert
Enables or disables creating a new file on each insert in s3 engine tables. If enabled, on each insert a new S3 object will be created with the key, similar to this pattern:
initial: `data.Parquet.gz` -> `data.1.Parquet.gz` -> `data.2.Parquet.gz`, etc.
Possible values:
- 0 — `INSERT` query appends new data to the end of the file.
- 1 — `INSERT` query replaces existing content of the file with the new data.
Default value: `0`.
## hdfs_create_new_file_on_insert
## hdfs_create_new_file_on_insert {#hdfs_create_new_file_on_insert}
Enables or disables creating a new file on each insert in HDFS engine tables. If enabled, on each insert a new HDFS file will be created with the name, similar to this pattern:
@ -3380,7 +3406,27 @@ initial: `data.Parquet.gz` -> `data.1.Parquet.gz` -> `data.2.Parquet.gz`, etc.
Possible values:
- 0 — `INSERT` query appends new data to the end of the file.
- 1 — `INSERT` query replaces existing content of the file with the new data.
- 1 — `INSERT` query creates a new file.
Default value: `0`.
## hdfs_skip_empty_files {#hdfs_skip_empty_files}
Enables or disables skipping empty files in [HDFS](../../engines/table-engines/integrations/hdfs.md) engine tables.
Possible values:
- 0 — `SELECT` throws an exception if empty file is not compatible with requested format.
- 1 — `SELECT` returns empty result for empty file.
Default value: `0`.
## engine_url_skip_empty_files {#engine_url_skip_empty_files}
Enables or disables skipping empty files in [URL](../../engines/table-engines/special/url.md) engine tables.
Possible values:
- 0 — `SELECT` throws an exception if empty file is not compatible with requested format.
- 1 — `SELECT` returns empty result for empty file.
Default value: `0`.
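For example (a minimal sketch; the URL is an assumption):
```sql
-- Return an empty result for an empty remote file instead of throwing:
SELECT * FROM url('https://example.com/data.csv', 'CSV', 'x UInt32')
SETTINGS engine_url_skip_empty_files = 1;
```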

View File

@ -32,7 +32,7 @@ For example, Decimal32(4) can contain numbers from -99999.9999 to 99999.9999 wit
Internally, data is represented as normal signed integers with the respective bit width. The real value ranges that can be stored in memory are a bit larger than those specified above; they are checked only on conversion from a string.
Because modern CPUs do not support 128-bit integers natively, operations on Decimal128 are emulated. Because of this Decimal128 works significantly slower than Decimal32/Decimal64.
Because modern CPUs do not support 128-bit and 256-bit integers natively, operations on Decimal128 and Decimal256 are emulated. Thus, Decimal128 and Decimal256 work significantly slower than Decimal32/Decimal64.
## Operations and Result Type
@ -59,6 +59,10 @@ Some functions on Decimal return result as Float64 (for example, var or stddev).
During calculations on Decimal, integer overflows might happen. Excessive digits in a fraction are discarded (not rounded). Excessive digits in integer part will lead to an exception.
:::warning
Overflow checks are not implemented for Decimal128 and Decimal256. In case of overflow, an incorrect result is returned and no exception is thrown.
:::
``` sql
SELECT toDecimal32(2, 4) AS x, x / 3
```

View File

@ -1509,10 +1509,12 @@ parseDateTimeBestEffort(time_string [, time_zone])
- A string containing 9..10 digit [unix timestamp](https://en.wikipedia.org/wiki/Unix_time).
- A string with a date and a time component: `YYYYMMDDhhmmss`, `DD/MM/YYYY hh:mm:ss`, `DD-MM-YY hh:mm`, `YYYY-MM-DD hh:mm:ss`, etc.
- A string with a date, but no time component: `YYYY`, `YYYYMM`, `YYYY*MM`, `DD/MM/YYYY`, `DD-MM-YY` etc.
- A string with a day and time: `DD`, `DD hh`, `DD hh:mm`. In this case `YYYY-MM` are substituted as `2000-01`.
- A string with a day and time: `DD`, `DD hh`, `DD hh:mm`. In this case `MM` is substituted by `01`.
- A string that includes the date and time along with time zone offset information: `YYYY-MM-DD hh:mm:ss ±h:mm`, etc. For example, `2020-12-12 17:36:00 -5:00`.
- A [syslog timestamp](https://datatracker.ietf.org/doc/html/rfc3164#section-4.1.2): `Mmm dd hh:mm:ss`. For example, `Jun 9 14:20:32`.
For all formats with a separator, the function parses month names expressed by their full name or by the first three letters of the month name. Examples: `24/DEC/18`, `24-Dec-18`, `01-September-2018`.
If the year is not specified, it is considered to be equal to the current year. If the resulting DateTime happens to be in the future (even by a second after the current moment), then the current year is substituted by the previous year.
**Returned value**
@ -1583,23 +1585,46 @@ Result:
Query:
``` sql
SELECT parseDateTimeBestEffort('10 20:19');
SELECT toYear(now()) as year, parseDateTimeBestEffort('10 20:19');
```
Result:
```response
┌─parseDateTimeBestEffort('10 20:19')─┐
│ 2000-01-10 20:19:00 │
└─────────────────────────────────────┘
┌─year─┬─parseDateTimeBestEffort('10 20:19')─┐
│ 2023 │ 2023-01-10 20:19:00 │
└──────┴─────────────────────────────────────┘
```
Query:
``` sql
WITH
now() AS ts_now,
formatDateTime(ts_around, '%b %e %T') AS syslog_arg
SELECT
ts_now,
syslog_arg,
parseDateTimeBestEffort(syslog_arg)
FROM (SELECT arrayJoin([ts_now - 30, ts_now + 30]) AS ts_around);
```
Result:
```response
┌──────────────ts_now─┬─syslog_arg──────┬─parseDateTimeBestEffort(syslog_arg)─┐
│ 2023-06-30 23:59:30 │ Jun 30 23:59:00 │ 2023-06-30 23:59:00 │
│ 2023-06-30 23:59:30 │ Jul 1 00:00:00 │ 2022-07-01 00:00:00 │
└─────────────────────┴─────────────────┴─────────────────────────────────────┘
```
**See Also**
- [RFC 1123](https://tools.ietf.org/html/rfc1123)
- [RFC 1123](https://datatracker.ietf.org/doc/html/rfc1123)
- [toDate](#todate)
- [toDateTime](#todatetime)
- [ISO 8601 announcement by @xkcd](https://xkcd.com/1179/)
- [RFC 3164](https://datatracker.ietf.org/doc/html/rfc3164#section-4.1.2)
## parseDateTimeBestEffortUS

View File

@ -55,6 +55,9 @@ With the described implementation now we can see what can negatively affect 'DEL
- Table having a very large number of data parts
- Having a lot of data in Compact parts—in a Compact part, all columns are stored in one file.
:::note
Currently, lightweight delete does not work for tables with projections, because rows in a projection may be affected and would require the projection to be rebuilt. Rebuilding a projection would make the deletion no longer lightweight, so this is not supported.
:::
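For reference, a lightweight delete is issued with the `DELETE FROM` statement discussed above (a minimal sketch; the table and column names are assumptions):
```sql
DELETE FROM hits WHERE CounterID = 12345;
```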
## Related content

View File

@ -196,6 +196,16 @@ SELECT count(*) FROM file('big_dir/**/file002', 'CSV', 'name String, value UInt3
- `_path` — Path to the file.
- `_file` — Name of the file.
## Settings
- [engine_file_empty_if_not_exists](/docs/en/operations/settings/settings.md#engine-file-emptyif-not-exists) - allows selecting empty data from a file that doesn't exist. Disabled by default.
- [engine_file_truncate_on_insert](/docs/en/operations/settings/settings.md#engine-file-truncate-on-insert) - allows truncating the file before inserting into it. Disabled by default.
- [engine_file_allow_create_multiple_files](/docs/en/operations/settings/settings.md#engine_file_allow_create_multiple_files) - allows creating a new file on each insert if the format has a suffix. Disabled by default.
- [engine_file_skip_empty_files](/docs/en/operations/settings/settings.md#engine_file_skip_empty_files) - allows skipping empty files while reading. Disabled by default.
- [storage_file_read_method](/docs/en/operations/settings/settings.md#storage_file_read_method) - method of reading data from storage file, one of: `read`, `pread`, `mmap` (mmap only for clickhouse-local). Default value: `pread` for clickhouse-server, `mmap` for clickhouse-local.
**See Also**
- [Virtual columns](/docs/en/engines/table-engines/index.md#table_engines-virtual_columns)

View File

@ -1,7 +1,7 @@
---
slug: /en/sql-reference/table-functions/gcs
sidebar_position: 45
sidebar_label: s3
sidebar_label: gcs
keywords: [gcs, bucket]
---

View File

@ -97,6 +97,12 @@ FROM hdfs('hdfs://hdfs1:9000/big_dir/file{0..9}{0..9}{0..9}', 'CSV', 'name Strin
- `_path` — Path to the file.
- `_file` — Name of the file.
## Storage Settings {#storage-settings}
- [hdfs_truncate_on_insert](/docs/en/operations/settings/settings.md#hdfs-truncate-on-insert) - allows truncating the file before inserting into it. Disabled by default.
- [hdfs_create_multiple_files](/docs/en/operations/settings/settings.md#hdfs_allow_create_multiple_files) - allows creating a new file on each insert if the format has a suffix. Disabled by default.
- [hdfs_skip_empty_files](/docs/en/operations/settings/settings.md#hdfs_skip_empty_files) - allows skipping empty files while reading. Disabled by default.
**See Also**
- [Virtual columns](../../engines/table-engines/index.md#table_engines-virtual_columns)

View File

@ -107,6 +107,30 @@ SELECT * FROM mysql('localhost:3306', 'test', 'test', 'bayonet', '123');
└────────┴───────┘
```
Copying data from MySQL table into ClickHouse table:
```sql
CREATE TABLE mysql_copy
(
`id` UInt64,
`datetime` DateTime('UTC'),
`description` String
)
ENGINE = MergeTree
ORDER BY (id,datetime);
INSERT INTO mysql_copy
SELECT * FROM mysql('host:port', 'database', 'table', 'user', 'password');
```
Or, to copy only an incremental batch from MySQL based on the current maximum id:
```sql
INSERT INTO mysql_copy
SELECT * FROM mysql('host:port', 'database', 'table', 'user', 'password')
WHERE id > (SELECT max(id) from mysql_copy);
```
**See Also**
- [The MySQL table engine](../../engines/table-engines/integrations/mysql.md)

View File

@ -1,10 +1,10 @@
---
slug: /en/sql-reference/table-functions/redis
sidebar_position: 10
sidebar_label: Redis
sidebar_position: 43
sidebar_label: redis
---
# Redis
# redis
This table function allows integrating ClickHouse with [Redis](https://redis.io/).

View File

@ -202,6 +202,12 @@ FROM s3(
LIMIT 5;
```
## Storage Settings {#storage-settings}
- [s3_truncate_on_insert](/docs/en/operations/settings/settings.md#s3-truncate-on-insert) - allows truncating the file before inserting into it. Disabled by default.
- [s3_create_multiple_files](/docs/en/operations/settings/settings.md#s3_allow_create_multiple_files) - allows creating a new file on each insert if the format has a suffix. Disabled by default.
- [s3_skip_empty_files](/docs/en/operations/settings/settings.md#s3_skip_empty_files) - allows skipping empty files while reading. Disabled by default. See the example below.
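These settings can be applied per query like this (a minimal sketch; the bucket URL is an assumption):
```sql
-- Skip empty objects matched by the glob instead of throwing:
SELECT count()
FROM s3('https://my-bucket.s3.amazonaws.com/logs/*.csv', 'CSV', 'x UInt32')
SETTINGS s3_skip_empty_files = 1;
```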
**See Also**
- [S3 engine](../../engines/table-engines/integrations/s3.md)

View File

@ -53,6 +53,10 @@ Character `|` inside patterns is used to specify failover addresses. They are it
- `_path` — Path to the `URL`.
- `_file` — Resource name of the `URL`.
## Storage Settings {#storage-settings}
- [engine_url_skip_empty_files](/docs/en/operations/settings/settings.md#engine_url_skip_empty_files) - allows skipping empty files while reading. Disabled by default.
**See Also**
- [Virtual columns](/docs/en/engines/table-engines/index.md#table_engines-virtual_columns)

View File

@ -31,7 +31,7 @@ sidebar_label: Decimal
## Internal Representation {#vnutrennee-predstavlenie}
Internally, data is represented as signed integers of the corresponding bit width. The real value ranges that can be stored in memory are somewhat larger than the declared ones; the declared Decimal ranges are checked only when a number is parsed from its string representation.
Because modern CPUs do not support 128-bit numbers natively, operations on Decimal128 are emulated in software. As a result, Decimal128 is several times slower than Decimal32/Decimal64.
Because modern CPUs do not support 128-bit and 256-bit numbers natively, operations on Decimal128 and Decimal256 are emulated in software. As a result, these types are several times slower than Decimal32/Decimal64.
## Operations and Result Types {#operatsii-i-tipy-rezultata}
@ -59,6 +59,10 @@ sidebar_label: Decimal
Integer overflows may occur during operations on the Decimal type. Excessive digits in the fractional part are discarded (not rounded), while excessive digits in the integer part lead to an exception.
:::warning
Overflow checks are not implemented for Decimal128 and Decimal256. In case of overflow, an incorrect result is returned and no exception is thrown.
:::
``` sql
SELECT toDecimal32(2, 4) AS x, x / 3
```

View File

@ -1223,10 +1223,12 @@ parseDateTimeBestEffort(time_string[, time_zone])
- A [Unix timestamp](https://ru.wikipedia.org/wiki/Unix-время) in string representation, 9 or 10 characters long.
- A string with a date and time: `YYYYMMDDhhmmss`, `DD/MM/YYYY hh:mm:ss`, `DD-MM-YY hh:mm`, `YYYY-MM-DD hh:mm:ss`, etc.
- A string with a date but no time: `YYYY`, `YYYYMM`, `YYYY*MM`, `DD/MM/YYYY`, `DD-MM-YY`, etc.
- A string with a day and time: `DD`, `DD hh`, `DD hh:mm`. In this case `YYYY-MM` is taken to be `2000-01`.
- A string with a day and time: `DD`, `DD hh`, `DD hh:mm`. In this case `MM` is taken to be `01`.
- A string containing a date and time together with time zone offset information: `YYYY-MM-DD hh:mm:ss ±h:mm`, etc. For example, `2020-12-12 17:36:00 -5:00`.
- A string containing a date and time in the [syslog timestamp](https://datatracker.ietf.org/doc/html/rfc3164#section-4.1.2) format: `Mmm dd hh:mm:ss`. For example, `Jun 9 14:20:32`.
For all formats with a separator, the function recognizes month names given as the full English month name or as the first three letters of the month name. Examples: `24/DEC/18`, `24-Dec-18`, `01-September-2018`.
If the year is not specified, the current year is substituted. If the resulting time happens to be in the future (even one second ahead of the current moment), the current year is replaced by the previous one.
**Returned value**
@ -1297,23 +1299,46 @@ AS parseDateTimeBestEffort;
Query:
``` sql
SELECT parseDateTimeBestEffort('10 20:19');
SELECT toYear(now()) as year, parseDateTimeBestEffort('10 20:19');
```
Result:
``` text
┌─parseDateTimeBestEffort('10 20:19')─┐
│ 2000-01-10 20:19:00 │
└─────────────────────────────────────┘
┌─year─┬─parseDateTimeBestEffort('10 20:19')─┐
│ 2023 │ 2023-01-10 20:19:00 │
└──────┴─────────────────────────────────────┘
```
Query:
``` sql
WITH
now() AS ts_now,
formatDateTime(ts_around, '%b %e %T') AS syslog_arg
SELECT
ts_now,
syslog_arg,
parseDateTimeBestEffort(syslog_arg)
FROM (SELECT arrayJoin([ts_now - 30, ts_now + 30]) AS ts_around);
```
Result:
``` text
┌──────────────ts_now─┬─syslog_arg──────┬─parseDateTimeBestEffort(syslog_arg)─┐
│ 2023-06-30 23:59:30 │ Jun 30 23:59:00 │ 2023-06-30 23:59:00 │
│ 2023-06-30 23:59:30 │ Jul 1 00:00:00 │ 2022-07-01 00:00:00 │
└─────────────────────┴─────────────────┴─────────────────────────────────────┘
```
**See Also**
- [ISO 8601 announcement by @xkcd](https://xkcd.com/1179/)
- [RFC 1123](https://tools.ietf.org/html/rfc1123)
- [RFC 1123](https://datatracker.ietf.org/doc/html/rfc1123)
- [toDate](#todate)
- [toDateTime](#todatetime)
- [RFC 3164](https://datatracker.ietf.org/doc/html/rfc3164#section-4.1.2)
## parseDateTimeBestEffortUS {#parsedatetimebesteffortUS}

View File

@ -1,6 +1,6 @@
---
slug: /ru/whats-new/changelog/2017
sidebar_position: 6
sidebar_position: 60
sidebar_label: 2017
title: 2017 Changelog
---

View File

@ -1,6 +1,6 @@
---
slug: /ru/whats-new/changelog/2018
sidebar_position: 5
sidebar_position: 50
sidebar_label: 2018
title: 2018 Changelog
---

View File

@ -1,6 +1,6 @@
---
slug: /ru/whats-new/changelog/2019
sidebar_position: 4
sidebar_position: 40
sidebar_label: 2019
title: 2019 Changelog
---

View File

@ -1,6 +1,6 @@
---
slug: /ru/whats-new/changelog/2020
sidebar_position: 3
sidebar_position: 30
sidebar_label: 2020
title: 2020 Changelog
---

View File

@ -1,6 +1,6 @@
---
slug: /ru/whats-new/changelog/2021
sidebar_position: 2
sidebar_position: 20
sidebar_label: 2021
title: 2021 Changelog
---

View File

@ -0,0 +1,10 @@
---
slug: /ru/whats-new/changelog/2022
sidebar_position: 10
sidebar_label: 2022
title: 2022 Changelog
---
import Changelog from '@site/docs/en/whats-new/changelog/2022.md';
<Changelog />

View File

@ -2,5 +2,5 @@ label: 'Changelog'
collapsible: true
collapsed: true
link:
type: doc
id: ru/whats-new/changelog/index
type: generated-index
title: Changelog

View File

@ -1,7 +1,7 @@
---
sidebar_position: 1
sidebar_label: 2022
title: 2022 Changelog
sidebar_label: 2023
title: 2023 Changelog
slug: /ru/whats-new/changelog/index
---

View File

@ -6,4 +6,4 @@ sidebar_label: Changelog
# Changelog
You can view the latest Changelog at [https://clickhouse.com/docs/en/whats-new/changelog/](https://clickhouse.com/docs/en/whats-new/changelog/)
You can view the latest Changelog at [https://clickhouse.com/docs/en/whats-new/changelog/](/docs/en/whats-new/changelog/index.md)

View File

@ -409,8 +409,15 @@ if (ENABLE_CLICKHOUSE_KEEPER_CONVERTER)
list(APPEND CLICKHOUSE_BUNDLE clickhouse-keeper-converter)
endif ()
if (ENABLE_CLICKHOUSE_KEEPER_CLIENT)
add_custom_target (clickhouse-keeper-client ALL COMMAND ${CMAKE_COMMAND} -E create_symlink clickhouse clickhouse-keeper-client DEPENDS clickhouse)
install (FILES "${CMAKE_CURRENT_BINARY_DIR}/clickhouse-keeper-client" DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse)
if (NOT BUILD_STANDALONE_KEEPER)
add_custom_target (clickhouse-keeper-client ALL COMMAND ${CMAKE_COMMAND} -E create_symlink clickhouse clickhouse-keeper-client DEPENDS clickhouse)
install (FILES "${CMAKE_CURRENT_BINARY_DIR}/clickhouse-keeper-client" DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse)
# symlink to standalone keeper binary
else ()
add_custom_target (clickhouse-keeper-client ALL COMMAND ${CMAKE_COMMAND} -E create_symlink clickhouse-keeper clickhouse-keeper-client DEPENDS clickhouse-keeper)
install (FILES "${CMAKE_CURRENT_BINARY_DIR}/clickhouse-keeper-client" DESTINATION "${CMAKE_INSTALL_BINDIR}" COMPONENT clickhouse-keeper)
endif ()
list(APPEND CLICKHOUSE_BUNDLE clickhouse-keeper-client)
endif ()
if (ENABLE_CLICKHOUSE_DISKS)

View File

@ -977,13 +977,7 @@ void Client::addOptions(OptionsDescription & options_description)
("connection", po::value<std::string>(), "connection to use (from the client config), by default connection name is hostname")
("secure,s", "Use TLS connection")
("user,u", po::value<std::string>()->default_value("default"), "user")
/** If "--password [value]" is used but the value is omitted, the bad argument exception will be thrown.
* implicit_value is used to avoid this exception (to allow user to type just "--password")
* Since currently boost provides no way to check if a value has been set implicitly for an option,
* the "\n" is used to distinguish this case because there is hardly a chance a user would use "\n"
* as the password.
*/
("password", po::value<std::string>()->implicit_value("\n", ""), "password")
("password", po::value<std::string>(), "password")
("ask-password", "ask-password")
("quota_key", po::value<std::string>(), "A string to differentiate quotas when the user have keyed quotas configured on server")
@ -1397,6 +1391,14 @@ void Client::readArguments(
arg = argv[arg_num];
addMultiquery(arg, common_arguments);
}
else if (arg == "--password" && ((arg_num + 1) >= argc || std::string_view(argv[arg_num + 1]).starts_with('-')))
{
common_arguments.emplace_back(arg);
/// No password was provided by user. Add '\n' as implicit password,
/// which encodes that client should ask user for the password.
/// '\n' is used because there is hardly a chance that a user would use '\n' as a password.
common_arguments.emplace_back("\n");
}
else
common_arguments.emplace_back(arg);
}

View File

@ -127,42 +127,42 @@ void KeeperClient::defineOptions(Poco::Util::OptionSet & options)
options.addOption(
Poco::Util::Option("host", "h", "server hostname. default `localhost`")
.argument("host")
.argument("<host>")
.binding("host"));
options.addOption(
Poco::Util::Option("port", "p", "server port. default `2181`")
.argument("port")
.argument("<port>")
.binding("port"));
options.addOption(
Poco::Util::Option("query", "q", "will execute given query, then exit.")
.argument("query")
.argument("<query>")
.binding("query"));
options.addOption(
Poco::Util::Option("connection-timeout", "", "set connection timeout in seconds. default 10s.")
.argument("connection-timeout")
.argument("<seconds>")
.binding("connection-timeout"));
options.addOption(
Poco::Util::Option("session-timeout", "", "set session timeout in seconds. default 10s.")
.argument("session-timeout")
.argument("<seconds>")
.binding("session-timeout"));
options.addOption(
Poco::Util::Option("operation-timeout", "", "set operation timeout in seconds. default 10s.")
.argument("operation-timeout")
.argument("<seconds>")
.binding("operation-timeout"));
options.addOption(
Poco::Util::Option("history-file", "", "set path of history file. default `~/.keeper-client-history`")
.argument("history-file")
.argument("<file>")
.binding("history-file"));
options.addOption(
Poco::Util::Option("log-level", "", "set log level")
.argument("log-level")
.argument("<level>")
.binding("log-level"));
}

View File

@ -112,6 +112,18 @@ if (BUILD_STANDALONE_KEEPER)
clickhouse-keeper.cpp
)
# List of resources for clickhouse-keeper client
if (ENABLE_CLICKHOUSE_KEEPER_CLIENT)
list(APPEND CLICKHOUSE_KEEPER_STANDALONE_SOURCES
${CMAKE_CURRENT_SOURCE_DIR}/../../programs/keeper-client/KeeperClient.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../programs/keeper-client/Commands.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../programs/keeper-client/Parser.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Client/LineReader.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Client/ReplxxLineReader.cpp
)
endif()
clickhouse_add_executable(clickhouse-keeper ${CLICKHOUSE_KEEPER_STANDALONE_SOURCES})
# Remove some redundant dependencies
@ -122,6 +134,10 @@ if (BUILD_STANDALONE_KEEPER)
target_include_directories(clickhouse-keeper PUBLIC "${CMAKE_CURRENT_BINARY_DIR}/../../src/Core/include") # uses some includes from core
target_include_directories(clickhouse-keeper PUBLIC "${CMAKE_CURRENT_BINARY_DIR}/../../src") # uses some includes from common
if (ENABLE_CLICKHOUSE_KEEPER_CLIENT AND TARGET ch_rust::skim)
target_link_libraries(clickhouse-keeper PRIVATE ch_rust::skim)
endif()
target_link_libraries(clickhouse-keeper
PRIVATE
ch_contrib::abseil_swiss_tables

View File

@ -34,6 +34,8 @@
#include "Core/Defines.h"
#include "config.h"
#include "config_version.h"
#include "config_tools.h"
#if USE_SSL
# include <Poco/Net/Context.h>
@ -131,7 +133,10 @@ int Keeper::run()
if (config().hasOption("help"))
{
Poco::Util::HelpFormatter help_formatter(Keeper::options());
auto header_str = fmt::format("{} [OPTION] [-- [ARG]...]\n"
auto header_str = fmt::format("{0} [OPTION] [-- [ARG]...]\n"
#if ENABLE_CLICKHOUSE_KEEPER_CLIENT
"{0} client [OPTION]\n"
#endif
"positional arguments can be used to rewrite config.xml properties, for example, --http_port=8010",
commandName());
help_formatter.setHeader(header_str);

View File

@ -1,6 +1,30 @@
#include <Common/StringUtils/StringUtils.h>
#include "config_tools.h"
int mainEntryClickHouseKeeper(int argc, char ** argv);
#if ENABLE_CLICKHOUSE_KEEPER_CLIENT
int mainEntryClickHouseKeeperClient(int argc, char ** argv);
#endif
int main(int argc_, char ** argv_)
{
#if ENABLE_CLICKHOUSE_KEEPER_CLIENT
if (argc_ >= 2)
{
/// 'clickhouse-keeper --client ...' and 'clickhouse-keeper client ...' are OK
if (strcmp(argv_[1], "--client") == 0 || strcmp(argv_[1], "client") == 0)
{
argv_[1] = argv_[0];
return mainEntryClickHouseKeeperClient(--argc_, argv_ + 1);
}
}
if (argc_ > 0 && (strcmp(argv_[0], "clickhouse-keeper-client") == 0 || endsWith(argv_[0], "/clickhouse-keeper-client")))
return mainEntryClickHouseKeeperClient(argc_, argv_);
#endif
return mainEntryClickHouseKeeper(argc_, argv_);
}

View File

@ -1705,7 +1705,6 @@ try
#endif
/// Must be done after initialization of `servers`, because async_metrics will access `servers` variable from its thread.
async_metrics.start();
{

View File

@ -333,7 +333,7 @@ void ContextAccess::calculateAccessRights() const
boost::algorithm::join(roles_info->getCurrentRolesNames(), ", "),
boost::algorithm::join(roles_info->getEnabledRolesNames(), ", "));
}
LOG_TRACE(trace_log, "Settings: readonly={}, allow_ddl={}, allow_introspection_functions={}", params.readonly, params.allow_ddl, params.allow_introspection);
LOG_TRACE(trace_log, "Settings: readonly = {}, allow_ddl = {}, allow_introspection_functions = {}", params.readonly, params.allow_ddl, params.allow_introspection);
LOG_TRACE(trace_log, "List of all grants: {}", access->toString());
LOG_TRACE(trace_log, "List of all grants including implicit: {}", access_with_implicit->toString());
}

View File

@ -5,6 +5,7 @@
#include <Access/GSSAcceptor.h>
#include <base/defines.h>
#include <base/types.h>
#include <base/extended_types.h>
#include <chrono>
#include <map>
@ -42,7 +43,7 @@ public:
private:
struct LDAPCacheEntry
{
std::size_t last_successful_params_hash = 0;
UInt128 last_successful_params_hash = 0;
std::chrono::steady_clock::time_point last_successful_authentication_timestamp;
LDAPClient::SearchResultsList last_successful_role_search_results;
};

View File

@ -185,11 +185,10 @@ void BackupCoordinationReplicatedTables::addPartNames(PartNamesForTableReplica &
const String & other_replica_name = **other.replica_names.begin();
throw Exception(
ErrorCodes::CANNOT_BACKUP_TABLE,
"Table {} on replica {} has part {} which is different from the part on replica {}. Must be the same",
table_name_for_logs,
replica_name,
part_name,
other_replica_name);
"Table {} on replica {} has part {} different from the part on replica {} "
"(checksum '{}' on replica {} != checksum '{}' on replica {})",
table_name_for_logs, replica_name, part_name, other_replica_name,
getHexUIntLowercase(checksum), replica_name, getHexUIntLowercase(other.checksum), other_replica_name);
}
}

View File

@ -85,6 +85,9 @@ void BackupCoordinationStageSync::setError(const String & current_host, const Ex
writeException(exception, buf, true);
zookeeper->createIfNotExists(zookeeper_path + "/error", buf.str());
/// When backup/restore fails, it removes the nodes from Zookeeper.
/// Sometimes it fails to remove all nodes. It's possible that it removes /error node, but fails to remove /stage node,
/// so the following line tries to preserve the error status.
auto code = zookeeper->trySet(zookeeper_path, Stage::ERROR);
if (code != Coordination::Error::ZOK)
throw zkutil::KeeperException(code, zookeeper_path);

View File

@ -152,8 +152,7 @@ namespace
}
catch (...)
{
if (coordination)
coordination->setError(Exception(getCurrentExceptionMessageAndPattern(true, true), getCurrentExceptionCode()));
sendExceptionToCoordination(coordination, Exception(getCurrentExceptionMessageAndPattern(true, true), getCurrentExceptionCode()));
}
}

View File

@ -18,7 +18,7 @@ ConnectionPoolPtr ConnectionPoolFactory::get(
String client_name,
Protocol::Compression compression,
Protocol::Secure secure,
Int64 priority)
Priority priority)
{
Key key{
max_connections, host, port, default_database, user, password, quota_key, cluster, cluster_secret, client_name, compression, secure, priority};
@ -74,7 +74,7 @@ size_t ConnectionPoolFactory::KeyHash::operator()(const ConnectionPoolFactory::K
hash_combine(seed, hash_value(k.client_name));
hash_combine(seed, hash_value(k.compression));
hash_combine(seed, hash_value(k.secure));
hash_combine(seed, hash_value(k.priority));
hash_combine(seed, hash_value(k.priority.value));
return seed;
}

View File

@ -1,6 +1,7 @@
#pragma once
#include <Common/PoolBase.h>
#include <Common/Priority.h>
#include <Client/Connection.h>
#include <IO/ConnectionTimeouts.h>
#include <Core/Settings.h>
@ -34,7 +35,7 @@ public:
const Settings * settings = nullptr,
bool force_connected = true) = 0;
virtual Int64 getPriority() const { return 1; }
virtual Priority getPriority() const { return Priority{1}; }
};
using ConnectionPoolPtr = std::shared_ptr<IConnectionPool>;
@ -60,7 +61,7 @@ public:
const String & client_name_,
Protocol::Compression compression_,
Protocol::Secure secure_,
Int64 priority_ = 1)
Priority priority_ = Priority{1})
: Base(max_connections_,
&Poco::Logger::get("ConnectionPool (" + host_ + ":" + toString(port_) + ")")),
host(host_),
@ -103,7 +104,7 @@ public:
return host + ":" + toString(port);
}
Int64 getPriority() const override
Priority getPriority() const override
{
return priority;
}
@ -134,7 +135,7 @@ private:
String client_name;
Protocol::Compression compression; /// Whether to compress data when interacting with the server.
Protocol::Secure secure; /// Whether to encrypt data when interacting with the server.
Int64 priority; /// priority from <remote_servers>
Priority priority; /// priority from <remote_servers>
};
/**
@ -157,7 +158,7 @@ public:
String client_name;
Protocol::Compression compression;
Protocol::Secure secure;
Int64 priority;
Priority priority;
};
struct KeyHash
@ -180,7 +181,7 @@ public:
String client_name,
Protocol::Compression compression,
Protocol::Secure secure,
Int64 priority);
Priority priority);
private:
mutable std::mutex mutex;
using ConnectionPoolWeakPtr = std::weak_ptr<IConnectionPool>;

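The hunks above swap the raw Int64 priority fields of the connection pools for a dedicated Priority type, constructed as Priority{1} and hashed through its .value member. A minimal sketch of the kind of strong typedef this relies on is given below; only the member name value and the lower-value-wins convention (see the PoolWithFailoverBase comment further down) come from the diff, the rest is an assumption for illustration.

// Hypothetical sketch of a strong-typedef priority wrapper, inferred only from
// the Priority{1} construction and the .value access shown above.
#include <cstdint>

struct Priority
{
    std::int64_t value = 0;   // lower value means the pool is tried earlier (assumed convention)

    // Defaulted three-way comparison so pools can be ordered by priority directly.
    auto operator<=>(const Priority &) const = default;
};

// Usage mirroring the hunks: a default priority of 1, explicit value access for hashing.
// Priority p = Priority{1};
// std::size_t h = std::hash<std::int64_t>{}(p.value);
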
View File

@ -71,7 +71,7 @@ IConnectionPool::Entry ConnectionPoolWithFailover::get(const ConnectionTimeouts
return Base::get(max_ignored_errors, fallback_to_stale_replicas, try_get_entry, get_priority);
}
Int64 ConnectionPoolWithFailover::getPriority() const
Priority ConnectionPoolWithFailover::getPriority() const
{
return (*std::max_element(nested_pools.begin(), nested_pools.end(), [](const auto & a, const auto & b)
{

View File

@ -48,7 +48,7 @@ public:
const Settings * settings,
bool force_connected) override; /// From IConnectionPool
Int64 getPriority() const override; /// From IConnectionPool
Priority getPriority() const override; /// From IConnectionPool
/** Allocates up to the specified number of connections to work.
* Connections provide access to different replicas of one shard.

View File

@ -151,13 +151,13 @@ public:
ColumnPtr compress() const override;
void forEachSubcolumn(ColumnCallback callback) const override
void forEachSubcolumn(MutableColumnCallback callback) override
{
callback(offsets);
callback(data);
}
void forEachSubcolumnRecursively(RecursiveColumnCallback callback) const override
void forEachSubcolumnRecursively(RecursiveMutableColumnCallback callback) override
{
callback(*offsets);
offsets->forEachSubcolumnRecursively(callback);

View File

@ -230,12 +230,12 @@ public:
data->getExtremes(min, max);
}
void forEachSubcolumn(ColumnCallback callback) const override
void forEachSubcolumn(MutableColumnCallback callback) override
{
callback(data);
}
void forEachSubcolumnRecursively(RecursiveColumnCallback callback) const override
void forEachSubcolumnRecursively(RecursiveMutableColumnCallback callback) override
{
callback(*data);
data->forEachSubcolumnRecursively(callback);

View File

@ -166,7 +166,7 @@ public:
size_t byteSizeAt(size_t n) const override { return getDictionary().byteSizeAt(getIndexes().getUInt(n)); }
size_t allocatedBytes() const override { return idx.getPositions()->allocatedBytes() + getDictionary().allocatedBytes(); }
void forEachSubcolumn(ColumnCallback callback) const override
void forEachSubcolumn(MutableColumnCallback callback) override
{
callback(idx.getPositionsPtr());
@ -175,7 +175,7 @@ public:
callback(dictionary.getColumnUniquePtr());
}
void forEachSubcolumnRecursively(RecursiveColumnCallback callback) const override
void forEachSubcolumnRecursively(RecursiveMutableColumnCallback callback) override
{
callback(*idx.getPositionsPtr());
idx.getPositionsPtr()->forEachSubcolumnRecursively(callback);
@ -340,7 +340,7 @@ private:
explicit Dictionary(MutableColumnPtr && column_unique, bool is_shared);
explicit Dictionary(ColumnPtr column_unique, bool is_shared);
const ColumnPtr & getColumnUniquePtr() const { return column_unique; }
const WrappedPtr & getColumnUniquePtr() const { return column_unique; }
WrappedPtr & getColumnUniquePtr() { return column_unique; }
const IColumnUnique & getColumnUnique() const { return static_cast<const IColumnUnique &>(*column_unique); }

View File

@ -273,12 +273,12 @@ void ColumnMap::getExtremes(Field & min, Field & max) const
max = std::move(map_max_value);
}
void ColumnMap::forEachSubcolumn(ColumnCallback callback) const
void ColumnMap::forEachSubcolumn(MutableColumnCallback callback)
{
callback(nested);
}
void ColumnMap::forEachSubcolumnRecursively(RecursiveColumnCallback callback) const
void ColumnMap::forEachSubcolumnRecursively(RecursiveMutableColumnCallback callback)
{
callback(*nested);
nested->forEachSubcolumnRecursively(callback);

View File

@ -88,8 +88,8 @@ public:
size_t byteSizeAt(size_t n) const override;
size_t allocatedBytes() const override;
void protect() override;
void forEachSubcolumn(ColumnCallback callback) const override;
void forEachSubcolumnRecursively(RecursiveColumnCallback callback) const override;
void forEachSubcolumn(MutableColumnCallback callback) override;
void forEachSubcolumnRecursively(RecursiveMutableColumnCallback callback) override;
bool structureEquals(const IColumn & rhs) const override;
double getRatioOfDefaultRows(double sample_ratio) const override;
UInt64 getNumberOfDefaultRows() const override;

View File

@ -130,13 +130,13 @@ public:
ColumnPtr compress() const override;
void forEachSubcolumn(ColumnCallback callback) const override
void forEachSubcolumn(MutableColumnCallback callback) override
{
callback(nested_column);
callback(null_map);
}
void forEachSubcolumnRecursively(RecursiveColumnCallback callback) const override
void forEachSubcolumnRecursively(RecursiveMutableColumnCallback callback) override
{
callback(*nested_column);
nested_column->forEachSubcolumnRecursively(callback);

View File

@ -664,18 +664,18 @@ size_t ColumnObject::allocatedBytes() const
return res;
}
void ColumnObject::forEachSubcolumn(ColumnCallback callback) const
void ColumnObject::forEachSubcolumn(MutableColumnCallback callback)
{
for (const auto & entry : subcolumns)
for (const auto & part : entry->data.data)
for (auto & entry : subcolumns)
for (auto & part : entry->data.data)
callback(part);
}
void ColumnObject::forEachSubcolumnRecursively(RecursiveColumnCallback callback) const
void ColumnObject::forEachSubcolumnRecursively(RecursiveMutableColumnCallback callback)
{
for (const auto & entry : subcolumns)
for (auto & entry : subcolumns)
{
for (const auto & part : entry->data.data)
for (auto & part : entry->data.data)
{
callback(*part);
part->forEachSubcolumnRecursively(callback);

View File

@ -206,8 +206,8 @@ public:
size_t size() const override;
size_t byteSize() const override;
size_t allocatedBytes() const override;
void forEachSubcolumn(ColumnCallback callback) const override;
void forEachSubcolumnRecursively(RecursiveColumnCallback callback) const override;
void forEachSubcolumn(MutableColumnCallback callback) override;
void forEachSubcolumnRecursively(RecursiveMutableColumnCallback callback) override;
void insert(const Field & field) override;
void insertDefault() override;
void insertFrom(const IColumn & src, size_t n) override;

View File

@ -751,13 +751,13 @@ bool ColumnSparse::structureEquals(const IColumn & rhs) const
return false;
}
void ColumnSparse::forEachSubcolumn(ColumnCallback callback) const
void ColumnSparse::forEachSubcolumn(MutableColumnCallback callback)
{
callback(values);
callback(offsets);
}
void ColumnSparse::forEachSubcolumnRecursively(RecursiveColumnCallback callback) const
void ColumnSparse::forEachSubcolumnRecursively(RecursiveMutableColumnCallback callback)
{
callback(*values);
values->forEachSubcolumnRecursively(callback);

View File

@ -140,8 +140,8 @@ public:
ColumnPtr compress() const override;
void forEachSubcolumn(ColumnCallback callback) const override;
void forEachSubcolumnRecursively(RecursiveColumnCallback callback) const override;
void forEachSubcolumn(MutableColumnCallback callback) override;
void forEachSubcolumnRecursively(RecursiveMutableColumnCallback callback) override;
bool structureEquals(const IColumn & rhs) const override;

View File

@ -31,14 +31,12 @@ ColumnString::ColumnString(const ColumnString & src)
offsets(src.offsets.begin(), src.offsets.end()),
chars(src.chars.begin(), src.chars.end())
{
if (!offsets.empty())
{
Offset last_offset = offsets.back();
/// This will also prevent possible overflow in offset.
if (chars.size() != last_offset)
throw Exception(ErrorCodes::LOGICAL_ERROR, "String offsets has data inconsistent with chars array");
}
Offset last_offset = offsets.empty() ? 0 : offsets.back();
/// This will also prevent possible overflow in offset.
if (last_offset != chars.size())
throw Exception(ErrorCodes::LOGICAL_ERROR,
"String offsets are inconsistent with the chars array. Last offset: {}, array length: {}",
last_offset, chars.size());
}
@ -157,6 +155,7 @@ ColumnPtr ColumnString::filter(const Filter & filt, ssize_t result_size_hint) co
Offsets & res_offsets = res->offsets;
filterArraysImpl<UInt8>(chars, offsets, res_chars, res_offsets, filt, result_size_hint);
return res;
}
@ -571,10 +570,11 @@ void ColumnString::protect()
void ColumnString::validate() const
{
if (!offsets.empty() && offsets.back() != chars.size())
Offset last_offset = offsets.empty() ? 0 : offsets.back();
if (last_offset != chars.size())
throw Exception(ErrorCodes::LOGICAL_ERROR,
"ColumnString validation failed: size mismatch (internal logical error) {} != {}",
offsets.back(), chars.size());
last_offset, chars.size());
}
}

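Both the constructor and validate() above now check the same invariant through a shared last_offset = offsets.empty() ? 0 : offsets.back() computation: the last offset must equal chars.size(). A standalone illustration of that invariant follows, with plain std::vector standing in for the ClickHouse PaddedPODArray types and assuming the usual layout where every string is stored with a terminating zero byte and offsets[i] points one past the end of string i.

// Standalone illustration of the offsets/chars invariant checked above.
#include <cstdint>
#include <stdexcept>
#include <vector>

int main()
{
    // Two strings, "ab" and "c", stored back to back with zero terminators.
    std::vector<char> chars = {'a', 'b', '\0', 'c', '\0'};
    // offsets[i] points one past the end of string i (including its terminator).
    std::vector<std::uint64_t> offsets = {3, 5};

    const std::uint64_t last_offset = offsets.empty() ? 0 : offsets.back();
    if (last_offset != chars.size())
        throw std::runtime_error("String offsets are inconsistent with the chars array");
    return 0;
}
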
View File

@ -495,15 +495,15 @@ void ColumnTuple::getExtremes(Field & min, Field & max) const
max = max_tuple;
}
void ColumnTuple::forEachSubcolumn(ColumnCallback callback) const
void ColumnTuple::forEachSubcolumn(MutableColumnCallback callback)
{
for (const auto & column : columns)
for (auto & column : columns)
callback(column);
}
void ColumnTuple::forEachSubcolumnRecursively(RecursiveColumnCallback callback) const
void ColumnTuple::forEachSubcolumnRecursively(RecursiveMutableColumnCallback callback)
{
for (const auto & column : columns)
for (auto & column : columns)
{
callback(*column);
column->forEachSubcolumnRecursively(callback);

View File

@ -96,8 +96,8 @@ public:
size_t byteSizeAt(size_t n) const override;
size_t allocatedBytes() const override;
void protect() override;
void forEachSubcolumn(ColumnCallback callback) const override;
void forEachSubcolumnRecursively(RecursiveColumnCallback callback) const override;
void forEachSubcolumn(MutableColumnCallback callback) override;
void forEachSubcolumnRecursively(RecursiveMutableColumnCallback callback) override;
bool structureEquals(const IColumn & rhs) const override;
bool isCollationSupported() const override;
ColumnPtr compress() const override;

View File

@ -62,19 +62,19 @@ ColumnPtr IColumn::createWithOffsets(const Offsets & offsets, const Field & defa
return res;
}
void IColumn::forEachSubcolumn(MutableColumnCallback callback)
void IColumn::forEachSubcolumn(ColumnCallback callback) const
{
std::as_const(*this).forEachSubcolumn([&callback](const WrappedPtr & subcolumn)
const_cast<IColumn*>(this)->forEachSubcolumn([&callback](WrappedPtr & subcolumn)
{
callback(const_cast<WrappedPtr &>(subcolumn));
callback(std::as_const(subcolumn));
});
}
void IColumn::forEachSubcolumnRecursively(RecursiveMutableColumnCallback callback)
void IColumn::forEachSubcolumnRecursively(RecursiveColumnCallback callback) const
{
std::as_const(*this).forEachSubcolumnRecursively([&callback](const IColumn & subcolumn)
const_cast<IColumn*>(this)->forEachSubcolumnRecursively([&callback](IColumn & subcolumn)
{
callback(const_cast<IColumn &>(subcolumn));
callback(std::as_const(subcolumn));
});
}

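The hunk above inverts the direction of delegation: the mutable forEachSubcolumn overload is now the virtual customization point, and the const overload is a thin wrapper that const_casts this and hands each subcolumn back to the caller as const. A stripped-down sketch of that pattern, with invented names and a plain vector of ints in place of the column hierarchy:

// Minimal sketch of the const-overload-delegates-to-mutable pattern used above.
// The class and member names are invented; only the delegation shape matters.
#include <functional>
#include <iostream>
#include <utility>
#include <vector>

struct Node
{
    std::vector<int> children;

    using MutableCallback = std::function<void(int &)>;
    using ConstCallback = std::function<void(const int &)>;

    // The mutable overload is the real implementation (virtual in the original).
    void forEach(MutableCallback callback)
    {
        for (auto & child : children)
            callback(child);
    }

    // The const overload strips constness from *this and re-adds it on each element,
    // so a const caller still cannot mutate anything through the callback.
    void forEach(ConstCallback callback) const
    {
        const_cast<Node *>(this)->forEach([&callback](int & child)
        {
            callback(std::as_const(child));
        });
    }
};

int main()
{
    const Node node{{1, 2, 3}};
    node.forEach([](const int & child) { std::cout << child << '\n'; });
    return 0;
}
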
View File

@ -418,21 +418,23 @@ public:
/// If the column contains subcolumns (such as Array, Nullable, etc), invoke the callback on them.
/// Shallow: doesn't make recursive calls and doesn't invoke the callback for the column itself.
using ColumnCallback = std::function<void(const WrappedPtr &)>;
virtual void forEachSubcolumn(ColumnCallback) const {}
using MutableColumnCallback = std::function<void(WrappedPtr &)>;
virtual void forEachSubcolumn(MutableColumnCallback callback);
virtual void forEachSubcolumn(MutableColumnCallback) {}
/// Default implementation calls the mutable overload using const_cast.
using ColumnCallback = std::function<void(const WrappedPtr &)>;
virtual void forEachSubcolumn(ColumnCallback) const;
/// Similar to forEachSubcolumn but it also makes recursive calls.
/// In recursive calls it's prohibited to replace pointers
/// to subcolumns, so we use another callback function.
using RecursiveColumnCallback = std::function<void(const IColumn &)>;
virtual void forEachSubcolumnRecursively(RecursiveColumnCallback) const {}
using RecursiveMutableColumnCallback = std::function<void(IColumn &)>;
virtual void forEachSubcolumnRecursively(RecursiveMutableColumnCallback callback);
virtual void forEachSubcolumnRecursively(RecursiveMutableColumnCallback) {}
/// Default implementation calls the mutable overload using const_cast.
using RecursiveColumnCallback = std::function<void(const IColumn &)>;
virtual void forEachSubcolumnRecursively(RecursiveColumnCallback) const;
/// Columns have equal structure.
/// If true - you can use "compareAt", "insertFrom", etc. methods.

View File

@ -198,8 +198,9 @@
M(IOUringInFlightEvents, "Number of io_uring SQEs in flight") \
M(ReadTaskRequestsSent, "The current number of callback requests in flight from the remote server back to the initiator server to choose the read task (for s3Cluster table function and similar). Measured on the remote server side.") \
M(MergeTreeReadTaskRequestsSent, "The current number of callback requests in flight from the remote server back to the initiator server to choose the read task (for MergeTree tables). Measured on the remote server side.") \
M(MergeTreeAllRangesAnnouncementsSent, "The current number of announcements being sent in flight from the remote server to the initiator server about the set of data parts (for MergeTree tables). Measured on the remote server side.")
M(MergeTreeAllRangesAnnouncementsSent, "The current number of announcements being sent in flight from the remote server to the initiator server about the set of data parts (for MergeTree tables). Measured on the remote server side.") \
M(CreatedTimersInQueryProfiler, "Number of created thread-local timers in QueryProfiler") \
M(ActiveTimersInQueryProfiler, "Number of active thread-local timers in QueryProfiler")
namespace CurrentMetrics
{

View File

@ -104,7 +104,7 @@ DNSResolver::IPAddresses hostByName(const std::string & host)
}
catch (const Poco::Net::DNSException & e)
{
LOG_ERROR(&Poco::Logger::get("DNSResolver"), "Cannot resolve host ({}), error {}: {}.", host, e.code(), e.name());
LOG_WARNING(&Poco::Logger::get("DNSResolver"), "Cannot resolve host ({}), error {}: {}.", host, e.code(), e.name());
addresses.clear();
}

View File

@ -1,4 +1,5 @@
#include <Common/GetPriorityForLoadBalancing.h>
#include <Common/Priority.h>
namespace DB
{
@ -8,23 +9,23 @@ namespace ErrorCodes
extern const int LOGICAL_ERROR;
}
std::function<size_t(size_t index)> GetPriorityForLoadBalancing::getPriorityFunc(LoadBalancing load_balance, size_t offset, size_t pool_size) const
std::function<Priority(size_t index)> GetPriorityForLoadBalancing::getPriorityFunc(LoadBalancing load_balance, size_t offset, size_t pool_size) const
{
std::function<size_t(size_t index)> get_priority;
std::function<Priority(size_t index)> get_priority;
switch (load_balance)
{
case LoadBalancing::NEAREST_HOSTNAME:
if (hostname_differences.empty())
throw Exception(ErrorCodes::LOGICAL_ERROR, "It's a bug: hostname_differences is not initialized");
get_priority = [this](size_t i) { return hostname_differences[i]; };
get_priority = [this](size_t i) { return Priority{static_cast<Int64>(hostname_differences[i])}; };
break;
case LoadBalancing::IN_ORDER:
get_priority = [](size_t i) { return i; };
get_priority = [](size_t i) { return Priority{static_cast<Int64>(i)}; };
break;
case LoadBalancing::RANDOM:
break;
case LoadBalancing::FIRST_OR_RANDOM:
get_priority = [offset](size_t i) -> size_t { return i != offset; };
get_priority = [offset](size_t i) { return i != offset ? Priority{1} : Priority{0}; };
break;
case LoadBalancing::ROUND_ROBIN:
if (last_used >= pool_size)
@ -38,8 +39,8 @@ std::function<size_t(size_t index)> GetPriorityForLoadBalancing::getPriorityFunc
* */
get_priority = [this, pool_size](size_t i)
{
++i;
return i < last_used ? pool_size - i : i - last_used;
++i; // To make `i` indexing start with 1 instead of 0 as `last_used` does
return Priority{static_cast<Int64>(i < last_used ? pool_size - i : i - last_used)};
};
break;
}

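Each LoadBalancing mode above now yields a Priority rather than a raw size_t, but the shapes are unchanged: NEAREST_HOSTNAME returns the precomputed hostname distance, IN_ORDER returns the index itself, FIRST_OR_RANDOM gives priority 0 only to the configured offset, and ROUND_ROBIN rotates around last_used (RANDOM sets no function at all). A compressed standalone sketch of those four shapes, assuming a Priority wrapper where a lower value means the replica is tried first:

// Standalone sketch of the four priority shapes above; Priority is assumed to be
// an Int64-like wrapper where a lower value means "try this replica first".
#include <cstddef>
#include <cstdint>
#include <functional>
#include <utility>
#include <vector>

struct Priority { std::int64_t value = 0; };

enum class Mode { NearestHostname, InOrder, FirstOrRandom, RoundRobin };

std::function<Priority(std::size_t)> makePriorityFunc(
    Mode mode, std::vector<std::size_t> hostname_differences,
    std::size_t offset, std::size_t pool_size, std::size_t last_used)
{
    switch (mode)
    {
        case Mode::NearestHostname:  // smaller name distance wins
            return [diffs = std::move(hostname_differences)](std::size_t i)
            {
                return Priority{static_cast<std::int64_t>(diffs[i])};
            };
        case Mode::InOrder:          // configuration order
            return [](std::size_t i) { return Priority{static_cast<std::int64_t>(i)}; };
        case Mode::FirstOrRandom:    // only the replica at `offset` gets priority 0
            return [offset](std::size_t i) { return i != offset ? Priority{1} : Priority{0}; };
        case Mode::RoundRobin:       // rotate around the last used replica (1-based, like last_used)
            return [pool_size, last_used](std::size_t i)
            {
                ++i;
                return Priority{static_cast<std::int64_t>(i < last_used ? pool_size - i : i - last_used)};
            };
    }
    return {};
}
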
View File

@ -21,7 +21,7 @@ public:
return !(*this == other);
}
std::function<size_t(size_t index)> getPriorityFunc(LoadBalancing load_balance, size_t offset, size_t pool_size) const;
std::function<Priority(size_t index)> getPriorityFunc(LoadBalancing load_balance, size_t offset, size_t pool_size) const;
std::vector<size_t> hostname_differences; /// Distances from name of this host to the names of hosts of pools.

View File

@ -13,6 +13,7 @@
#include <Common/NetException.h>
#include <Common/Exception.h>
#include <Common/randomSeed.h>
#include <Common/Priority.h>
namespace DB
@ -34,7 +35,7 @@ namespace ProfileEvents
/// This class provides a pool with fault tolerance. It is used for pooling of connections to replicated DB.
/// Initialized by several PoolBase objects.
/// When a connection is requested, tries to create or choose an alive connection from one of the nested pools.
/// Pools are tried in the order consistent with lexicographical order of (error count, priority, random number) tuples.
/// Pools are tried in the order consistent with lexicographical order of (error count, slowdown count, config priority, priority, random number) tuples.
/// Number of tries for a single pool is limited by max_tries parameter.
/// The client can set nested pool priority by passing a GetPriority functor.
///
@ -113,7 +114,7 @@ public:
/// The client can provide this functor to affect load balancing - the index of a pool is passed to
/// this functor. The pools with lower result value will be tried first.
using GetPriorityFunc = std::function<size_t(size_t index)>;
using GetPriorityFunc = std::function<Priority(size_t index)>;
/// Returns at least min_entries and at most max_entries connections (at most one connection per nested pool).
/// The method will throw if it is unable to get min_entries alive connections or
@ -336,9 +337,9 @@ struct PoolWithFailoverBase<TNestedPool>::PoolState
/// The number of slowdowns that led to changing replica in HedgedRequestsFactory
UInt64 slowdown_count = 0;
/// Priority from the <remote_server> configuration.
Int64 config_priority = 1;
Priority config_priority{1};
/// Priority from the GetPriorityFunc.
Int64 priority = 0;
Priority priority{0};
UInt64 random = 0;
void randomize()

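The updated comment above spells out the full ordering used when picking a replica: pools are compared lexicographically by (error count, slowdown count, config priority, priority, random number). A small sketch of how that ordering can be written with std::tie follows; the PoolState field names are taken from the hunk, but the comparator itself is only an illustration, not the upstream implementation.

// Illustration of the lexicographic ordering described above. Field names mirror
// the PoolState hunk; the comparator is a sketch, not the upstream code.
#include <cstdint>
#include <tuple>

struct Priority { std::int64_t value = 0; };

struct PoolState
{
    std::uint64_t error_count = 0;
    std::uint64_t slowdown_count = 0;
    Priority config_priority{1};   // from the <remote_servers> configuration
    Priority priority{0};          // from the GetPriorityFunc
    std::uint64_t random = 0;      // tie breaker
};

// A pool that compares "less" is tried before the other one.
inline bool triedBefore(const PoolState & a, const PoolState & b)
{
    return std::tie(a.error_count, a.slowdown_count, a.config_priority.value, a.priority.value, a.random)
         < std::tie(b.error_count, b.slowdown_count, b.config_priority.value, b.priority.value, b.random);
}
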
View File

@ -381,11 +381,25 @@ The server successfully detected this situation and will download merged part fr
M(CachedReadBufferReadFromCacheBytes, "Bytes read from filesystem cache") \
M(CachedReadBufferCacheWriteBytes, "Bytes written from source (remote fs, etc) to filesystem cache") \
M(CachedReadBufferCacheWriteMicroseconds, "Time spent writing data into filesystem cache") \
M(CachedReadBufferCreateBufferMicroseconds, "Prepare buffer time") \
M(CachedWriteBufferCacheWriteBytes, "Bytes written from source (remote fs, etc) to filesystem cache") \
M(CachedWriteBufferCacheWriteMicroseconds, "Time spent writing data into filesystem cache") \
\
M(FilesystemCacheEvictedBytes, "Number of bytes evicted from filesystem cache") \
M(FilesystemCacheEvictedFileSegments, "Number of file segments evicted from filesystem cache") \
M(FilesystemCacheLockKeyMicroseconds, "Lock cache key time") \
M(FilesystemCacheLockMetadataMicroseconds, "Lock filesystem cache metadata time") \
M(FilesystemCacheLockCacheMicroseconds, "Lock filesystem cache time") \
M(FilesystemCacheReserveMicroseconds, "Filesystem cache space reservation time") \
M(FilesystemCacheGetOrSetMicroseconds, "Filesystem cache getOrSet() time") \
M(FilesystemCacheGetMicroseconds, "Filesystem cache get() time") \
M(FileSegmentWaitMicroseconds, "Wait on DOWNLOADING state") \
M(FileSegmentCompleteMicroseconds, "Duration of FileSegment::complete() in filesystem cache") \
M(FileSegmentLockMicroseconds, "Lock file segment time") \
M(FileSegmentWriteMicroseconds, "File segment write() time") \
M(FileSegmentUseMicroseconds, "File segment use() time") \
M(FileSegmentRemoveMicroseconds, "File segment remove() time") \
M(FileSegmentHolderCompleteMicroseconds, "File segments holder complete() time") \
\
M(RemoteFSSeeks, "Total number of seeks for async buffer") \
M(RemoteFSPrefetches, "Number of prefetches made with asynchronous reading from remote filesystem") \
@ -407,7 +421,6 @@ The server successfully detected this situation and will download merged part fr
\
M(FileSegmentWaitReadBufferMicroseconds, "Metric per file segment. Time spent waiting for internal read buffer (includes cache waiting)") \
M(FileSegmentReadMicroseconds, "Metric per file segment. Time spent reading from file") \
M(FileSegmentWriteMicroseconds, "Metric per file segment. Time spent writing cache") \
M(FileSegmentCacheWriteMicroseconds, "Metric per file segment. Time spent writing data to cache") \
M(FileSegmentPredownloadMicroseconds, "Metric per file segment. Time spent predownloading data to cache (predownloading - finishing file segment download (after someone who failed to do that) up to the point current thread was requested to do)") \
M(FileSegmentUsedBytes, "Metric per file segment. How many bytes were actually used from current file segment") \

View File

@ -2,6 +2,7 @@
#include <IO/WriteHelpers.h>
#include <Common/TraceSender.h>
#include <Common/CurrentMetrics.h>
#include <Common/Exception.h>
#include <Common/StackTrace.h>
#include <Common/thread_local_rng.h>
@ -12,6 +13,11 @@
#include <random>
namespace CurrentMetrics
{
extern const Metric CreatedTimersInQueryProfiler;
extern const Metric ActiveTimersInQueryProfiler;
}
namespace ProfileEvents
{
@ -85,48 +91,14 @@ namespace ErrorCodes
extern const int NOT_IMPLEMENTED;
}
template <typename ProfilerImpl>
QueryProfilerBase<ProfilerImpl>::QueryProfilerBase(UInt64 thread_id, int clock_type, UInt32 period, int pause_signal_)
: log(&Poco::Logger::get("QueryProfiler"))
, pause_signal(pause_signal_)
#if USE_UNWIND
Timer::Timer()
: log(&Poco::Logger::get("Timer"))
{}
void Timer::createIfNecessary(UInt64 thread_id, int clock_type, int pause_signal)
{
#if defined(SANITIZER)
UNUSED(thread_id);
UNUSED(clock_type);
UNUSED(period);
UNUSED(pause_signal);
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "QueryProfiler disabled because they cannot work under sanitizers");
#elif !USE_UNWIND
UNUSED(thread_id);
UNUSED(clock_type);
UNUSED(period);
UNUSED(pause_signal);
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "QueryProfiler cannot work with stock libunwind");
#else
/// Sanity check.
if (!hasPHDRCache())
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "QueryProfiler cannot be used without PHDR cache, that is not available for TSan build");
/// Too high frequency can introduce infinite busy loop of signal handlers. We will limit maximum frequency (with 1000 signals per second).
if (period < 1000000)
period = 1000000;
struct sigaction sa{};
sa.sa_sigaction = ProfilerImpl::signalHandler;
sa.sa_flags = SA_SIGINFO | SA_RESTART;
if (sigemptyset(&sa.sa_mask))
throwFromErrno("Failed to clean signal mask for query profiler", ErrorCodes::CANNOT_MANIPULATE_SIGSET);
if (sigaddset(&sa.sa_mask, pause_signal))
throwFromErrno("Failed to add signal to mask for query profiler", ErrorCodes::CANNOT_MANIPULATE_SIGSET);
if (sigaction(pause_signal, &sa, nullptr))
throwFromErrno("Failed to setup signal handler for query profiler", ErrorCodes::CANNOT_SET_SIGNAL_HANDLER);
try
if (!timer_id)
{
struct sigevent sev {};
sev.sigev_notify = SIGEV_THREAD_ID;
@ -151,25 +123,117 @@ QueryProfilerBase<ProfilerImpl>::QueryProfilerBase(UInt64 thread_id, int clock_t
throwFromErrno("Failed to create thread timer", ErrorCodes::CANNOT_CREATE_TIMER);
}
timer_id.emplace(local_timer_id);
CurrentMetrics::add(CurrentMetrics::CreatedTimersInQueryProfiler);
}
}
/// Randomize offset as uniform random value from 0 to period - 1.
/// It will allow to sample short queries even if timer period is large.
/// (For example, with period of 1 second, query with 50 ms duration will be sampled with 1 / 20 probability).
/// It also helps to avoid interference (moire).
UInt32 period_rand = std::uniform_int_distribution<UInt32>(0, period)(thread_local_rng);
void Timer::set(UInt32 period)
{
/// Too high frequency can introduce infinite busy loop of signal handlers. We will limit maximum frequency (with 1000 signals per second).
if (period < 1000000)
period = 1000000;
/// Randomize offset as uniform random value from 0 to period - 1.
/// It will allow to sample short queries even if timer period is large.
/// (For example, with period of 1 second, query with 50 ms duration will be sampled with 1 / 20 probability).
/// It also helps to avoid interference (moire).
UInt32 period_rand = std::uniform_int_distribution<UInt32>(0, period)(thread_local_rng);
struct timespec interval{.tv_sec = period / TIMER_PRECISION, .tv_nsec = period % TIMER_PRECISION};
struct timespec offset{.tv_sec = period_rand / TIMER_PRECISION, .tv_nsec = period_rand % TIMER_PRECISION};
struct timespec interval{.tv_sec = period / TIMER_PRECISION, .tv_nsec = period % TIMER_PRECISION};
struct timespec offset{.tv_sec = period_rand / TIMER_PRECISION, .tv_nsec = period_rand % TIMER_PRECISION};
struct itimerspec timer_spec = {.it_interval = interval, .it_value = offset};
if (timer_settime(*timer_id, 0, &timer_spec, nullptr))
throwFromErrno("Failed to set thread timer period", ErrorCodes::CANNOT_SET_TIMER_PERIOD);
struct itimerspec timer_spec = {.it_interval = interval, .it_value = offset};
if (timer_settime(*timer_id, 0, &timer_spec, nullptr))
throwFromErrno("Failed to set thread timer period", ErrorCodes::CANNOT_SET_TIMER_PERIOD);
CurrentMetrics::add(CurrentMetrics::ActiveTimersInQueryProfiler);
}
void Timer::stop()
{
if (timer_id)
{
struct timespec stop_timer{.tv_sec = 0, .tv_nsec = 0};
struct itimerspec timer_spec = {.it_interval = stop_timer, .it_value = stop_timer};
int err = timer_settime(*timer_id, 0, &timer_spec, nullptr);
if (err)
LOG_ERROR(log, "Failed to stop query profiler timer {}", errnoToString());
chassert(!err && "Failed to stop query profiler timer");
CurrentMetrics::sub(CurrentMetrics::ActiveTimersInQueryProfiler);
}
}
Timer::~Timer()
{
try
{
cleanup();
}
catch (...)
{
tryLogCurrentException(log);
}
}
void Timer::cleanup()
{
if (timer_id)
{
int err = timer_delete(*timer_id);
if (err)
LOG_ERROR(log, "Failed to delete query profiler timer {}", errnoToString());
chassert(!err && "Failed to delete query profiler timer");
timer_id.reset();
CurrentMetrics::sub(CurrentMetrics::CreatedTimersInQueryProfiler);
}
}
#endif
template <typename ProfilerImpl>
QueryProfilerBase<ProfilerImpl>::QueryProfilerBase(UInt64 thread_id, int clock_type, UInt32 period, int pause_signal_)
: log(&Poco::Logger::get("QueryProfiler"))
, pause_signal(pause_signal_)
{
#if defined(SANITIZER)
UNUSED(thread_id);
UNUSED(clock_type);
UNUSED(period);
UNUSED(pause_signal);
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "QueryProfiler disabled because they cannot work under sanitizers");
#elif !USE_UNWIND
UNUSED(thread_id);
UNUSED(clock_type);
UNUSED(period);
UNUSED(pause_signal);
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "QueryProfiler cannot work with stock libunwind");
#else
/// Sanity check.
if (!hasPHDRCache())
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "QueryProfiler cannot be used without PHDR cache, that is not available for TSan build");
struct sigaction sa{};
sa.sa_sigaction = ProfilerImpl::signalHandler;
sa.sa_flags = SA_SIGINFO | SA_RESTART;
if (sigemptyset(&sa.sa_mask))
throwFromErrno("Failed to clean signal mask for query profiler", ErrorCodes::CANNOT_MANIPULATE_SIGSET);
if (sigaddset(&sa.sa_mask, pause_signal))
throwFromErrno("Failed to add signal to mask for query profiler", ErrorCodes::CANNOT_MANIPULATE_SIGSET);
if (sigaction(pause_signal, &sa, nullptr))
throwFromErrno("Failed to setup signal handler for query profiler", ErrorCodes::CANNOT_SET_SIGNAL_HANDLER);
try
{
timer.createIfNecessary(thread_id, clock_type, pause_signal);
timer.set(period);
signal_handler_disarmed = false;
}
catch (...)
{
tryCleanup();
timer.cleanup();
throw;
}
#endif
@ -178,22 +242,21 @@ QueryProfilerBase<ProfilerImpl>::QueryProfilerBase(UInt64 thread_id, int clock_t
template <typename ProfilerImpl>
QueryProfilerBase<ProfilerImpl>::~QueryProfilerBase()
{
tryCleanup();
try
{
cleanup();
}
catch (...)
{
tryLogCurrentException(log);
}
}
template <typename ProfilerImpl>
void QueryProfilerBase<ProfilerImpl>::tryCleanup()
void QueryProfilerBase<ProfilerImpl>::cleanup()
{
#if USE_UNWIND
if (timer_id.has_value())
{
int err = timer_delete(*timer_id);
if (err)
LOG_ERROR(log, "Failed to delete query profiler timer {}", errnoToString());
chassert(!err && "Failed to delete query profiler timer");
timer_id.reset();
}
timer.stop();
signal_handler_disarmed = true;
#endif
}

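Two details of Timer::set() above are easy to miss: the period is clamped to at least 1'000'000 ns so the profiler never fires more than 1000 signals per second, and the first expiration is offset by a uniform random value in [0, period], which is what lets short queries be sampled at all (a query of duration d is sampled with probability of roughly d / period, e.g. 50 ms against a 1 s period gives about 1/20). A standalone sketch of just that arithmetic, leaving out timer creation, timer_settime() and the signal machinery:

// Sketch of the period clamping and randomized first expiration used in Timer::set().
// Only the arithmetic is shown; timer creation and timer_settime() are omitted.
#include <cstdint>
#include <ctime>
#include <random>

static constexpr std::uint32_t TIMER_PRECISION = 1'000'000'000; // nanoseconds per second

struct TimerSpec { std::timespec interval; std::timespec offset; };

TimerSpec makeTimerSpec(std::uint32_t period, std::mt19937 & rng)
{
    // At most 1000 signals per second, otherwise the handlers could form a busy loop.
    if (period < 1'000'000)
        period = 1'000'000;

    // Uniform random offset in [0, period]: a query of duration d is then sampled
    // with probability of roughly d / period even when d is much smaller than period.
    std::uint32_t period_rand = std::uniform_int_distribution<std::uint32_t>(0, period)(rng);

    std::timespec interval{.tv_sec = static_cast<std::time_t>(period / TIMER_PRECISION),
                           .tv_nsec = static_cast<long>(period % TIMER_PRECISION)};
    std::timespec offset{.tv_sec = static_cast<std::time_t>(period_rand / TIMER_PRECISION),
                         .tv_nsec = static_cast<long>(period_rand % TIMER_PRECISION)};
    return {interval, offset};
}
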
View File

@ -27,6 +27,27 @@ namespace DB
* Destructor tries to unset timer and restore previous signal handler.
* Note that signal handler implementation is defined by template parameter. See QueryProfilerReal and QueryProfilerCPU.
*/
#if USE_UNWIND
class Timer
{
public:
Timer();
Timer(const Timer &) = delete;
Timer & operator = (const Timer &) = delete;
~Timer();
void createIfNecessary(UInt64 thread_id, int clock_type, int pause_signal);
void set(UInt32 period);
void stop();
void cleanup();
private:
Poco::Logger * log;
std::optional<timer_t> timer_id;
};
#endif
template <typename ProfilerImpl>
class QueryProfilerBase
{
@ -35,13 +56,12 @@ public:
~QueryProfilerBase();
private:
void tryCleanup();
void cleanup();
Poco::Logger * log;
#if USE_UNWIND
/// Timer id from timer_create(2)
std::optional<timer_t> timer_id;
inline static thread_local Timer timer = Timer();
#endif
/// Pause signal to interrupt threads to get traces

View File

@ -179,7 +179,7 @@ ZooKeeper::ZooKeeper(const Poco::Util::AbstractConfiguration & config, const std
std::vector<ShuffleHost> ZooKeeper::shuffleHosts() const
{
std::function<size_t(size_t index)> get_priority = args.get_priority_load_balancing.getPriorityFunc(args.get_priority_load_balancing.load_balancing, 0, args.hosts.size());
std::function<Priority(size_t index)> get_priority = args.get_priority_load_balancing.getPriorityFunc(args.get_priority_load_balancing.load_balancing, 0, args.hosts.size());
std::vector<ShuffleHost> shuffle_hosts;
for (size_t i = 0; i < args.hosts.size(); ++i)
{

View File

@ -49,7 +49,7 @@ constexpr size_t MULTI_BATCH_SIZE = 100;
struct ShuffleHost
{
String host;
Int64 priority = 0;
Priority priority;
UInt64 random = 0;
void randomize()

View File

@ -2,6 +2,8 @@
#include <Core/SettingsFields.h>
#include <Common/SettingsChanges.h>
#include <Common/FieldVisitorToString.h>
#include <IO/Operators.h>
#include <base/range.h>
#include <boost/blank.hpp>
#include <unordered_map>
@ -547,14 +549,16 @@ void BaseSettings<TTraits>::read(ReadBuffer & in, SettingsWriteFormat format)
template <typename TTraits>
String BaseSettings<TTraits>::toString() const
{
String res;
for (const auto & field : *this)
WriteBufferFromOwnString out;
bool first = true;
for (const auto & setting : *this)
{
if (!res.empty())
res += ", ";
res += field.getName() + " = " + field.getValueString();
if (!first)
out << ", ";
out << setting.getName() << " = " << applyVisitor(FieldVisitorToString(), setting.getValue());
first = false;
}
return res;
return out.str();
}
template <typename TTraits>

View File

@ -96,6 +96,7 @@ class IColumn;
M(Bool, s3_truncate_on_insert, false, "Enables or disables truncate before insert in s3 engine tables.", 0) \
M(Bool, azure_truncate_on_insert, false, "Enables or disables truncate before insert in azure engine tables.", 0) \
M(Bool, s3_create_new_file_on_insert, false, "Enables or disables creating a new file on each insert in s3 engine tables", 0) \
M(Bool, s3_skip_empty_files, false, "Allow to skip empty files in s3 table engine", 0) \
M(Bool, azure_create_new_file_on_insert, false, "Enables or disables creating a new file on each insert in azure engine tables", 0) \
M(Bool, s3_check_objects_after_upload, false, "Check each uploaded object to s3 with head request to be sure that upload was successful", 0) \
M(Bool, s3_allow_parallel_part_upload, true, "Use multiple threads for s3 multipart upload. It may lead to slightly higher memory usage", 0) \
@ -105,6 +106,7 @@ class IColumn;
M(UInt64, hdfs_replication, 0, "The actual number of replications can be specified when the hdfs file is created.", 0) \
M(Bool, hdfs_truncate_on_insert, false, "Enables or disables truncate before insert in hdfs engine tables", 0) \
M(Bool, hdfs_create_new_file_on_insert, false, "Enables or disables creating a new file on each insert in hdfs engine tables", 0) \
M(Bool, hdfs_skip_empty_files, false, "Allow to skip empty files in hdfs table engine", 0) \
M(UInt64, hsts_max_age, 0, "Expired time for hsts. 0 means disable HSTS.", 0) \
M(Bool, extremes, false, "Calculate minimums and maximums of the result columns. They can be output in JSON-formats.", IMPORTANT) \
M(Bool, use_uncompressed_cache, false, "Whether to use the cache of uncompressed blocks.", 0) \
@ -612,6 +614,8 @@ class IColumn;
M(Bool, engine_file_empty_if_not_exists, false, "Allows to select data from a file engine table without file", 0) \
M(Bool, engine_file_truncate_on_insert, false, "Enables or disables truncate before insert in file engine tables", 0) \
M(Bool, engine_file_allow_create_multiple_files, false, "Enables or disables creating a new file on each insert in file engine tables if format has suffix.", 0) \
M(Bool, engine_file_skip_empty_files, false, "Allows to skip empty files in file table engine", 0) \
M(Bool, engine_url_skip_empty_files, false, "Allows to skip empty files in url table engine", 0) \
M(Bool, allow_experimental_database_replicated, false, "Allow to create databases with Replicated engine", 0) \
M(UInt64, database_replicated_initial_query_timeout_sec, 300, "How long initial DDL query should wait for Replicated database to process previous DDL queue entries", 0) \
M(Bool, database_replicated_enforce_synchronous_settings, false, "Enforces synchronous waiting for some queries (see also database_atomic_wait_for_drop_and_detach_synchronously, mutation_sync, alter_sync). Not recommended to enable these settings.", 0) \
@ -860,6 +864,7 @@ class IColumn;
M(Bool, input_format_csv_use_best_effort_in_schema_inference, true, "Use some tweaks and heuristics to infer schema in CSV format", 0) \
M(Bool, input_format_tsv_use_best_effort_in_schema_inference, true, "Use some tweaks and heuristics to infer schema in TSV format", 0) \
M(Bool, input_format_csv_detect_header, true, "Automatically detect header with names and types in CSV format", 0) \
M(Bool, input_format_csv_allow_whitespace_or_tab_as_delimiter, false, "Allow to use spaces and tabs (\\t) as field delimiter in CSV strings", 0) \
M(Bool, input_format_csv_trim_whitespaces, true, "Trims spaces and tabs (\\t) characters at the beginning and end in CSV strings", 0) \
M(Bool, input_format_tsv_detect_header, true, "Automatically detect header with names and types in TSV format", 0) \
M(Bool, input_format_custom_detect_header, true, "Automatically detect header with names and types in CustomSeparated format", 0) \

View File

@ -54,6 +54,7 @@
#include <Common/Elf.h>
#include <Common/setThreadName.h>
#include <Common/logger_useful.h>
#include <Interpreters/Context.h>
#include <filesystem>
#include <Loggers/OwnFormattingChannel.h>
@ -80,7 +81,9 @@ namespace DB
}
}
DB::PipeFDs signal_pipe;
using namespace DB;
PipeFDs signal_pipe;
/** Reset signal handler to the default and send signal to itself.
@ -89,10 +92,10 @@ DB::PipeFDs signal_pipe;
static void call_default_signal_handler(int sig)
{
if (SIG_ERR == signal(sig, SIG_DFL))
DB::throwFromErrno("Cannot set signal handler.", DB::ErrorCodes::CANNOT_SET_SIGNAL_HANDLER);
throwFromErrno("Cannot set signal handler.", ErrorCodes::CANNOT_SET_SIGNAL_HANDLER);
if (0 != raise(sig))
DB::throwFromErrno("Cannot send signal.", DB::ErrorCodes::CANNOT_SEND_SIGNAL);
throwFromErrno("Cannot send signal.", ErrorCodes::CANNOT_SEND_SIGNAL);
}
static const size_t signal_pipe_buf_size =
@ -110,8 +113,8 @@ static void writeSignalIDtoSignalPipe(int sig)
auto saved_errno = errno; /// We must restore previous value of errno in signal handler.
char buf[signal_pipe_buf_size];
DB::WriteBufferFromFileDescriptor out(signal_pipe.fds_rw[1], signal_pipe_buf_size, buf);
DB::writeBinary(sig, out);
WriteBufferFromFileDescriptor out(signal_pipe.fds_rw[1], signal_pipe_buf_size, buf);
writeBinary(sig, out);
out.next();
errno = saved_errno;
@ -141,17 +144,17 @@ static void signalHandler(int sig, siginfo_t * info, void * context)
auto saved_errno = errno; /// We must restore previous value of errno in signal handler.
char buf[signal_pipe_buf_size];
DB::WriteBufferFromFileDescriptorDiscardOnFailure out(signal_pipe.fds_rw[1], signal_pipe_buf_size, buf);
WriteBufferFromFileDescriptorDiscardOnFailure out(signal_pipe.fds_rw[1], signal_pipe_buf_size, buf);
const ucontext_t * signal_context = reinterpret_cast<ucontext_t *>(context);
const StackTrace stack_trace(*signal_context);
DB::writeBinary(sig, out);
DB::writePODBinary(*info, out);
DB::writePODBinary(signal_context, out);
DB::writePODBinary(stack_trace, out);
DB::writeBinary(static_cast<UInt32>(getThreadId()), out);
DB::writePODBinary(DB::current_thread, out);
writeBinary(sig, out);
writePODBinary(*info, out);
writePODBinary(signal_context, out);
writePODBinary(stack_trace, out);
writeBinary(static_cast<UInt32>(getThreadId()), out);
writePODBinary(current_thread, out);
out.next();
@ -203,12 +206,12 @@ public:
static_assert(PIPE_BUF >= 512);
static_assert(signal_pipe_buf_size <= PIPE_BUF, "Only write of PIPE_BUF to pipe is atomic and the minimal known PIPE_BUF across supported platforms is 512");
char buf[signal_pipe_buf_size];
DB::ReadBufferFromFileDescriptor in(signal_pipe.fds_rw[0], signal_pipe_buf_size, buf);
ReadBufferFromFileDescriptor in(signal_pipe.fds_rw[0], signal_pipe_buf_size, buf);
while (!in.eof())
{
int sig = 0;
DB::readBinary(sig, in);
readBinary(sig, in);
// We may log some specific signals afterwards, with different log
// levels and more info, but for completeness we log all signals
// here at trace level.
@ -231,8 +234,8 @@ public:
UInt32 thread_num;
std::string message;
DB::readBinary(thread_num, in);
DB::readBinary(message, in);
readBinary(thread_num, in);
readBinary(message, in);
onTerminate(message, thread_num);
}
@ -248,17 +251,17 @@ public:
ucontext_t * context{};
StackTrace stack_trace(NoCapture{});
UInt32 thread_num{};
DB::ThreadStatus * thread_ptr{};
ThreadStatus * thread_ptr{};
if (sig != SanitizerTrap)
{
DB::readPODBinary(info, in);
DB::readPODBinary(context, in);
readPODBinary(info, in);
readPODBinary(context, in);
}
DB::readPODBinary(stack_trace, in);
DB::readBinary(thread_num, in);
DB::readPODBinary(thread_ptr, in);
readPODBinary(stack_trace, in);
readBinary(thread_num, in);
readPODBinary(thread_ptr, in);
/// This allows to receive more signals if failure happens inside onFault function.
/// Example: segfault while symbolizing stack trace.
@ -275,8 +278,8 @@ private:
{
size_t pos = message.find('\n');
LOG_FATAL(log, "(version {}{}, build id: {}) (from thread {}) {}",
VERSION_STRING, VERSION_OFFICIAL, daemon.build_id, thread_num, message.substr(0, pos));
LOG_FATAL(log, "(version {}{}, build id: {}, git hash: {}) (from thread {}) {}",
VERSION_STRING, VERSION_OFFICIAL, daemon.build_id, daemon.git_hash, thread_num, message.substr(0, pos));
/// Print trace from std::terminate exception line-by-line to make it easy for grep.
while (pos != std::string_view::npos)
@ -298,9 +301,9 @@ private:
ucontext_t * context,
const StackTrace & stack_trace,
UInt32 thread_num,
DB::ThreadStatus * thread_ptr) const
ThreadStatus * thread_ptr) const
{
DB::ThreadStatus thread_status;
ThreadStatus thread_status;
String query_id;
String query;
@ -314,7 +317,7 @@ private:
if (auto logs_queue = thread_ptr->getInternalTextLogsQueue())
{
DB::CurrentThread::attachInternalTextLogsQueue(logs_queue, DB::LogsLevel::trace);
CurrentThread::attachInternalTextLogsQueue(logs_queue, LogsLevel::trace);
}
}
@ -332,14 +335,14 @@ private:
if (query_id.empty())
{
LOG_FATAL(log, "(version {}{}, build id: {}) (from thread {}) (no query) Received signal {} ({})",
VERSION_STRING, VERSION_OFFICIAL, daemon.build_id,
LOG_FATAL(log, "(version {}{}, build id: {}, git hash: {}) (from thread {}) (no query) Received signal {} ({})",
VERSION_STRING, VERSION_OFFICIAL, daemon.build_id, daemon.git_hash,
thread_num, signal_description, sig);
}
else
{
LOG_FATAL(log, "(version {}{}, build id: {}) (from thread {}) (query_id: {}) (query: {}) Received signal {} ({})",
VERSION_STRING, VERSION_OFFICIAL, daemon.build_id,
LOG_FATAL(log, "(version {}{}, build id: {}, git hash: {}) (from thread {}) (query_id: {}) (query: {}) Received signal {} ({})",
VERSION_STRING, VERSION_OFFICIAL, daemon.build_id, daemon.git_hash,
thread_num, query_id, query, signal_description, sig);
}
@ -358,12 +361,12 @@ private:
/// NOTE: This still require memory allocations and mutex lock inside logger.
/// BTW we can also print it to stderr using write syscalls.
DB::WriteBufferFromOwnString bare_stacktrace;
DB::writeString("Stack trace:", bare_stacktrace);
WriteBufferFromOwnString bare_stacktrace;
writeString("Stack trace:", bare_stacktrace);
for (size_t i = stack_trace.getOffset(); i < stack_trace.getSize(); ++i)
{
DB::writeChar(' ', bare_stacktrace);
DB::writePointerHex(stack_trace.getFramePointers()[i], bare_stacktrace);
writeChar(' ', bare_stacktrace);
writePointerHex(stack_trace.getFramePointers()[i], bare_stacktrace);
}
LOG_FATAL(log, fmt::runtime(bare_stacktrace.str()));
@ -411,8 +414,48 @@ private:
/// Send crash report to developers (if configured)
if (sig != SanitizerTrap)
{
SentryWriter::onFault(sig, error_message, stack_trace);
/// Advise the user to send it manually.
if constexpr (std::string_view(VERSION_OFFICIAL).contains("official build"))
{
const auto & date_lut = DateLUT::instance();
/// Approximate support period, upper bound.
if (time(nullptr) - date_lut.makeDate(2000 + VERSION_MAJOR, VERSION_MINOR, 1) < (365 + 30) * 86400)
{
LOG_FATAL(log, "Report this error to https://github.com/ClickHouse/ClickHouse/issues");
}
else
{
LOG_FATAL(log, "ClickHouse version {} is old and should be upgraded to the latest version.", VERSION_STRING);
}
}
else
{
LOG_FATAL(log, "This ClickHouse version is not official and should be upgraded to the official build.");
}
}
/// ClickHouse Keeper does not link to some part of Settings.
#ifndef CLICKHOUSE_PROGRAM_STANDALONE_BUILD
/// List changed settings.
if (!query_id.empty())
{
ContextPtr query_context = thread_ptr->getQueryContext();
if (query_context)
{
String changed_settings = query_context->getSettingsRef().toString();
if (changed_settings.empty())
LOG_FATAL(log, "No settings were changed");
else
LOG_FATAL(log, "Changed settings: {}", changed_settings);
}
}
#endif
/// When everything is done, we will try to send these error messages to client.
if (thread_ptr)
thread_ptr->onFatalError();
@ -436,15 +479,15 @@ static DISABLE_SANITIZER_INSTRUMENTATION void sanitizerDeathCallback()
/// Also need to send data via pipe. Otherwise it may lead to deadlocks or failures in printing diagnostic info.
char buf[signal_pipe_buf_size];
DB::WriteBufferFromFileDescriptorDiscardOnFailure out(signal_pipe.fds_rw[1], signal_pipe_buf_size, buf);
WriteBufferFromFileDescriptorDiscardOnFailure out(signal_pipe.fds_rw[1], signal_pipe_buf_size, buf);
const StackTrace stack_trace;
int sig = SignalListener::SanitizerTrap;
DB::writeBinary(sig, out);
DB::writePODBinary(stack_trace, out);
DB::writeBinary(UInt32(getThreadId()), out);
DB::writePODBinary(DB::current_thread, out);
writeBinary(sig, out);
writePODBinary(stack_trace, out);
writeBinary(UInt32(getThreadId()), out);
writePODBinary(current_thread, out);
out.next();
@ -470,7 +513,7 @@ static DISABLE_SANITIZER_INSTRUMENTATION void sanitizerDeathCallback()
std::string log_message;
if (std::current_exception())
log_message = "Terminate called for uncaught exception:\n" + DB::getCurrentExceptionMessage(true);
log_message = "Terminate called for uncaught exception:\n" + getCurrentExceptionMessage(true);
else
log_message = "Terminate called without an active exception";
@ -482,11 +525,11 @@ static DISABLE_SANITIZER_INSTRUMENTATION void sanitizerDeathCallback()
log_message.resize(buf_size - 16);
char buf[buf_size];
DB::WriteBufferFromFileDescriptor out(signal_pipe.fds_rw[1], buf_size, buf);
WriteBufferFromFileDescriptor out(signal_pipe.fds_rw[1], buf_size, buf);
DB::writeBinary(static_cast<int>(SignalListener::StdTerminate), out);
DB::writeBinary(static_cast<UInt32>(getThreadId()), out);
DB::writeBinary(log_message, out);
writeBinary(static_cast<int>(SignalListener::StdTerminate), out);
writeBinary(static_cast<UInt32>(getThreadId()), out);
writeBinary(log_message, out);
out.next();
abort();
@ -512,7 +555,7 @@ static bool tryCreateDirectories(Poco::Logger * logger, const std::string & path
}
catch (...)
{
LOG_WARNING(logger, "{}: when creating {}, {}", __PRETTY_FUNCTION__, path, DB::getCurrentExceptionMessage(true));
LOG_WARNING(logger, "{}: when creating {}, {}", __PRETTY_FUNCTION__, path, getCurrentExceptionMessage(true));
}
return false;
}
@ -527,7 +570,7 @@ void BaseDaemon::reloadConfiguration()
* (It's convenient to log in console when you start server without any command line parameters.)
*/
config_path = config().getString("config-file", getDefaultConfigFileName());
DB::ConfigProcessor config_processor(config_path, false, true);
ConfigProcessor config_processor(config_path, false, true);
config_processor.setConfigPath(fs::path(config_path).parent_path());
loaded_config = config_processor.loadConfig(/* allow_zk_includes = */ true);
@ -548,7 +591,7 @@ BaseDaemon::~BaseDaemon()
/// Reset signals to SIG_DFL to avoid trying to write to the signal_pipe that will be closed after.
for (int sig : handled_signals)
if (SIG_ERR == signal(sig, SIG_DFL))
DB::throwFromErrno("Cannot set signal handler.", DB::ErrorCodes::CANNOT_SET_SIGNAL_HANDLER);
throwFromErrno("Cannot set signal handler.", ErrorCodes::CANNOT_SET_SIGNAL_HANDLER);
signal_pipe.close();
}
@ -592,7 +635,7 @@ void BaseDaemon::closeFDs()
/// Iterate directory separately from closing fds to avoid closing iterated directory fd.
std::vector<int> fds;
for (const auto & path : fs::directory_iterator(proc_path))
fds.push_back(DB::parse<int>(path.path().filename()));
fds.push_back(parse<int>(path.path().filename()));
for (const auto & fd : fds)
{
@ -662,7 +705,7 @@ void BaseDaemon::initialize(Application & self)
}
umask(umask_num);
DB::ConfigProcessor(config_path).savePreprocessedConfig(loaded_config, "");
ConfigProcessor(config_path).savePreprocessedConfig(loaded_config, "");
/// Write core dump on crash.
{
@ -713,12 +756,12 @@ void BaseDaemon::initialize(Application & self)
/// {
/// try
/// {
/// DB::SomeApp app;
/// SomeApp app;
/// return app.run(argc, argv);
/// }
/// catch (...)
/// {
/// std::cerr << DB::getCurrentExceptionMessage(true) << "\n";
/// std::cerr << getCurrentExceptionMessage(true) << "\n";
/// return 1;
/// }
/// }
@ -772,7 +815,7 @@ void BaseDaemon::initialize(Application & self)
/// Create pid file.
if (config().has("pid"))
pid_file.emplace(config().getString("pid"), DB::StatusFile::write_pid);
pid_file.emplace(config().getString("pid"), StatusFile::write_pid);
if (is_daemon)
{
@ -799,7 +842,7 @@ void BaseDaemon::initialize(Application & self)
initializeTerminationAndSignalProcessing();
logRevision();
for (const auto & key : DB::getMultipleKeysFromConfig(config(), "", "graphite"))
for (const auto & key : getMultipleKeysFromConfig(config(), "", "graphite"))
{
graphite_writers.emplace(key, std::make_unique<GraphiteWriter>(key));
}
@ -887,7 +930,7 @@ void BaseDaemon::initializeTerminationAndSignalProcessing()
signal_listener_thread.start(*signal_listener);
#if defined(__ELF__) && !defined(OS_FREEBSD)
String build_id_hex = DB::SymbolIndex::instance()->getBuildIDHex();
String build_id_hex = SymbolIndex::instance()->getBuildIDHex();
if (build_id_hex.empty())
build_id = "";
else
@ -902,7 +945,7 @@ void BaseDaemon::initializeTerminationAndSignalProcessing()
std::string executable_path = getExecutablePath();
if (!executable_path.empty())
stored_binary_hash = DB::Elf(executable_path).getStoredBinaryHash();
stored_binary_hash = Elf(executable_path).getStoredBinaryHash();
#endif
}
@ -963,7 +1006,7 @@ void BaseDaemon::handleSignal(int signal_id)
onInterruptSignals(signal_id);
}
else
throw DB::Exception::createDeprecated(std::string("Unsupported signal: ") + strsignal(signal_id), 0); // NOLINT(concurrency-mt-unsafe) // it is not thread-safe but ok in this context
throw Exception::createDeprecated(std::string("Unsupported signal: ") + strsignal(signal_id), 0); // NOLINT(concurrency-mt-unsafe) // it is not thread-safe but ok in this context
}
void BaseDaemon::onInterruptSignals(int signal_id)
@ -1020,7 +1063,7 @@ void BaseDaemon::setupWatchdog()
pid = fork();
if (-1 == pid)
DB::throwFromErrno("Cannot fork", DB::ErrorCodes::SYSTEM_ERROR);
throwFromErrno("Cannot fork", ErrorCodes::SYSTEM_ERROR);
if (0 == pid)
{
@ -1073,13 +1116,13 @@ void BaseDaemon::setupWatchdog()
pf = new OwnJSONPatternFormatter(config());
else
pf = new OwnPatternFormatter;
Poco::AutoPtr<DB::OwnFormattingChannel> log = new DB::OwnFormattingChannel(pf, new Poco::ConsoleChannel(std::cerr));
Poco::AutoPtr<OwnFormattingChannel> log = new OwnFormattingChannel(pf, new Poco::ConsoleChannel(std::cerr));
logger().setChannel(log);
}
/// Concurrent writing of logs to the same file from two threads is questionable on its own,
/// but rotating them from two threads is disastrous.
if (auto * channel = dynamic_cast<DB::OwnSplitChannel *>(logger().getChannel()))
if (auto * channel = dynamic_cast<OwnSplitChannel *>(logger().getChannel()))
{
channel->setChannelProperty("log", Poco::FileChannel::PROP_ROTATION, "never");
channel->setChannelProperty("log", Poco::FileChannel::PROP_ROTATEONOPEN, "false");
@ -1191,7 +1234,7 @@ void systemdNotify(const std::string_view & command)
int s = socket(AF_UNIX, SOCK_DGRAM | SOCK_CLOEXEC, 0);
if (s == -1)
DB::throwFromErrno("Can't create UNIX socket for systemd notify.", DB::ErrorCodes::SYSTEM_ERROR);
throwFromErrno("Can't create UNIX socket for systemd notify.", ErrorCodes::SYSTEM_ERROR);
SCOPE_EXIT({ close(s); });
@ -1202,7 +1245,7 @@ void systemdNotify(const std::string_view & command)
addr.sun_family = AF_UNIX;
if (len < 2 || len > sizeof(addr.sun_path) - 1)
throw DB::Exception(DB::ErrorCodes::SYSTEM_ERROR, "NOTIFY_SOCKET env var value \"{}\" is wrong.", path);
throw Exception(ErrorCodes::SYSTEM_ERROR, "NOTIFY_SOCKET env var value \"{}\" is wrong.", path);
memcpy(addr.sun_path, path, len + 1); /// write last zero as well.
@ -1214,7 +1257,7 @@ void systemdNotify(const std::string_view & command)
else if (path[0] == '/')
addrlen += 1; /// non-abstract-addresses should be zero terminated.
else
throw DB::Exception(DB::ErrorCodes::SYSTEM_ERROR, "Wrong UNIX path \"{}\" in NOTIFY_SOCKET env var", path);
throw Exception(ErrorCodes::SYSTEM_ERROR, "Wrong UNIX path \"{}\" in NOTIFY_SOCKET env var", path);
const struct sockaddr *sock_addr = reinterpret_cast <const struct sockaddr *>(&addr);
@ -1227,7 +1270,7 @@ void systemdNotify(const std::string_view & command)
if (errno == EINTR)
continue;
else
DB::throwFromErrno("Failed to notify systemd, sendto returned error.", DB::ErrorCodes::SYSTEM_ERROR);
throwFromErrno("Failed to notify systemd, sendto returned error.", ErrorCodes::SYSTEM_ERROR);
}
else
sent_bytes_total += sent_bytes;

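One constraint shapes all of the handler code above: a signal handler can do almost nothing safely, so each handler packs its payload (signal number, siginfo, stack trace, thread id) into a single buffer of at most PIPE_BUF bytes and performs one write to the pipe, which is atomic at that size, as the static_asserts above note; the listener thread then decodes the same layout from the read end. A heavily stripped-down sketch of that pattern, with an invented file descriptor variable and a dummy marker standing in for the real payload:

// Stripped-down sketch of the single-write signal-pipe pattern used above:
// pack everything into one buffer of at most PIPE_BUF bytes and write it once,
// so the write is atomic and the handler needs no locks or allocations.
#include <cerrno>
#include <cstdint>
#include <cstring>
#include <limits.h>
#include <unistd.h>

static int signal_pipe_write_fd = -1;   // assumed to be created during startup

extern "C" void minimalSignalHandler(int sig)
{
    const int saved_errno = errno;      // the handler must not clobber errno

    char buf[64];
    static_assert(sizeof(buf) <= PIPE_BUF, "a single write must stay atomic");

    // The real handlers also serialize siginfo, the stack trace and the thread id.
    std::uint32_t thread_marker = 0;
    std::memcpy(buf, &sig, sizeof(sig));
    std::memcpy(buf + sizeof(sig), &thread_marker, sizeof(thread_marker));

    // One write; the listener thread decodes the same layout from the read end.
    (void)::write(signal_pipe_write_fd, buf, sizeof(sig) + sizeof(thread_marker));

    errno = saved_errno;
}
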
View File

@ -300,49 +300,6 @@ namespace
MutableColumnPtr additional_keys_map;
};
template <typename T>
IndexMapsWithAdditionalKeys mapIndexWithAdditionalKeysRef(PaddedPODArray<T> & index, size_t dict_size)
{
PaddedPODArray<T> copy(index.cbegin(), index.cend());
HashMap<T, T> dict_map;
HashMap<T, T> add_keys_map;
for (auto val : index)
{
if (val < dict_size)
dict_map.insert({val, dict_map.size()});
else
add_keys_map.insert({val, add_keys_map.size()});
}
auto dictionary_map = ColumnVector<T>::create(dict_map.size());
auto additional_keys_map = ColumnVector<T>::create(add_keys_map.size());
auto & dict_data = dictionary_map->getData();
auto & add_keys_data = additional_keys_map->getData();
for (auto val : dict_map)
dict_data[val.second] = val.first;
for (auto val : add_keys_map)
add_keys_data[val.second] = val.first - dict_size;
for (auto & val : index)
val = val < dict_size ? dict_map[val]
: add_keys_map[val] + dict_map.size();
for (size_t i = 0; i < index.size(); ++i)
{
T expected = index[i] < dict_data.size() ? dict_data[index[i]]
: add_keys_data[index[i] - dict_data.size()] + dict_size;
if (expected != copy[i])
throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected {}, but got {}", toString(expected), toString(copy[i]));
}
return {std::move(dictionary_map), std::move(additional_keys_map)};
}
template <typename T>
IndexMapsWithAdditionalKeys mapIndexWithAdditionalKeys(PaddedPODArray<T> & index, size_t dict_size)
{

View File

@ -36,6 +36,7 @@
#include <Parsers/ParserCreateQuery.h>
#include <Parsers/queryToString.h>
#include <Storages/StorageKeeperMap.h>
#include <Storages/AlterCommands.h>
namespace DB
{
@ -252,7 +253,7 @@ ClusterPtr DatabaseReplicated::getClusterImpl() const
treat_local_as_remote,
treat_local_port_as_remote,
cluster_auth_info.cluster_secure_connection,
/*priority=*/ 1,
Priority{1},
TSA_SUPPRESS_WARNING_FOR_READ(database_name), /// FIXME
cluster_auth_info.cluster_secret};
@ -1441,9 +1442,49 @@ bool DatabaseReplicated::shouldReplicateQuery(const ContextPtr & query_context,
return table->as<StorageKeeperMap>() != nullptr;
};
const auto is_replicated_table = [&](const ASTPtr & ast)
{
auto table_id = query_context->resolveStorageID(ast, Context::ResolveOrdinary);
StoragePtr table = DatabaseCatalog::instance().getTable(table_id, query_context);
return table->supportsReplication();
};
const auto has_many_shards = [&]()
{
/// If there is only 1 shard then there is no need to replicate some queries.
auto current_cluster = tryGetCluster();
return
!current_cluster || /// Couldn't get the cluster, so we don't know how many shards there are.
current_cluster->getShardsInfo().size() > 1;
};
/// Some ALTERs are not replicated on database level
if (const auto * alter = query_ptr->as<const ASTAlterQuery>())
return !alter->isAttachAlter() && !alter->isFetchAlter() && !alter->isDropPartitionAlter() && !is_keeper_map_table(query_ptr);
{
if (alter->isAttachAlter() || alter->isFetchAlter() || alter->isDropPartitionAlter() || is_keeper_map_table(query_ptr))
return false;
if (has_many_shards() || !is_replicated_table(query_ptr))
return true;
try
{
/// Metadata alter should go through database
for (const auto & child : alter->command_list->children)
if (AlterCommand::parse(child->as<ASTAlterCommand>()))
return true;
/// It's ALTER PARTITION or mutation, doesn't involve database
return false;
}
catch (...)
{
tryLogCurrentException(log);
}
return true;
}
/// DROP DATABASE is not replicated
if (const auto * drop = query_ptr->as<const ASTDropQuery>())
@ -1459,11 +1500,7 @@ bool DatabaseReplicated::shouldReplicateQuery(const ContextPtr & query_context,
if (is_keeper_map_table(query_ptr))
return false;
/// If there is only 1 shard then there is no need to replicate DELETE query.
auto current_cluster = tryGetCluster();
return
!current_cluster || /// Couldn't get the cluster, so we don't know how many shards there are.
current_cluster->getShardsInfo().size() > 1;
return has_many_shards() || !is_replicated_table(query_ptr);
}
return true;

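The new ALTER branch above condenses to a small decision tree: attach/fetch/drop-partition ALTERs and KeeperMap tables are never replicated at the database level; with more than one shard, or for a non-replicated table, the database must replicate the query itself; and on a single shard with a replicated table only metadata commands go through the database, while partition operations and mutations are left to the table's own replication (with a catch-all fallback to replicating if parsing fails). A simplified sketch of just that decision, using hypothetical boolean stand-ins for the lambdas in the hunk and omitting the try/catch fallback:

// Simplified sketch of the ALTER branch of shouldReplicateQuery() above.
// The parameters are hypothetical stand-ins for the predicates in the hunk,
// and the try/catch fallback (replicate on parse failure) is omitted.
bool shouldReplicateAlter(
    bool is_attach_fetch_or_drop_partition,
    bool targets_keeper_map_table,
    bool has_many_shards,
    bool targets_replicated_table,
    bool contains_metadata_command)   // e.g. a column change, as opposed to a partition op or mutation
{
    if (is_attach_fetch_or_drop_partition || targets_keeper_map_table)
        return false;                 // never replicated on the database level
    if (has_many_shards || !targets_replicated_table)
        return true;                  // the database has to fan the query out itself
    // Single shard, replicated table: metadata changes go through the database,
    // partition operations and mutations are handled by the table's own replication.
    return contains_metadata_command;
}
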
View File

@ -8,6 +8,7 @@
#include <IO/BoundedReadBuffer.h>
#include <Common/getRandomASCIIString.h>
#include <Common/logger_useful.h>
#include <Common/ElapsedTimeProfileEventIncrement.h>
#include <base/hex.h>
#include <Interpreters/Context.h>
@ -26,6 +27,7 @@ extern const Event CachedReadBufferCacheWriteMicroseconds;
extern const Event CachedReadBufferReadFromSourceBytes;
extern const Event CachedReadBufferReadFromCacheBytes;
extern const Event CachedReadBufferCacheWriteBytes;
extern const Event CachedReadBufferCreateBufferMicroseconds;
}
namespace DB
@ -145,6 +147,8 @@ void CachedOnDiskReadBufferFromFile::initialize(size_t offset, size_t size)
CachedOnDiskReadBufferFromFile::ImplementationBufferPtr
CachedOnDiskReadBufferFromFile::getCacheReadBuffer(const FileSegment & file_segment) const
{
    ProfileEventTimeIncrement<Microseconds> watch(ProfileEvents::CachedReadBufferCreateBufferMicroseconds);
    /// Use is_persistent flag from in-memory state of the filesegment,
    /// because it is consistent with what is written on disk.
    auto path = file_segment.getPathInLocalCache();
@ -167,6 +171,8 @@ CachedOnDiskReadBufferFromFile::getCacheReadBuffer(const FileSegment & file_segm
CachedOnDiskReadBufferFromFile::ImplementationBufferPtr
CachedOnDiskReadBufferFromFile::getRemoteReadBuffer(FileSegment & file_segment, ReadType read_type_)
{
    ProfileEventTimeIncrement<Microseconds> watch(ProfileEvents::CachedReadBufferCreateBufferMicroseconds);
    switch (read_type_)
    {
        case ReadType::REMOTE_FS_READ_AND_PUT_IN_CACHE:
@ -401,6 +407,8 @@ CachedOnDiskReadBufferFromFile::getImplementationBuffer(FileSegment & file_segme
    current_file_segment_counters.increment(
        ProfileEvents::FileSegmentWaitReadBufferMicroseconds, watch.elapsedMicroseconds());
    ProfileEvents::increment(ProfileEvents::FileSegmentWaitReadBufferMicroseconds, watch.elapsedMicroseconds());
    [[maybe_unused]] auto download_current_segment = read_type == ReadType::REMOTE_FS_READ_AND_PUT_IN_CACHE;
    chassert(download_current_segment == file_segment.isDownloader());
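Note: the hunks above all use the same pattern of a scope-bound watch that adds its elapsed time to a profile event when it leaves scope. A minimal sketch of that pattern, assuming a plain atomic counter in place of ClickHouse's ProfileEvents machinery (names are illustrative):

#include <atomic>
#include <chrono>
#include <cstdint>

/// Hypothetical stand-in for a ProfileEvents counter.
std::atomic<uint64_t> create_buffer_microseconds{0};

/// RAII helper in the spirit of ProfileEventTimeIncrement<Microseconds>:
/// adds the elapsed wall-clock time of its own lifetime to a counter on destruction.
class ScopedMicrosecondsIncrement
{
public:
    explicit ScopedMicrosecondsIncrement(std::atomic<uint64_t> & counter_)
        : counter(counter_), start(std::chrono::steady_clock::now()) {}

    ~ScopedMicrosecondsIncrement()
    {
        auto elapsed = std::chrono::steady_clock::now() - start;
        counter += static_cast<uint64_t>(std::chrono::duration_cast<std::chrono::microseconds>(elapsed).count());
    }

private:
    std::atomic<uint64_t> & counter;
    std::chrono::steady_clock::time_point start;
};

/// Usage: declare one at the top of the function whose duration should be accounted.
void createBufferSketch()
{
    ScopedMicrosecondsIncrement watch(create_buffer_microseconds);
    /// ... construct and return the read buffer ...
}

In the diff the watch sits at the top of getCacheReadBuffer and getRemoteReadBuffer, so the whole buffer construction is accounted to CachedReadBufferCreateBufferMicroseconds.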

View File

@ -138,6 +138,7 @@ void CachedObjectStorage::removeCacheIfExists(const std::string & path_key_for_c
void CachedObjectStorage::removeObject(const StoredObject & object)
{
    removeCacheIfExists(object.remote_path);
    object_storage->removeObject(object);
}

View File

@ -751,7 +751,7 @@ namespace
private:
    using Reader = typename CapnpType::Reader;
    CapnpType::Reader getData(const ColumnPtr & column, size_t row_num)
    Reader getData(const ColumnPtr & column, size_t row_num)
    {
        auto data = column->getDataAt(row_num);
        if constexpr (std::is_same_v<CapnpType, capnp::Data>)
@ -801,7 +801,7 @@ namespace
private:
    using Reader = typename CapnpType::Reader;
    CapnpType::Reader getData(const ColumnPtr & column, size_t row_num)
    Reader getData(const ColumnPtr & column, size_t row_num)
    {
        auto data = column->getDataAt(row_num);
        if constexpr (std::is_same_v<CapnpType, capnp::Data>)

View File

@ -71,6 +71,7 @@ FormatSettings getFormatSettings(ContextPtr context, const Settings & settings)
    format_settings.csv.try_detect_header = settings.input_format_csv_detect_header;
    format_settings.csv.skip_trailing_empty_lines = settings.input_format_csv_skip_trailing_empty_lines;
    format_settings.csv.trim_whitespaces = settings.input_format_csv_trim_whitespaces;
    format_settings.csv.allow_whitespace_or_tab_as_delimiter = settings.input_format_csv_allow_whitespace_or_tab_as_delimiter;
    format_settings.hive_text.fields_delimiter = settings.input_format_hive_text_fields_delimiter;
    format_settings.hive_text.collection_items_delimiter = settings.input_format_hive_text_collection_items_delimiter;
    format_settings.hive_text.map_keys_delimiter = settings.input_format_hive_text_map_keys_delimiter;

Some files were not shown because too many files have changed in this diff.