Merge branch 'master' into docs_table_function_update

Александр 2022-09-01 14:01:32 +02:00 committed by GitHub
commit 97e55ca2f5
398 changed files with 3508 additions and 1056 deletions

View File

@ -1,4 +1,4 @@
name: ReleaseWorkflow
name: PublishedReleaseCI
# - Gets artifacts from S3
# - Sends it to JFROG Artifactory
# - Adds them to the release assets
@ -15,7 +15,7 @@ jobs:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
JFROG_API_KEY=${{ secrets.JFROG_KEY_API_PACKAGES }}
JFROG_API_KEY=${{ secrets.JFROG_ARTIFACTORY_API_KEY }}
TEMP_PATH=${{runner.temp}}/release_packages
REPO_COPY=${{runner.temp}}/release_packages/ClickHouse
EOF
@ -30,7 +30,7 @@ jobs:
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY"
python3 ./tests/ci/push_to_artifactory.py --release "${{ github.ref }}" \
--commit '${{ github.sha }}' --all
--commit '${{ github.sha }}' --artifactory-url "${{ secrets.JFROG_ARTIFACTORY_URL }}" --all
- name: Upload packages to release assets
uses: svenstaro/upload-release-action@v2
with:

View File

@ -1,4 +1,4 @@
name: ReleaseCI
name: ReleaseBranchCI
env:
# Force the stdout and stderr streams to be unbuffered

View File

@ -0,0 +1,22 @@
#define _GNU_SOURCE
#include <unistd.h>
#include <errno.h>
#include <fcntl.h>
#include "syscall.h"

int dup3(int old, int new, int flags)
{
    int r;
#ifdef SYS_dup2
    /* dup3 requires old != new, unlike dup2 */
    if (old==new) return __syscall_ret(-EINVAL);
    if (flags & O_CLOEXEC) {
        /* prefer SYS_dup3, which applies O_CLOEXEC atomically; retry on EBUSY races */
        while ((r=__syscall(SYS_dup3, old, new, flags))==-EBUSY);
        if (r!=-ENOSYS) return __syscall_ret(r);
    }
    /* fall back to SYS_dup2, then set FD_CLOEXEC non-atomically */
    while ((r=__syscall(SYS_dup2, old, new))==-EBUSY);
    if (flags & O_CLOEXEC) __syscall(SYS_fcntl, new, F_SETFD, FD_CLOEXEC);
#else
    while ((r=__syscall(SYS_dup3, old, new, flags))==-EBUSY);
#endif
    return __syscall_ret(r);
}

View File

@ -0,0 +1,26 @@
#include <sys/inotify.h>
#include <errno.h>
#include "syscall.h"

int inotify_init()
{
    return inotify_init1(0);
}

int inotify_init1(int flags)
{
    int r = __syscall(SYS_inotify_init1, flags);
#ifdef SYS_inotify_init
    /* fall back to the legacy syscall when inotify_init1 is unavailable and no flags are requested */
    if (r==-ENOSYS && !flags) r = __syscall(SYS_inotify_init);
#endif
    return __syscall_ret(r);
}

int inotify_add_watch(int fd, const char *pathname, uint32_t mask)
{
    return syscall(SYS_inotify_add_watch, fd, pathname, mask);
}

int inotify_rm_watch(int fd, int wd)
{
    return syscall(SYS_inotify_rm_watch, fd, wd);
}

contrib/NuRaft vendored

@ -1 +1 @@
Subproject commit 33f60f961d4914441b684af43e9e5535078ba54b
Subproject commit bdba298189e29995892de78dcecf64d127444e81

contrib/libuv vendored

@ -1 +1 @@
Subproject commit 95081e7c16c9857babe6d4e2bc1c779198ea89ae
Subproject commit 3a85b2eb3d83f369b8a8cafd329d7e9dc28f60cf

View File

@ -15,6 +15,7 @@ set(uv_sources
src/inet.c
src/random.c
src/strscpy.c
src/strtok.c
src/threadpool.c
src/timer.c
src/uv-common.c
@ -75,13 +76,13 @@ if(CMAKE_SYSTEM_NAME STREQUAL "Linux")
list(APPEND uv_defines _GNU_SOURCE _POSIX_C_SOURCE=200112)
list(APPEND uv_libraries rt)
list(APPEND uv_sources
src/unix/epoll.c
src/unix/linux-core.c
src/unix/linux-inotify.c
src/unix/linux-syscalls.c
src/unix/procfs-exepath.c
src/unix/random-getrandom.c
src/unix/random-sysctl-linux.c
src/unix/sysinfo-loadavg.c)
src/unix/random-sysctl-linux.c)
endif()
if(CMAKE_SYSTEM_NAME STREQUAL "NetBSD")
@ -111,6 +112,7 @@ if(CMAKE_SYSTEM_NAME STREQUAL "OS/390")
src/unix/pthread-fixes.c
src/unix/pthread-barrier.c
src/unix/os390.c
src/unix/os390-proctitle.c
src/unix/os390-syscalls.c)
endif()

View File

@ -0,0 +1,25 @@
---
sidebar_position: 1
sidebar_label: 2022
---
# 2022 Changelog
### ClickHouse release v22.3.12.19-lts (4a08f8a073b) FIXME as compared to v22.3.11.12-lts (137c5f72657)
#### Build/Testing/Packaging Improvement
* Backported in [#40695](https://github.com/ClickHouse/ClickHouse/issues/40695): Fix TGZ packages. [#40681](https://github.com/ClickHouse/ClickHouse/pull/40681) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
#### Bug Fix (user-visible misbehavior in official stable or prestable release)
* Backported in [#40160](https://github.com/ClickHouse/ClickHouse/issues/40160): fix HashMethodOneNumber get wrong key value when column is const. [#40020](https://github.com/ClickHouse/ClickHouse/pull/40020) ([Duc Canh Le](https://github.com/canhld94)).
* Backported in [#40122](https://github.com/ClickHouse/ClickHouse/issues/40122): Fix bug in collectFilesToSkip() by adding correct file extension(.idx or idx2) for indexes to be recalculated, avoid wrong hard links. Fixed [#39896](https://github.com/ClickHouse/ClickHouse/issues/39896). [#40095](https://github.com/ClickHouse/ClickHouse/pull/40095) ([Jianmei Zhang](https://github.com/zhangjmruc)).
* Backported in [#40207](https://github.com/ClickHouse/ClickHouse/issues/40207): Fix insufficient argument check for encryption functions (found by query fuzzer). This closes [#39987](https://github.com/ClickHouse/ClickHouse/issues/39987). [#40194](https://github.com/ClickHouse/ClickHouse/pull/40194) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Backported in [#40270](https://github.com/ClickHouse/ClickHouse/issues/40270): Fix possible segfault in CapnProto input format. This bug was found and sent through the ClickHouse bug-bounty [program](https://github.com/ClickHouse/ClickHouse/issues/38986) by *kiojj*. [#40241](https://github.com/ClickHouse/ClickHouse/pull/40241) ([Kruglov Pavel](https://github.com/Avogar)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* fix heap buffer overflow by limiting http chunk size [#40292](https://github.com/ClickHouse/ClickHouse/pull/40292) ([Sema Checherinda](https://github.com/CheSema)).
* Reduce changelog verbosity in CI [#40360](https://github.com/ClickHouse/ClickHouse/pull/40360) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Backport the upstream clickhouse_helper.py [#40490](https://github.com/ClickHouse/ClickHouse/pull/40490) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

View File

@ -0,0 +1,17 @@
---
sidebar_position: 1
sidebar_label: 2022
---
# 2022 Changelog
### ClickHouse release v22.6.7.7-stable (8eae2af3b9a) FIXME as compared to v22.6.6.16-stable (d2a33ebc822)
#### Build/Testing/Packaging Improvement
* Backported in [#40692](https://github.com/ClickHouse/ClickHouse/issues/40692): Fix TGZ packages. [#40681](https://github.com/ClickHouse/ClickHouse/pull/40681) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
#### Bug Fix (user-visible misbehavior in official stable or prestable release)
* Backported in [#40531](https://github.com/ClickHouse/ClickHouse/issues/40531): Proxy resolver stop on first successful request to endpoint. [#40353](https://github.com/ClickHouse/ClickHouse/pull/40353) ([Maksim Kita](https://github.com/kitaisreal)).
* Backported in [#40623](https://github.com/ClickHouse/ClickHouse/issues/40623): Fix potential dataloss due to a bug in AWS SDK (https://github.com/aws/aws-sdk-cpp/issues/658). Bug can be triggered only when clickhouse is used over S3. [#40506](https://github.com/ClickHouse/ClickHouse/pull/40506) ([alesapin](https://github.com/alesapin)).

View File

@ -0,0 +1,23 @@
---
sidebar_position: 1
sidebar_label: 2022
---
# 2022 Changelog
### ClickHouse release v22.7.5.13-stable (6f48d2d1f59) FIXME as compared to v22.7.4.16-stable (0b9272f8fdc)
#### Build/Testing/Packaging Improvement
* Backported in [#40693](https://github.com/ClickHouse/ClickHouse/issues/40693): Fix TGZ packages. [#40681](https://github.com/ClickHouse/ClickHouse/pull/40681) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
#### Bug Fix (user-visible misbehavior in official stable or prestable release)
* Backported in [#40542](https://github.com/ClickHouse/ClickHouse/issues/40542): Fix potential deadlock in WriteBufferFromS3 during task scheduling failure. [#40070](https://github.com/ClickHouse/ClickHouse/pull/40070) ([Maksim Kita](https://github.com/kitaisreal)).
* Backported in [#40450](https://github.com/ClickHouse/ClickHouse/issues/40450): Fix rare bug with column TTL for MergeTree engines family: In case of repeated vertical merge the error `Cannot unlink file ColumnName.bin ... No such file or directory.` could happen. [#40346](https://github.com/ClickHouse/ClickHouse/pull/40346) ([alesapin](https://github.com/alesapin)).
* Backported in [#40532](https://github.com/ClickHouse/ClickHouse/issues/40532): Proxy resolver stop on first successful request to endpoint. [#40353](https://github.com/ClickHouse/ClickHouse/pull/40353) ([Maksim Kita](https://github.com/kitaisreal)).
* Backported in [#40624](https://github.com/ClickHouse/ClickHouse/issues/40624): Fix potential dataloss due to a bug in AWS SDK (https://github.com/aws/aws-sdk-cpp/issues/658). Bug can be triggered only when clickhouse is used over S3. [#40506](https://github.com/ClickHouse/ClickHouse/pull/40506) ([alesapin](https://github.com/alesapin)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* DNSResolver remove AI_V4MAPPED, AI_ALL hints [#40502](https://github.com/ClickHouse/ClickHouse/pull/40502) ([Maksim Kita](https://github.com/kitaisreal)).

View File

@ -0,0 +1,25 @@
---
sidebar_position: 1
sidebar_label: 2022
---
# 2022 Changelog
### ClickHouse release v22.8.3.13-lts (6a15b73faea) FIXME as compared to v22.8.2.11-lts (b4ed6d744ff)
#### Improvement
* Backported in [#40550](https://github.com/ClickHouse/ClickHouse/issues/40550): Improve schema inference cache, respect format settings that can change the schema. [#40414](https://github.com/ClickHouse/ClickHouse/pull/40414) ([Kruglov Pavel](https://github.com/Avogar)).
#### Build/Testing/Packaging Improvement
* Backported in [#40694](https://github.com/ClickHouse/ClickHouse/issues/40694): Fix TGZ packages. [#40681](https://github.com/ClickHouse/ClickHouse/pull/40681) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
#### Bug Fix (user-visible misbehavior in official stable or prestable release)
* Backported in [#40451](https://github.com/ClickHouse/ClickHouse/issues/40451): Fix rare bug with column TTL for MergeTree engines family: In case of repeated vertical merge the error `Cannot unlink file ColumnName.bin ... No such file or directory.` could happen. [#40346](https://github.com/ClickHouse/ClickHouse/pull/40346) ([alesapin](https://github.com/alesapin)).
* Backported in [#40533](https://github.com/ClickHouse/ClickHouse/issues/40533): Proxy resolver stop on first successful request to endpoint. [#40353](https://github.com/ClickHouse/ClickHouse/pull/40353) ([Maksim Kita](https://github.com/kitaisreal)).
* Backported in [#40625](https://github.com/ClickHouse/ClickHouse/issues/40625): Fix potential dataloss due to a bug in AWS SDK (https://github.com/aws/aws-sdk-cpp/issues/658). Bug can be triggered only when clickhouse is used over S3. [#40506](https://github.com/ClickHouse/ClickHouse/pull/40506) ([alesapin](https://github.com/alesapin)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* DNSResolver remove AI_V4MAPPED, AI_ALL hints [#40502](https://github.com/ClickHouse/ClickHouse/pull/40502) ([Maksim Kita](https://github.com/kitaisreal)).

View File

@ -0,0 +1,18 @@
---
sidebar_position: 1
sidebar_label: 2022
---
# 2022 Changelog
### ClickHouse release v22.8.4.7-lts (baad27bcd2f) FIXME as compared to v22.8.3.13-lts (6a15b73faea)
#### Bug Fix (user-visible misbehavior in official stable or prestable release)
* Backported in [#40760](https://github.com/ClickHouse/ClickHouse/issues/40760): Fix possible error 'Decimal math overflow' while parsing DateTime64. [#40546](https://github.com/ClickHouse/ClickHouse/pull/40546) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#40811](https://github.com/ClickHouse/ClickHouse/issues/40811): In [#40595](https://github.com/ClickHouse/ClickHouse/issues/40595) it was reported that the `host_regexp` functionality was not working properly with a name to address resolution in `/etc/hosts`. It's fixed. [#40769](https://github.com/ClickHouse/ClickHouse/pull/40769) ([Arthur Passos](https://github.com/arthurpassos)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Migrate artifactory [#40831](https://github.com/ClickHouse/ClickHouse/pull/40831) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

View File

@ -4,7 +4,7 @@ sidebar_position: 50
sidebar_label: MySQL
---
# MySQL
# MySQL
Allows connecting to databases on a remote MySQL server and performing `INSERT` and `SELECT` queries to exchange data between ClickHouse and MySQL.
@ -99,7 +99,7 @@ mysql> select * from mysql_table;
Database in ClickHouse, exchanging data with the MySQL server:
``` sql
CREATE DATABASE mysql_db ENGINE = MySQL('localhost:3306', 'test', 'my_user', 'user_password')
CREATE DATABASE mysql_db ENGINE = MySQL('localhost:3306', 'test', 'my_user', 'user_password') SETTINGS read_write_timeout=10000, connect_timeout=100;
```
``` sql

View File

@ -2,10 +2,9 @@
slug: /en/engines/table-engines/integrations/ExternalDistributed
sidebar_position: 12
sidebar_label: ExternalDistributed
title: ExternalDistributed
---
# ExternalDistributed
The `ExternalDistributed` engine allows performing `SELECT` queries on data that is stored on remote MySQL or PostgreSQL servers. Accepts [MySQL](../../../engines/table-engines/integrations/mysql.md) or [PostgreSQL](../../../engines/table-engines/integrations/postgresql.md) engines as an argument, so sharding is possible.
## Creating a Table {#creating-a-table}
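A minimal sketch of creating such a table, assuming a hypothetical MySQL setup (the engine takes the underlying engine name, addresses, database, table, user, and password; all names below are placeholders):

```sql
CREATE TABLE test_shard
(
    id UInt32,
    name String
)
ENGINE = ExternalDistributed('MySQL', 'host1:3306,host2:3306', 'remote_db', 'remote_table', 'my_user', 'user_password');
```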

View File

@ -2,10 +2,9 @@
slug: /en/engines/table-engines/integrations/materialized-postgresql
sidebar_position: 12
sidebar_label: MaterializedPostgreSQL
title: MaterializedPostgreSQL
---
# MaterializedPostgreSQL
Creates a ClickHouse table with an initial data dump of a PostgreSQL table and starts the replication process, i.e. it executes a background job to apply new changes as they happen on the PostgreSQL table in the remote PostgreSQL database.
If more than one table is required, it is highly recommended to use the [MaterializedPostgreSQL](../../../engines/database-engines/materialized-postgresql.md) database engine instead of the table engine, and to use the `materialized_postgresql_tables_list` setting, which specifies the tables to be replicated (it will also be possible to add a database `schema`). It is much better in terms of CPU, and results in fewer connections and fewer replication slots inside the remote PostgreSQL database.
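For a single table, a sketch of the table-engine form (the connection details below are placeholders):

```sql
CREATE TABLE postgresql_replica (key UInt64, value UInt64)
ENGINE = MaterializedPostgreSQL('postgres1:5432', 'postgres_database', 'postgresql_table', 'postgres_user', 'postgres_password')
PRIMARY KEY key;
```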

View File

@ -2,10 +2,9 @@
slug: /en/getting-started/example-datasets/brown-benchmark
sidebar_label: Brown University Benchmark
description: A new analytical benchmark for machine-generated log data
title: "Brown University Benchmark"
---
# Brown University Benchmark
`MgBench` is a new analytical benchmark for machine-generated log data, created by [Andrew Crotty](http://cs.brown.edu/people/acrotty/).
Download the data:

View File

@ -1,10 +1,9 @@
---
slug: /en/getting-started/example-datasets/cell-towers
sidebar_label: Cell Towers
title: "Cell Towers"
---
# Cell Towers
This dataset is from [OpenCellid](https://www.opencellid.org/) - The world's largest Open Database of Cell Towers.
As of 2021, it contains more than 40 million records about cell towers (GSM, LTE, UMTS, etc.) around the world with their geographical coordinates and metadata (country code, network, etc.).

View File

@ -1,10 +1,9 @@
---
slug: /en/getting-started/example-datasets/menus
sidebar_label: New York Public Library "What's on the Menu?" Dataset
title: "New York Public Library \"What's on the Menu?\" Dataset"
---
# New York Public Library "What's on the Menu?" Dataset
The dataset is created by the New York Public Library. It contains historical data on the menus of hotels, restaurants and cafes with the dishes and their prices.
Source: http://menus.nypl.org/data

View File

@ -2,10 +2,9 @@
slug: /en/getting-started/example-datasets/opensky
sidebar_label: Air Traffic Data
description: The data in this dataset is derived and cleaned from the full OpenSky dataset to illustrate the development of air traffic during the COVID-19 pandemic.
title: "Crowdsourced air traffic data from The OpenSky Network 2020"
---
# Crowdsourced air traffic data from The OpenSky Network 2020
The data in this dataset is derived and cleaned from the full OpenSky dataset to illustrate the development of air traffic during the COVID-19 pandemic. It spans all flights seen by the network's more than 2500 members since 1 January 2019. More data will be periodically included in the dataset until the end of the COVID-19 pandemic.
Source: https://zenodo.org/record/5092942#.YRBCyTpRXYd

View File

@ -1,10 +1,9 @@
---
slug: /en/getting-started/example-datasets/recipes
sidebar_label: Recipes Dataset
title: "Recipes Dataset"
---
# Recipes Dataset
RecipeNLG dataset is available for download [here](https://recipenlg.cs.put.poznan.pl/dataset). It contains 2.2 million recipes. The size is slightly less than 1 GB.
## Download and Unpack the Dataset

View File

@ -2,33 +2,23 @@
slug: /en/getting-started/example-datasets/uk-price-paid
sidebar_label: UK Property Price Paid
sidebar_position: 1
title: "UK Property Price Paid"
---
# UK Property Price Paid
The dataset contains data about prices paid for real-estate property in England and Wales. The data is available since 1995.
The size of the dataset in uncompressed form is about 4 GiB and it will take about 278 MiB in ClickHouse.
The size of the dataset in uncompressed form is about 4 GiB and it will take about 270 MiB in ClickHouse.
Source: https://www.gov.uk/government/statistical-data-sets/price-paid-data-downloads
Source: https://www.gov.uk/government/statistical-data-sets/price-paid-data-downloads <br/>
Description of the fields: https://www.gov.uk/guidance/about-the-price-paid-data
Contains HM Land Registry data © Crown copyright and database right 2021. This data is licensed under the Open Government Licence v3.0.
## Download the Dataset {#download-dataset}
Run the command:
```bash
wget http://prod.publicdata.landregistry.gov.uk.s3-website-eu-west-1.amazonaws.com/pp-complete.csv
```
Download will take about 2 minutes with good internet connection.
## Create the Table {#create-table}
```sql
CREATE TABLE uk_price_paid
(
uuid UUID,
price UInt32,
date Date,
postcode1 LowCardinality(String),
@ -43,65 +33,68 @@ CREATE TABLE uk_price_paid
town LowCardinality(String),
district LowCardinality(String),
county LowCardinality(String),
category UInt8
) ENGINE = MergeTree ORDER BY (postcode1, postcode2, addr1, addr2);
category UInt8,
category2 UInt8
) ORDER BY (postcode1, postcode2, addr1, addr2);
```
## Preprocess and Import Data {#preprocess-import-data}
We will use `clickhouse-local` tool for data preprocessing and `clickhouse-client` to upload it.
In this example, we define the structure of source data from the CSV file and specify a query to preprocess the data with `clickhouse-local`.
In this example, we define the structure of source data from the CSV file and specify a query to preprocess the data with either `clickhouse-client` or the web based Play UI.
The preprocessing is:
- splitting the postcode to two different columns `postcode1` and `postcode2` that is better for storage and queries;
- splitting the postcode to two different columns `postcode1` and `postcode2` that are better for storage and queries;
- converting the `time` field to date, as it only contains 00:00 time;
- ignoring the [UUID](../../sql-reference/data-types/uuid.md) field because we don't need it for analysis;
- transforming `type` and `duration` to more readable Enum fields with function [transform](../../sql-reference/functions/other-functions.md#transform);
- transforming `is_new` and `category` fields from single-character string (`Y`/`N` and `A`/`B`) to [UInt8](../../sql-reference/data-types/int-uint.md#uint8-uint16-uint32-uint64-uint256-int8-int16-int32-int64-int128-int256) field with 0 and 1.
Preprocessed data is piped directly to `clickhouse-client` to be inserted into ClickHouse table in streaming fashion.
```bash
clickhouse-local --input-format CSV --structure '
uuid String,
price UInt32,
time DateTime,
postcode String,
a String,
b String,
c String,
addr1 String,
addr2 String,
street String,
locality String,
town String,
district String,
county String,
d String,
e String
' --query "
WITH splitByChar(' ', postcode) AS p
SELECT
price,
toDate(time) AS date,
p[1] AS postcode1,
p[2] AS postcode2,
transform(a, ['T', 'S', 'D', 'F', 'O'], ['terraced', 'semi-detached', 'detached', 'flat', 'other']) AS type,
b = 'Y' AS is_new,
transform(c, ['F', 'L', 'U'], ['freehold', 'leasehold', 'unknown']) AS duration,
addr1,
addr2,
street,
locality,
town,
district,
county,
d = 'B' AS category
FROM table" --date_time_input_format best_effort < pp-complete.csv | clickhouse-client --query "INSERT INTO uk_price_paid FORMAT TSV"
INSERT INTO uk_price_paid
WITH
splitByChar(' ', postcode) AS p
SELECT
replaceRegexpAll(uuid_string, '{|}','') AS uuid,
toUInt32(price_string) AS price,
parseDateTimeBestEffortUS(time) AS date,
p[1] AS postcode1,
p[2] AS postcode2,
transform(a, ['T', 'S', 'D', 'F', 'O'], ['terraced', 'semi-detached', 'detached', 'flat', 'other']) AS type,
b = 'Y' AS is_new,
transform(c, ['F', 'L', 'U'], ['freehold', 'leasehold', 'unknown']) AS duration,
addr1,
addr2,
street,
locality,
town,
district,
county,
d = 'B' AS category,
e = 'B' AS category2
FROM url(
'http://prod.publicdata.landregistry.gov.uk.s3-website-eu-west-1.amazonaws.com/pp-complete.csv',
'CSV',
'uuid_string String,
price_string String,
time String,
postcode String,
a String,
b String,
c String,
addr1 String,
addr2 String,
street String,
locality String,
town String,
district String,
county String,
d String,
e String'
)
SETTINGS max_http_get_redirects=1;
```
It will take about 40 seconds.
It will take about 2 minutes depending on where you are in the world, and where your ClickHouse servers are. Almost all of the time is the download time of the CSV file from the UK government server.
## Validate the Data {#validate-data}
@ -113,13 +106,13 @@ SELECT count() FROM uk_price_paid;
Result:
```text
```response
┌──count()─┐
│ 26321785
│ 27450499
└──────────┘
```
The size of dataset in ClickHouse is just 278 MiB, check it.
The size of dataset in ClickHouse is just 540 MiB, check it.
Query:
@ -131,10 +124,14 @@ Result:
```text
┌─formatReadableSize(total_bytes)─┐
278.80 MiB │
545.04 MiB │
└─────────────────────────────────┘
```
:::note
The above size is for a replicated table; if you are using this dataset with a single instance, the size will be half.
:::
## Run Some Queries {#run-queries}
### Query 1. Average Price Per Year {#average-price}
@ -147,7 +144,7 @@ SELECT toYear(date) AS year, round(avg(price)) AS price, bar(price, 0, 1000000,
Result:
```text
```response
┌─year─┬──price─┬─bar(round(avg(price)), 0, 1000000, 80)─┐
│ 1995 │ 67932 │ █████▍ │
│ 1996 │ 71505 │ █████▋ │

View File

@ -1,10 +1,10 @@
---
slug: /en/operations/backup
sidebar_position: 49
sidebar_label: Data Backup
sidebar_label: Data backup and restore
---
# Data Backup
# Data backup and restore
While [replication](../engines/table-engines/mergetree-family/replication.md) provides protection from hardware failures, it does not protect against human errors: accidental deletion of data, deletion of the wrong table or a table on the wrong cluster, and software bugs that result in incorrect data processing or data corruption. In many cases mistakes like these will affect all replicas. ClickHouse has built-in safeguards to prevent some types of mistakes — for example, by default [you can't just drop tables with a MergeTree-like engine containing more than 50 Gb of data](server-configuration-parameters/settings.md#max-table-size-to-drop). However, these safeguards do not cover all possible cases and can be circumvented.
@ -16,21 +16,181 @@ Each company has different resources available and business requirements, so the
Keep in mind that if you backed something up and never tried to restore it, chances are that restore will not work properly when you actually need it (or at least it will take longer than business can tolerate). So whatever backup approach you choose, make sure to automate the restore process as well, and practice it on a spare ClickHouse cluster regularly.
:::
## Duplicating Source Data Somewhere Else {#duplicating-source-data-somewhere-else}
## Configure a backup destination
In the examples below you will see the backup destination specified like `Disk('backups', '1.zip')`. To prepare the destination, add a file `/etc/clickhouse-server/config.d/backup_disk.xml` specifying the backup destination. For example, this file defines a disk named `backups` and then adds that disk to the **backups > allowed_disk** list:
```xml
<clickhouse>
<storage_configuration>
<disks>
<!--highlight-next-line -->
<backups>
<type>local</type>
<path>/backups/</path>
</backups>
</disks>
</storage_configuration>
<!--highlight-start -->
<backups>
<allowed_disk>backups</allowed_disk>
<allowed_path>/backups/</allowed_path>
</backups>
<!--highlight-end -->
</clickhouse>
```
## Parameters
Backups can be either full or incremental, and can include tables (including materialized views, projections, and dictionaries) and databases. Backups can be synchronous (the default) or asynchronous, compressed, and password protected.
The BACKUP and RESTORE statements take a list of DATABASE and TABLE names, a destination (or source), options and settings:
- The destination for the backup, or the source for the restore. This is based on the disk defined earlier. For example `Disk('backups', 'filename.zip')`
- ASYNC: backup or restore asynchronously
- PARTITIONS: a list of partitions to restore
- SETTINGS:
- [`compression_method`](en/sql-reference/statements/create/table/#column-compression-codecs) and `compression_level`
- `password` for the file on disk
- `base_backup`: the destination of the previous backup of this source. For example, `Disk('backups', '1.zip')`
## Usage examples
Backup and then restore a table:
```
BACKUP TABLE test.table TO Disk('backups', '1.zip')
```
Corresponding restore:
```
RESTORE TABLE test.table FROM Disk('backups', '1.zip')
```
:::note
The above RESTORE would fail if the table `test.table` contains data; to test the RESTORE you would either have to drop the table first, or use the setting `allow_non_empty_tables=true`:
```
RESTORE TABLE test.table FROM Disk('backups', '1.zip')
SETTINGS allow_non_empty_tables=true
```
:::
Tables can be restored, or backed up, with new names:
```
RESTORE TABLE test.table AS test.table2 FROM Disk('backups', '1.zip')
```
```
BACKUP TABLE test.table3 AS test.table4 TO Disk('backups', '2.zip')
```
## Incremental backups
Incremental backups can be taken by specifying the `base_backup`.
:::note
Incremental backups depend on the base backup. The base backup must be kept available in order to be able to restore from an incremental backup.
:::
Store new data incrementally. The setting `base_backup` causes data added since the previous backup to `Disk('backups', 'd.zip')` to be stored to `Disk('backups', 'incremental-a.zip')`:
```
BACKUP TABLE test.table TO Disk('backups', 'incremental-a.zip')
SETTINGS base_backup = Disk('backups', 'd.zip')
```
Restore all data from the incremental backup and the base_backup into a new table `test.table2`:
```
RESTORE TABLE test.table AS test.table2
FROM Disk('backups', 'incremental-a.zip');
```
## Assign a password to the backup
Backups written to disk can have a password applied to the file:
```
BACKUP TABLE test.table
TO Disk('backups', 'password-protected.zip')
SETTINGS password='qwerty'
```
Restore:
```
RESTORE TABLE test.table
FROM Disk('backups', 'password-protected.zip')
SETTINGS password='qwerty'
```
## Compression settings
If you would like to specify the compression method or level:
```
BACKUP TABLE test.table
TO Disk('backups', 'filename.zip')
SETTINGS compression_method='lzma', compression_level=3
```
## Restore specific partitions
If specific partitions associated with a table need to be restored, these can be specified. To restore partitions 2 and 3 from backup:
```
RESTORE TABLE test.table PARTITIONS '2', '3'
FROM Disk('backups', 'filename.zip')
```
## Check the status of backups
The backup command returns an `id` and `status`, and that `id` can be used to get the status of the backup. This is very useful for checking the progress of long ASYNC backups. The example below shows a failure that happened when trying to overwrite an existing backup file:
```sql
BACKUP TABLE helloworld.my_first_table TO Disk('backups', '1.zip') ASYNC
```
```response
┌─id───────────────────────────────────┬─status──────────┐
│ 7678b0b3-f519-4e6e-811f-5a0781a4eb52 │ CREATING_BACKUP │
└──────────────────────────────────────┴─────────────────┘
1 row in set. Elapsed: 0.001 sec.
```
```
SELECT
*
FROM system.backups
where id='7678b0b3-f519-4e6e-811f-5a0781a4eb52'
FORMAT Vertical
```
```response
Row 1:
──────
id: 7678b0b3-f519-4e6e-811f-5a0781a4eb52
name: Disk('backups', '1.zip')
#highlight-next-line
status: BACKUP_FAILED
num_files: 0
uncompressed_size: 0
compressed_size: 0
#highlight-next-line
error: Code: 598. DB::Exception: Backup Disk('backups', '1.zip') already exists. (BACKUP_ALREADY_EXISTS) (version 22.8.2.11 (official build))
start_time: 2022-08-30 09:21:46
end_time: 2022-08-30 09:21:46
1 row in set. Elapsed: 0.002 sec.
```
## Alternatives
ClickHouse stores data on disk, and there are many ways to backup disks. These are some alternatives that have been used in the past, and that may fit well into your environment.
### Duplicating Source Data Somewhere Else {#duplicating-source-data-somewhere-else}
Often data that is ingested into ClickHouse is delivered through some sort of persistent queue, such as [Apache Kafka](https://kafka.apache.org). In this case it is possible to configure an additional set of subscribers that will read the same data stream while it is being written to ClickHouse and store it in cold storage somewhere. Most companies already have some default recommended cold storage, which could be an object store or a distributed filesystem like [HDFS](https://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-hdfs/HdfsDesign.html).
## Filesystem Snapshots {#filesystem-snapshots}
### Filesystem Snapshots {#filesystem-snapshots}
Some local filesystems provide snapshot functionality (for example, [ZFS](https://en.wikipedia.org/wiki/ZFS)), but they might not be the best choice for serving live queries. A possible solution is to create additional replicas with this kind of filesystem and exclude them from the [Distributed](../engines/table-engines/special/distributed.md) tables that are used for `SELECT` queries. Snapshots on such replicas will be out of reach of any queries that modify data. As a bonus, these replicas might have special hardware configurations with more disks attached per server, which would be cost-effective.
## clickhouse-copier {#clickhouse-copier}
### clickhouse-copier {#clickhouse-copier}
[clickhouse-copier](../operations/utilities/clickhouse-copier.md) is a versatile tool that was initially created to re-shard petabyte-sized tables. It can also be used for backup and restore purposes because it reliably copies data between ClickHouse tables and clusters.
For smaller volumes of data, a simple `INSERT INTO ... SELECT ...` to remote tables might work as well.
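For example, a sketch with the `remote` table function (the host and credentials are placeholders):

```sql
INSERT INTO FUNCTION remote('backup-host:9000', 'db', 'table', 'user', 'password')
SELECT * FROM db.table;
```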
## Manipulations with Parts {#manipulations-with-parts}
### Manipulations with Parts {#manipulations-with-parts}
ClickHouse allows using the `ALTER TABLE ... FREEZE PARTITION ...` query to create a local copy of table partitions. This is implemented using hardlinks to the `/var/lib/clickhouse/shadow/` folder, so it usually does not consume extra disk space for old data. The created copies of files are not handled by the ClickHouse server, so you can just leave them there: you will have a simple backup that does not require any additional external system, but it will still be prone to hardware issues. For this reason, it's better to remotely copy them to another location and then remove the local copies. Distributed filesystems and object stores are still good options for this, but normal attached file servers with a large enough capacity might work as well (in this case the transfer will occur via the network filesystem or maybe [rsync](https://en.wikipedia.org/wiki/Rsync)).
Data can be restored from backup using the `ALTER TABLE ... ATTACH PARTITION ...`
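A sketch of the round trip, assuming a hypothetical table partitioned by `toYYYYMM(date)`:

```sql
ALTER TABLE db.table FREEZE PARTITION 202208;
-- copy the frozen parts from /var/lib/clickhouse/shadow/ to remote storage;
-- to restore, place the part directories into the table's detached/ directory, then:
ALTER TABLE db.table ATTACH PARTITION 202208;
```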
@ -39,4 +199,3 @@ For more information about queries related to partition manipulations, see the [
A third-party tool is available to automate this approach: [clickhouse-backup](https://github.com/AlexAkulov/clickhouse-backup).
[Original article](https://clickhouse.com/docs/en/operations/backup/) <!--hide-->

View File

@ -2,10 +2,9 @@
slug: /en/operations/caches
sidebar_position: 65
sidebar_label: Caches
title: "Cache Types"
---
# Cache Types
When performing queries, ClickHouse uses different caches.
Main cache types:

View File

@ -2,10 +2,9 @@
slug: /en/operations/external-authenticators/
sidebar_position: 48
sidebar_label: External User Authenticators and Directories
title: "External User Authenticators and Directories"
---
# External User Authenticators and Directories
ClickHouse supports authenticating and managing users using external services.
The following external authenticators and directories are supported:

View File

@ -1,7 +1,7 @@
---
slug: /en/operations/external-authenticators/ldap
title: "LDAP"
---
# LDAP
An LDAP server can be used to authenticate ClickHouse users. There are two different approaches for doing this:

View File

@ -1,7 +1,7 @@
---
slug: /en/operations/external-authenticators/ssl-x509
title: "SSL X.509 certificate authentication"
---
# SSL X.509 certificate authentication
[SSL 'strict' option](../server-configuration-parameters/settings.md#server_configuration_parameters-openssl) enables mandatory certificate validation for incoming connections. In this case, only connections with trusted certificates can be established. Connections with untrusted certificates will be rejected. Thus, certificate validation allows incoming connections to be uniquely authenticated. The `Common Name` field of the certificate is used to identify the connected user. This makes it possible to associate multiple certificates with the same user. Additionally, reissuing and revoking of certificates does not affect the ClickHouse configuration.
@ -24,4 +24,4 @@ To enable SSL certificate authentication, a list of `Common Name`'s for each Cli
</clickhouse>
```
For the SSL [`chain of trust`](https://en.wikipedia.org/wiki/Chain_of_trust) to work correctly, it is also important to make sure that the [`caConfig`](../server-configuration-parameters/settings.md#server_configuration_parameters-openssl) parameter is configured properly.
For the SSL [`chain of trust`](https://en.wikipedia.org/wiki/Chain_of_trust) to work correctly, it is also important to make sure that the [`caConfig`](../server-configuration-parameters/settings.md#server_configuration_parameters-openssl) parameter is configured properly.

View File

@ -2,10 +2,9 @@
slug: /en/operations/opentelemetry
sidebar_position: 62
sidebar_label: OpenTelemetry Support
title: "[experimental] OpenTelemetry Support"
---
# [experimental] OpenTelemetry Support
[OpenTelemetry](https://opentelemetry.io/) is an open standard for collecting traces and metrics from distributed applications. ClickHouse has some support for OpenTelemetry.
:::warning

View File

@ -1,7 +1,7 @@
---
slug: /en/operations/settings/merge-tree-settings
title: "MergeTree tables settings"
---
# MergeTree tables settings
The values of `merge_tree` settings (for all MergeTree tables) can be viewed in the table `system.merge_tree_settings`; they can be overridden in `config.xml` in the `merge_tree` section, or set in the `SETTINGS` section of each table.
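For example, to inspect a current value and override it per table (the setting below is just an illustration):

```sql
SELECT name, value, changed
FROM system.merge_tree_settings
WHERE name = 'max_suspicious_broken_parts';

CREATE TABLE foo (a Int32) ENGINE = MergeTree ORDER BY a
SETTINGS max_suspicious_broken_parts = 500;
```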

View File

@ -1,11 +1,10 @@
---
slug: /en/operations/storing-data
sidebar_position: 68
sidebar_label: External Disks for Storing Data
sidebar_label: "External Disks for Storing Data"
title: "External Disks for Storing Data"
---
# External Disks for Storing Data
Data processed in ClickHouse is usually stored in the local file system — on the same machine as the ClickHouse server. That requires large-capacity disks, which can be quite expensive. To avoid that you can store the data remotely — on [Amazon S3](https://aws.amazon.com/s3/) disks or in the Hadoop Distributed File System ([HDFS](https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-hdfs/HdfsDesign.html)).
To work with data stored on `Amazon S3` disks use [S3](../engines/table-engines/integrations/s3.md) table engine, and to work with data in the Hadoop Distributed File System — [HDFS](../engines/table-engines/integrations/hdfs.md) table engine.
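A minimal sketch of an `S3`-backed table (the bucket URL is a placeholder):

```sql
CREATE TABLE s3_engine_table (name String, value UInt32)
ENGINE = S3('https://my-bucket.s3.amazonaws.com/test-data.csv.gz', 'CSV', 'gzip');
```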
@ -321,4 +320,4 @@ Zero-copy replication is possible, but not recommended, with `S3` and `HDFS` di
:::warning Zero-copy replication is not ready for production
Zero-copy replication is disabled by default in ClickHouse version 22.8 and higher. This feature is not recommended for production use.
:::
:::

View File

@ -1,9 +1,8 @@
---
slug: /en/operations/utilities/clickhouse-compressor
title: clickhouse-compressor
---
# clickhouse-compressor
Simple program for data compression and decompression.
### Examples

View File

@ -1,112 +1,112 @@
---
slug: /en/operations/utilities/clickhouse-format
title: clickhouse-format
---
# clickhouse-format
Allows formatting input queries.
Keys:
- `--help` or `-h` — Produce help message.
- `--query` — Format queries of any length and complexity.
- `--hilite` — Add syntax highlight with ANSI terminal escape sequences.
- `--oneline` — Format in single line.
- `--quiet` or `-q` — Just check syntax, no output on success.
- `--multiquery` or `-n` — Allow multiple queries in the same file.
- `--obfuscate` — Obfuscate instead of formatting.
- `--seed <string>` — Seed arbitrary string that determines the result of obfuscation.
- `--backslash` — Add a backslash at the end of each line of the formatted query. Can be useful when you copy a query from web or somewhere else with multiple lines, and want to execute it in command line.
## Examples {#examples}
1. Formatting a query:
```bash
$ clickhouse-format --query "select number from numbers(10) where number%2 order by number desc;"
```
Result:
```text
SELECT number
FROM numbers(10)
WHERE number % 2
ORDER BY number DESC
```
2. Highlighting and single line:
```bash
$ clickhouse-format --oneline --hilite <<< "SELECT sum(number) FROM numbers(5);"
```
Result:
```sql
SELECT sum(number) FROM numbers(5)
```
3. Multiqueries:
```bash
$ clickhouse-format -n <<< "SELECT * FROM (SELECT 1 AS x UNION ALL SELECT 1 UNION DISTINCT SELECT 3);"
```
Result:
```text
SELECT *
FROM
(
SELECT 1 AS x
UNION ALL
SELECT 1
UNION DISTINCT
SELECT 3
)
;
```
4. Obfuscating:
```bash
$ clickhouse-format --seed Hello --obfuscate <<< "SELECT cost_first_screen BETWEEN a AND b, CASE WHEN x >= 123 THEN y ELSE NULL END;"
```
Result:
```text
SELECT treasury_mammoth_hazelnut BETWEEN nutmeg AND span, CASE WHEN chive >= 116 THEN switching ELSE ANYTHING END;
```
Same query and another seed string:
```bash
$ clickhouse-format --seed World --obfuscate <<< "SELECT cost_first_screen BETWEEN a AND b, CASE WHEN x >= 123 THEN y ELSE NULL END;"
```
Result:
```text
SELECT horse_tape_summer BETWEEN folklore AND moccasins, CASE WHEN intestine >= 116 THEN nonconformist ELSE FORESTRY END;
```
5. Adding backslash:
```bash
$ clickhouse-format --backslash <<< "SELECT * FROM (SELECT 1 AS x UNION ALL SELECT 1 UNION DISTINCT SELECT 3);"
```
Result:
```text
SELECT * \
FROM \
( \
SELECT 1 AS x \
UNION ALL \
SELECT 1 \
UNION DISTINCT \
SELECT 3 \
)
```

View File

@ -1,7 +1,7 @@
---
slug: /en/operations/utilities/clickhouse-obfuscator
title: clickhouse-obfuscator
---
# clickhouse-obfuscator
A simple tool for table data obfuscation.

View File

@ -1,7 +1,7 @@
---
slug: /en/operations/utilities/odbc-bridge
title: clickhouse-odbc-bridge
---
# clickhouse-odbc-bridge
Simple HTTP server that works like a proxy for an ODBC driver. The main motivation
was possible segfaults or other faults in ODBC implementations, which can

View File

@ -1,10 +1,9 @@
---
slug: /en/sql-reference/aggregate-functions/reference/deltasumtimestamp
sidebar_position: 141
title: deltaSumTimestamp
---
# deltaSumTimestamp
Adds the difference between consecutive rows. If the difference is negative, it is ignored.
This function is primarily for [materialized views](../../../sql-reference/statements/create/view.md#materialized) that are ordered by some time bucket-aligned timestamp, for example, a `toStartOfMinute` bucket. Because the rows in such a materialized view will all have the same timestamp, it is impossible for them to be merged in the "right" order. This function keeps track of the `timestamp` of the values it's seen, so it's possible to order the states correctly during merging.
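A small sketch of the behavior; the drop from 14 to 12 is ignored, so the result is 4 (the values below are made up):

```sql
SELECT deltaSumTimestamp(value, ts)
FROM values('ts DateTime, value UInt32',
    ('2022-01-01 00:00:00', 10),
    ('2022-01-01 00:01:00', 14),
    ('2022-01-01 00:02:00', 12));
```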

View File

@ -2,10 +2,9 @@
slug: /en/sql-reference/aggregate-functions/reference/intervalLengthSum
sidebar_position: 146
sidebar_label: intervalLengthSum
title: intervalLengthSum
---
# intervalLengthSum
Calculates the total length of the union of all ranges (segments on a numeric axis).
**Syntax**

View File

@ -1,10 +1,9 @@
---
slug: /en/sql-reference/aggregate-functions/reference/quantilebfloat16
sidebar_position: 209
title: quantileBFloat16
---
# quantileBFloat16
Computes an approximate [quantile](https://en.wikipedia.org/wiki/Quantile) of a sample consisting of [bfloat16](https://en.wikipedia.org/wiki/Bfloat16_floating-point_format) numbers. `bfloat16` is a floating-point data type with 1 sign bit, 8 exponent bits and 7 fraction bits.
The function converts input values to 32-bit floats and takes the most significant 16 bits. Then it calculates the `bfloat16` quantile value and converts the result to a 64-bit float by appending zero bits.
The function is a fast quantile estimator with a relative error of no more than 0.390625%.
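For example (the level is passed as a parameter, as with the other `quantile*` functions):

```sql
SELECT quantileBFloat16(0.75)(number) FROM numbers(100);
```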

View File

@ -1,10 +1,9 @@
---
slug: /en/sql-reference/aggregate-functions/reference/sumcount
sidebar_position: 144
title: sumCount
---
# sumCount
Calculates the sum of the numbers and counts the number of rows at the same time. This function is used by the ClickHouse query optimizer: if there are multiple `sum`, `count` or `avg` functions in a query, they can be replaced by a single `sumCount` function to reuse the calculations. The function is rarely needed explicitly.
**Syntax**

View File

@ -1,10 +1,9 @@
---
slug: /en/sql-reference/aggregate-functions/reference/sumkahan
sidebar_position: 145
title: sumKahan
---
# sumKahan
Calculates the sum of the numbers with the [Kahan compensated summation algorithm](https://en.wikipedia.org/wiki/Kahan_summation_algorithm).
Slower than the [sum](./sum.md) function.
The compensation works only for [Float](../../../sql-reference/data-types/float.md) types.
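The result shown below can be reproduced with a query along these lines:

```sql
SELECT sum(0.1), sumKahan(0.1) FROM numbers(10);
```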
@ -38,4 +37,4 @@ Result:
┌───────────sum(0.1)─┬─sumKahan(0.1)─┐
│ 0.9999999999999999 │ 1 │
└────────────────────┴───────────────┘
```
```

View File

@ -46,7 +46,7 @@ Binary operations on Decimal result in wider result type (with any order of argu
Rules for scale:
- add, subtract: S = max(S1, S2).
- multuply: S = S1 + S2.
- multiply: S = S1 + S2.
- divide: S = S1.
For similar operations between Decimal and integers, the result is Decimal of the same size as an argument.
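A quick check of the multiplication rule, where the result scale is S1 + S2 = 2 + 3 = 5:

```sql
SELECT toDecimal32(2.50, 2) * toDecimal32(1.250, 3) AS p, toTypeName(p);
-- the scale of p is 2 + 3 = 5
```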

View File

@ -2,10 +2,9 @@
slug: /en/sql-reference/data-types/geo
sidebar_position: 62
sidebar_label: Geo
title: "Geo Data Types"
---
# Geo Data Types
ClickHouse supports data types for representing geographical objects — locations, lands, etc.
:::warning

View File

@ -2,10 +2,9 @@
slug: /en/sql-reference/data-types/multiword-types
sidebar_position: 61
sidebar_label: Multiword Type Names
title: "Multiword Types"
---
# Multiword Types
When creating tables, you can use data types with a name consisting of several words. This is implemented for better SQL compatibility.
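For example, a sketch using two of the supported aliases:

```sql
CREATE TABLE multiword_demo
(
    x DOUBLE PRECISION,  -- alias of Float64
    s CHAR LARGE OBJECT  -- alias of String
)
ENGINE = Memory;
```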
## Multiword Types Support

View File

@ -411,7 +411,7 @@ If setting `allow_read_expired_keys` is set to 1, by default 0. Then dictionary
To improve cache performance, use a subquery with `LIMIT`, and call the function with the dictionary externally.
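A sketch of that pattern, with a hypothetical dictionary `my_dict`, attribute `attr`, and table `big_table`:

```sql
SELECT dictGet('my_dict', 'attr', id)
FROM (SELECT id FROM big_table LIMIT 100);
```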
Supported [sources](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md): MySQL, ClickHouse, executable, HTTP.
All types of sources are supported.
Example of settings:

View File

@ -2,10 +2,9 @@
slug: /en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-polygon
sidebar_position: 46
sidebar_label: Polygon Dictionaries With Grids
title: "Polygon dictionaries"
---
# Polygon dictionaries
Polygon dictionaries allow you to efficiently search for the polygon containing specified points.
For example: defining a city area by geographical coordinates.

View File

@ -1069,7 +1069,7 @@ Formats a Time according to the given Format string. Format is a constant expres
**Syntax**
``` sql
formatDateTime(Time, Format\[, Timezone\])
formatDateTime(Time, Format[, Timezone])
```
**Returned value(s)**
@ -1105,6 +1105,7 @@ Using replacement fields, you can define a pattern for the resulting string. “
| %w | weekday as a decimal number with Sunday as 0 (0-6) | 2 |
| %y | Year, last two digits (00-99) | 18 |
| %Y | Year | 2018 |
| %z | Time offset from UTC as +HHMM or -HHMM | -0500 |
| %% | a % sign | % |
**Example**

View File

@ -2,10 +2,9 @@
slug: /en/sql-reference/functions/encryption-functions
sidebar_position: 67
sidebar_label: Encryption
title: "Encryption functions"
---
# Encryption functions
These functions implement encryption and decryption of data with the AES (Advanced Encryption Standard) algorithm.
Key length depends on encryption mode. It is 16, 24, and 32 bytes long for `-128-`, `-192-`, and `-256-` modes respectively.
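For instance, a sketch with `aes-256-cbc` (32-byte key, 16-byte IV; the literals are placeholders, not real secrets):

```sql
SELECT hex(encrypt('aes-256-cbc', 'Secret text', '12345678901234567890123456789012', '1234567890123456'));
```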

View File

@ -2,10 +2,9 @@
slug: /en/sql-reference/functions/files
sidebar_position: 43
sidebar_label: Files
title: "Functions for Working with Files"
---
# Functions for Working with Files
## file
Reads a file as a String. The file content is not parsed, so any information is read as one string and placed into the specified column.
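For example, assuming files `a.txt` and `b.txt` exist in the configured user files directory, and a hypothetical table `my_table`:

```sql
INSERT INTO my_table SELECT file('a.txt'), file('b.txt');
```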

View File

@ -2,11 +2,9 @@
slug: /en/sql-reference/functions/geo/coordinates
sidebar_label: Geographical Coordinates
sidebar_position: 62
title: "Functions for Working with Geographical Coordinates"
---
# Functions for Working with Geographical Coordinates
## greatCircleDistance
Calculates the distance between two points on the Earth's surface using [the great-circle formula](https://en.wikipedia.org/wiki/Great-circle_distance).
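For example (the arguments are the longitude and latitude of each point, in degrees):

```sql
SELECT greatCircleDistance(-56.31, 38.87, -70.99, 37.84);
```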

View File

@ -1,10 +1,9 @@
---
slug: /en/sql-reference/functions/geo/geohash
sidebar_label: Geohash
title: "Functions for Working with Geohash"
---
# Functions for Working with Geohash
[Geohash](https://en.wikipedia.org/wiki/Geohash) is the geocode system which subdivides the Earth's surface into buckets of grid shape and encodes each cell into a short string of letters and digits. It is a hierarchical data structure, so the longer the geohash string, the more precise the geographic location.
If you need to manually convert geographic coordinates to geohash strings, you can use [geohash.org](http://geohash.org/).
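For example, encoding a coordinate pair (longitude first, then latitude; the third argument is the optional precision):

```sql
SELECT geohashEncode(-5.60302734375, 42.593994140625, 0) AS res;
```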

View File

@ -1,10 +1,9 @@
---
slug: /en/sql-reference/functions/geo/h3
sidebar_label: H3 Indexes
title: "Functions for Working with H3 Indexes"
---
# Functions for Working with H3 Indexes
[H3](https://eng.uber.com/h3/) is a geographical indexing system where the Earth's surface is divided into a grid of even hexagonal cells. This system is hierarchical, i.e. each hexagon on the top level ("parent") can be split into seven even but smaller ones ("children"), and so on.
The level of the hierarchy is called `resolution` and can receive a value from `0` to `15`, where `0` is the `base` level with the largest and coarsest cells.
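For example, getting the H3 index of a point at resolution 15 (longitude first, then latitude):

```sql
SELECT geoToH3(37.79506683, 55.71290588, 15) AS h3Index;
```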

View File

@ -2,9 +2,9 @@
slug: /en/sql-reference/functions/geo/
sidebar_label: Geo
sidebar_position: 62
title: "Geo Functions"
---
# Geo Functions
## Geographical Coordinates Functions

View File

@ -2,10 +2,9 @@
slug: /en/sql-reference/functions/nlp-functions
sidebar_position: 67
sidebar_label: NLP
title: "[experimental] Natural Language Processing functions"
---
# [experimental] Natural Language Processing functions
:::warning
This is an experimental feature that is currently in development and is not ready for general use. It will change in unpredictable backwards-incompatible ways in future releases. Set `allow_experimental_nlp_functions = 1` to enable it.
:::

View File

@ -495,25 +495,23 @@ If the s string is non-empty and does not contain the c character at
Returns the string `s` that was converted from the encoding in `from` to the encoding in `to`.
## Base58Encode(plaintext), Base58Decode(encoded_text)
## base58Encode(plaintext)
Accepts a String and encodes/decodes it using [Base58](https://tools.ietf.org/id/draft-msporny-base58-01.html) encoding scheme using "Bitcoin" alphabet.
Accepts a String and encodes it using [Base58](https://tools.ietf.org/id/draft-msporny-base58-01.html) encoding scheme using "Bitcoin" alphabet.
**Syntax**
```sql
base58Encode(decoded)
base58Decode(encoded)
base58Encode(plaintext)
```
**Arguments**
- `decoded` — [String](../../sql-reference/data-types/string.md) column or constant.
- `encoded` — [String](../../sql-reference/data-types/string.md) column or constant. If the string is not a valid base58-encoded value, an exception is thrown.
- `plaintext` — [String](../../sql-reference/data-types/string.md) column or constant.
**Returned value**
- A string containing encoded/decoded value of 1st argument.
- A string containing encoded value of 1st argument.
Type: [String](../../sql-reference/data-types/string.md).
@ -523,17 +521,48 @@ Query:
``` sql
SELECT base58Encode('Encoded');
SELECT base58Encode('3dc8KtHrwM');
```
Result:
```text
┌─encodeBase58('Encoded')─┐
│ 3dc8KtHrwM │
└──────────────────────────────────┘
┌─decodeBase58('3dc8KtHrwM')─┐
│ Encoded │
└────────────────────────────────────┘
┌─base58Encode('Encoded')─┐
│ 3dc8KtHrwM │
└─────────────────────────┘
```
## base58Decode(encoded_text)
Accepts a String and decodes it using [Base58](https://tools.ietf.org/id/draft-msporny-base58-01.html) encoding scheme using "Bitcoin" alphabet.
**Syntax**
```sql
base58Decode(encoded_text)
```
**Arguments**
- `encoded_text` — [String](../../sql-reference/data-types/string.md) column or constant. If the string is not a valid base58-encoded value, an exception is thrown.
**Returned value**
- A string containing decoded value of 1st argument.
Type: [String](../../sql-reference/data-types/string.md).
**Example**
Query:
``` sql
SELECT base58Decode('3dc8KtHrwM');
```
Result:
```text
┌─base58Decode('3dc8KtHrwM')─┐
│ Encoded │
└────────────────────────────┘
```
## base64Encode(s)

View File

@ -2,10 +2,9 @@
slug: /en/sql-reference/functions/tuple-functions
sidebar_position: 66
sidebar_label: Tuples
title: "Functions for Working with Tuples"
---
# Functions for Working with Tuples
## tuple
A function that allows grouping multiple columns.
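For example (elements are accessed 1-based with `tupleElement`):

```sql
SELECT tuple(1, 'a') AS t, tupleElement(t, 2) AS second;
```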

View File

@ -2,10 +2,9 @@
slug: /en/sql-reference/functions/tuple-map-functions
sidebar_position: 46
sidebar_label: Working with maps
title: "Functions for maps"
---
# Functions for maps
## map
Arranges `key:value` pairs into [Map(key, value)](../../sql-reference/data-types/map.md) data type.
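For example (subscript access retrieves a value by key):

```sql
SELECT map('key1', 1, 'key2', 2) AS m, m['key1'];
```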

View File

@ -2,10 +2,9 @@
slug: /en/sql-reference/statements/alter/column
sidebar_position: 37
sidebar_label: COLUMN
title: "Column Manipulations"
---
# Column Manipulations
A set of queries that allow changing the table structure.
Syntax:

View File

@ -9,8 +9,8 @@ sidebar_label: CONSTRAINT
Constraints could be added or deleted using following syntax:
``` sql
ALTER TABLE [db].name ADD CONSTRAINT constraint_name CHECK expression;
ALTER TABLE [db].name DROP CONSTRAINT constraint_name;
ALTER TABLE [db].name [ON CLUSTER cluster] ADD CONSTRAINT constraint_name CHECK expression;
ALTER TABLE [db].name [ON CLUSTER cluster] DROP CONSTRAINT constraint_name;
```
See more on [constraints](../../../sql-reference/statements/create/table.md#constraints).
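For example (the table and expression are hypothetical):

```sql
ALTER TABLE users ADD CONSTRAINT non_negative_age CHECK age >= 0;
ALTER TABLE users DROP CONSTRAINT non_negative_age;
```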

View File

@ -2,10 +2,9 @@
slug: /en/sql-reference/statements/alter/partition
sidebar_position: 38
sidebar_label: PARTITION
title: "Manipulating Partitions and Parts"
---
# Manipulating Partitions and Parts
The following operations with [partitions](../../../engines/table-engines/mergetree-family/custom-partitioning-key.md) are available:
- [DETACH PARTITION\|PART](#detach-partitionpart) — Moves a partition or part to the `detached` directory and forgets it.

View File

@ -2,10 +2,9 @@
slug: /en/sql-reference/statements/alter/projection
sidebar_position: 49
sidebar_label: PROJECTION
title: "Manipulating Projections"
---
# Manipulating Projections
The following operations with [projections](../../../engines/table-engines/mergetree-family/mergetree.md#projections) are available:
- `ALTER TABLE [db].name ADD PROJECTION name ( SELECT <COLUMN LIST EXPR> [GROUP BY] [ORDER BY] )` - Adds a projection description to the table's metadata.
@ -23,4 +22,4 @@ Also, they are replicated, syncing projections metadata via ZooKeeper.
:::note
Projection manipulation is supported only for tables with [`*MergeTree`](../../../engines/table-engines/mergetree-family/mergetree.md) engine (including [replicated](../../../engines/table-engines/mergetree-family/replication.md) variants).
:::
:::

View File

@ -2,10 +2,9 @@
slug: /en/sql-reference/statements/alter/quota
sidebar_position: 46
sidebar_label: QUOTA
title: "ALTER QUOTA"
---
# ALTER QUOTA
Changes quotas.
Syntax:
@ -37,4 +36,4 @@ For the default user limit the maximum execution time with half a second in 30 m
``` sql
ALTER QUOTA IF EXISTS qB FOR INTERVAL 30 minute MAX execution_time = 0.5, FOR INTERVAL 5 quarter MAX queries = 321, errors = 10 TO default;
```
```

View File

@ -2,10 +2,9 @@
slug: /en/sql-reference/statements/alter/sample-by
sidebar_position: 41
sidebar_label: SAMPLE BY
title: "Manipulating Sampling-Key Expressions"
---
# Manipulating Sampling-Key Expressions
Syntax:
``` sql
@ -18,4 +17,4 @@ The command is lightweight in the sense that it only changes metadata. The prima
:::note
It only works for tables in the [MergeTree](../../../engines/table-engines/mergetree-family/mergetree.md) family (including [replicated](../../../engines/table-engines/mergetree-family/replication.md) tables).
:::
:::

View File

@ -11,7 +11,7 @@ sidebar_label: TTL
You can change [table TTL](../../../engines/table-engines/mergetree-family/mergetree.md#mergetree-table-ttl) with a request of the following form:
``` sql
ALTER TABLE table_name MODIFY TTL ttl_expression;
ALTER TABLE [db.]table_name [ON CLUSTER cluster] MODIFY TTL ttl_expression;
```
## REMOVE TTL
@ -19,7 +19,7 @@ ALTER TABLE table_name MODIFY TTL ttl_expression;
TTL-property can be removed from table with the following query:
```sql
ALTER TABLE table_name REMOVE TTL
ALTER TABLE [db.]table_name [ON CLUSTER cluster] REMOVE TTL
```
**Example**

View File

@ -2,10 +2,9 @@
slug: /en/sql-reference/statements/alter/user
sidebar_position: 45
sidebar_label: USER
title: "ALTER USER"
---
# ALTER USER
Changes ClickHouse user accounts.
Syntax:

View File

@ -2,10 +2,9 @@
slug: /en/sql-reference/statements/attach
sidebar_position: 40
sidebar_label: ATTACH
title: "ATTACH Statement"
---
# ATTACH Statement
Attaches a table or a dictionary, for example, when moving a database to another server.
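For instance, a minimal sketch, assuming a hypothetical table `test` that was previously detached (the full syntax follows):

``` sql
ATTACH TABLE test;
```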
**Syntax**

View File

@ -2,10 +2,9 @@
slug: /en/sql-reference/statements/check-table
sidebar_position: 41
sidebar_label: CHECK
title: "CHECK TABLE Statement"
---
# CHECK TABLE Statement
Checks if the data in the table is corrupted.
``` sql

View File

@ -2,10 +2,9 @@
slug: /en/sql-reference/statements/create/dictionary
sidebar_position: 38
sidebar_label: DICTIONARY
title: "CREATE DICTIONARY"
---
# CREATE DICTIONARY
Creates a new [external dictionary](../../../sql-reference/dictionaries/external-dictionaries/external-dicts.md) with given [structure](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md), [source](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md), [layout](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md) and [lifetime](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md).
**Syntax**

View File

@ -2,10 +2,9 @@
slug: /en/sql-reference/statements/create/quota
sidebar_position: 42
sidebar_label: QUOTA
title: "CREATE QUOTA"
---
# CREATE QUOTA
Creates a [quota](../../../operations/access-rights.md#quotas-management) that can be assigned to a user or a role.
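For instance, a minimal sketch with a hypothetical quota name (the full syntax follows):

``` sql
-- Limits the current user to 123 queries per 15-month interval.
CREATE QUOTA qA FOR INTERVAL 15 month MAX queries = 123 TO CURRENT_USER;
```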
Syntax:

View File

@ -2,10 +2,9 @@
slug: /en/sql-reference/statements/create/role
sidebar_position: 40
sidebar_label: ROLE
title: "CREATE ROLE"
---
# CREATE ROLE
Creates new [roles](../../../operations/access-rights.md#role-management). A role is a set of [privileges](../../../sql-reference/statements/grant.md#grant-privileges). A [user](../../../sql-reference/statements/create/user.md) assigned a role gets all the privileges of this role.
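For instance, a minimal sketch with a hypothetical role and database name (the full syntax follows):

``` sql
CREATE ROLE accountant;
GRANT SELECT ON db.* TO accountant;
```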
Syntax:

View File

@ -2,10 +2,9 @@
slug: /en/sql-reference/statements/create/row-policy
sidebar_position: 41
sidebar_label: ROW POLICY
title: "CREATE ROW POLICY"
---
# CREATE ROW POLICY
Creates a [row policy](../../../operations/access-rights.md#row-policy-management), i.e. a filter used to determine which rows a user can read from a table.
:::warning

View File

@ -2,10 +2,9 @@
slug: /en/sql-reference/statements/create/settings-profile
sidebar_position: 43
sidebar_label: SETTINGS PROFILE
title: "CREATE SETTINGS PROFILE"
---
# CREATE SETTINGS PROFILE
Creates [settings profiles](../../../operations/access-rights.md#settings-profiles-management) that can be assigned to a user or a role.
Syntax:

View File

@ -2,10 +2,9 @@
slug: /en/sql-reference/statements/create/table
sidebar_position: 36
sidebar_label: TABLE
title: "CREATE TABLE"
---
# CREATE TABLE
Creates a new table. This query can have various syntax forms depending on the use case.
By default, tables are created only on the current server. Distributed DDL queries are implemented as the `ON CLUSTER` clause, which is [described separately](../../../sql-reference/distributed-ddl.md).
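For instance, a minimal sketch with hypothetical names:

``` sql
CREATE TABLE db.t
(
    id UInt64,
    s String
)
ENGINE = MergeTree
ORDER BY id;
```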

View File

@ -2,13 +2,9 @@
slug: /en/sql-reference/statements/create/user
sidebar_position: 39
sidebar_label: USER
tags:
- create user
- add user
title: "CREATE USER"
---
# CREATE USER
Creates [user accounts](../../../operations/access-rights.md#user-account-management).
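For instance, a minimal sketch with a hypothetical user name and password (the full syntax follows):

``` sql
CREATE USER mira HOST IP '127.0.0.1' IDENTIFIED WITH sha256_password BY 'qwerty';
```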
Syntax:

View File

@ -2,10 +2,9 @@
slug: /en/sql-reference/statements/describe-table
sidebar_position: 42
sidebar_label: DESCRIBE
title: "DESCRIBE TABLE"
---
# DESCRIBE TABLE
Returns information about table columns.
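For example:

``` sql
DESCRIBE TABLE system.numbers;
```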
**Syntax**

View File

@ -2,10 +2,9 @@
slug: /en/sql-reference/statements/detach
sidebar_position: 43
sidebar_label: DETACH
title: "DETACH Statement"
---
# DETACH Statement
Makes the server "forget" about the existence of a table, a materialized view, or a dictionary.
**Syntax**

View File

@ -2,10 +2,9 @@
slug: /en/sql-reference/statements/explain
sidebar_position: 39
sidebar_label: EXPLAIN
title: "EXPLAIN Statement"
---
# EXPLAIN Statement
Shows the execution plan of a statement.
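For example, a minimal sketch:

``` sql
EXPLAIN SELECT sum(number) FROM numbers(10) GROUP BY number % 2;
```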
Syntax:

View File

@ -2,10 +2,9 @@
slug: /en/sql-reference/statements/kill
sidebar_position: 46
sidebar_label: KILL
title: "KILL Statements"
---
# KILL Statements
There are two kinds of kill statements: to kill a query and to kill a mutation.
## KILL QUERY
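For example, a minimal sketch with a hypothetical `query_id`:

``` sql
KILL QUERY WHERE query_id = '2-857d-4a57-9ee0-327da5d60a90';
```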

View File

@ -2,10 +2,9 @@
slug: /en/sql-reference/statements/optimize
sidebar_position: 47
sidebar_label: OPTIMIZE
title: "OPTIMIZE Statement"
---
# OPTIMIZE Statement
This query tries to initialize an unscheduled merge of data parts for tables.
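For instance, a minimal sketch with a hypothetical table (note the warning below):

``` sql
OPTIMIZE TABLE visits FINAL;
```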
:::warning

View File

@ -1,10 +1,9 @@
---
slug: /en/sql-reference/statements/select/offset
sidebar_label: OFFSET
title: "OFFSET FETCH Clause"
---
# OFFSET FETCH Clause
`OFFSET` and `FETCH` allow you to retrieve data by portions. They specify a block of rows that you want to get with a single query.
``` sql

View File

@ -2,10 +2,9 @@
slug: /en/sql-reference/statements/set-role
sidebar_position: 51
sidebar_label: SET ROLE
title: "SET ROLE Statement"
---
# SET ROLE Statement
Activates roles for the current user.
``` sql

View File

@ -2,10 +2,9 @@
slug: /en/sql-reference/table-functions/cluster
sidebar_position: 50
sidebar_label: cluster
title: "cluster, clusterAllReplicas"
---
# cluster, clusterAllReplicas
Allows accessing all shards of an existing cluster that is configured in the `remote_servers` section, without creating a [Distributed](../../engines/table-engines/special/distributed.md) table. One replica of each shard is queried.
`clusterAllReplicas` function — same as `cluster`, but all replicas are queried. Each replica in a cluster is used as a separate shard/connection.
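For instance, a minimal sketch, assuming a cluster named `default` is configured:

``` sql
SELECT count() FROM cluster('default', system.one);
```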

View File

@ -2,10 +2,9 @@
slug: /en/sql-reference/table-functions/dictionary
sidebar_position: 54
sidebar_label: dictionary function
title: dictionary
---
# dictionary
Displays the [dictionary](../../sql-reference/dictionaries/external-dictionaries/external-dicts.md) data as a ClickHouse table. Works the same way as the [Dictionary](../../engines/table-engines/special/dictionary.md) engine.
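For instance, a minimal sketch, assuming a dictionary named `products` exists (the full syntax follows):

``` sql
SELECT * FROM dictionary('products') LIMIT 10;
```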
**Syntax**

View File

@ -2,10 +2,9 @@
slug: /en/sql-reference/table-functions/null
sidebar_position: 53
sidebar_label: null function
title: 'null'
---
# null
Creates a temporary table of the specified structure with the [Null](../../engines/table-engines/special/null.md) table engine. According to the `Null`-engine properties, the table data is ignored and the table itself is dropped right after the query is executed. The function is used for the convenience of writing tests and demonstrations.
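For instance, a minimal sketch (the full syntax follows):

``` sql
-- The inserted rows are discarded; useful for exercising an INSERT pipeline.
INSERT INTO FUNCTION null('x UInt64') SELECT number AS x FROM numbers(1000);
```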
**Syntax**

View File

@ -2,10 +2,9 @@
slug: /en/sql-reference/table-functions/s3Cluster
sidebar_position: 55
sidebar_label: s3Cluster
title: "s3Cluster Table Function"
---
# s3Cluster Table Function
Allows processing files from [Amazon S3](https://aws.amazon.com/s3/) in parallel from many nodes in a specified cluster. On the initiator it creates a connection to all nodes in the cluster, expands the asterisks in the S3 file path, and dispatches each file dynamically. On a worker node it asks the initiator for the next task to process and processes it. This is repeated until all tasks are finished.
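For instance, a minimal sketch with a hypothetical cluster name, bucket, and structure (the full syntax follows):

``` sql
SELECT count(*)
FROM s3Cluster('my_cluster', 'https://my-bucket.s3.amazonaws.com/data/*.csv', 'CSV', 'id UInt64, value String');
```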
**Syntax**

View File

@ -2,10 +2,9 @@
slug: /en/sql-reference/table-functions/sqlite
sidebar_position: 55
sidebar_label: sqlite
title: sqlite
---
## sqlite
Allows performing queries on data stored in an [SQLite](../../engines/database-engines/sqlite.md) database.
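For instance, a minimal sketch with a hypothetical database path and table name (the full syntax follows):

``` sql
SELECT * FROM sqlite('/data/db.sqlite', 'table1');
```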
**Syntax**
@ -43,4 +42,4 @@ Result:
**See Also**
- [SQLite](../../engines/table-engines/integrations/sqlite.md) table engine
- [SQLite](../../engines/table-engines/integrations/sqlite.md) table engine

View File

@ -2,10 +2,9 @@
slug: /en/sql-reference/table-functions/view
sidebar_position: 51
sidebar_label: view
title: view
---
## view
Turns a subquery into a table. The function implements views (see [CREATE VIEW](https://clickhouse.com/docs/en/sql-reference/statements/create/view/#create-view)). The resulting table does not store data, but only the specified `SELECT` query. When reading from the table, ClickHouse executes the query and removes all unnecessary columns from the result.
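For instance, a minimal sketch (the full syntax follows):

``` sql
SELECT * FROM view(SELECT number FROM numbers(5));
```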
**Syntax**

View File

@ -344,7 +344,7 @@ Eсли суммарное число активных кусков во все
**Usage**
The value of the `min_bytes_to_rebalance_partition_over_jbod` setting must be less than the value of the [max_bytes_to_merge_at_max_space_in_pool](../../operations/settings/merge-tree-settings.md#max-bytes-to-merge-at-max-space-in-pool) setting. Otherwise, ClickHouse throws an exception.
The value of the `min_bytes_to_rebalance_partition_over_jbod` setting must be not less than the value of the [max_bytes_to_merge_at_max_space_in_pool](../../operations/settings/merge-tree-settings.md#max-bytes-to-merge-at-max-space-in-pool) setting / 1024. Otherwise, ClickHouse throws an exception.
## detach_not_byte_identical_parts {#detach_not_byte_identical_parts}

View File

@ -407,7 +407,7 @@ RANGE(MIN StartDate MAX EndDate);
To improve cache performance, use a subquery with `LIMIT` and call the function with the dictionary from the outside.
Supported [sources](external-dicts-dict-sources.md): MySQL, ClickHouse, executable, HTTP.
All kinds of sources are supported.
Configuration example:

View File

@ -1017,7 +1017,7 @@ SELECT timeSlots(toDateTime64('1980-12-12 21:01:02.1234', 4, 'UTC'), toDecimal64
**Syntax**
``` sql
formatDateTime(Time, Format\[, Timezone\])
formatDateTime(Time, Format[, Timezone])
```
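For example:

``` sql
SELECT formatDateTime(now(), '%Y-%m-%d %H:%M:%S');
```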
**Returned value**

View File

@ -16,7 +16,7 @@ sidebar_label: "Функции для работы со строками"
empty(x)
```
A string is considered non-empty if it contains at least one byte, even if that byte is a space or the null byte.
A string is considered non-empty if it contains at least one byte, even if that byte is a space or the null byte.
The function also supports the [Array](array-functions.md#function-empty) and [UUID](uuid-functions.md#empty) types.
@ -56,7 +56,7 @@ SELECT empty('text');
notEmpty(x)
```
A string is considered non-empty if it contains at least one byte, even if that byte is a space or the null byte.
A string is considered non-empty if it contains at least one byte, even if that byte is a space or the null byte.
The function also supports the [Array](array-functions.md#function-notempty) and [UUID](uuid-functions.md#notempty) types.
@ -491,21 +491,21 @@ SELECT concat(key1, key2), sum(value) FROM key_val GROUP BY (key1, key2);
Returns the string `s` converted from the `from` encoding to the `to` encoding.
## Base58Encode(plaintext), Base58Decode(encoded_text) {#base58}
## base58Encode(plaintext), base58Decode(encoded_text) {#base58}
Takes a string or a column of strings as input and encodes/decodes it using the [Base58](https://tools.ietf.org/id/draft-msporny-base58-01.html) encoding scheme with the standard Bitcoin alphabet.
**Syntax**
```sql
encodeBase58(decoded)
decodeBase58(encoded)
base58Encode(decoded)
base58Decode(encoded)
```
**Arguments**
- `decoded` — A column or a string of type [String](../../sql-reference/data-types/string.md).
- `encoded` — A column or a string of type [String](../../sql-reference/data-types/string.md). If the input string is not a valid encoding of some other string, exception `1001` will be thrown.
- `encoded` — A column or a string of type [String](../../sql-reference/data-types/string.md). If the input string is not a valid encoding of some other string, an exception will be thrown.
**Returned value**
@ -518,18 +518,18 @@ decodeBase58(encoded)
Query:
``` sql
SELECT encodeBase58('encode');
SELECT decodeBase58('izCFiDUY');
SELECT base58Encode('Encoded');
SELECT base58Decode('3dc8KtHrwM');
```
Result:
```text
┌─encodeBase58('encode', 'flickr')─┐
│ SvyTHb1D                         │
└──────────────────────────────────┘
┌─decodeBase58('izCFiDUY', 'ripple')─┐
│ decode                             │
└────────────────────────────────────┘
┌─base58Encode('Encoded')─┐
│ 3dc8KtHrwM              │
└─────────────────────────┘
┌─base58Decode('3dc8KtHrwM')─┐
│ Encoded                    │
└────────────────────────────┘
```
## base64Encode(s) {#base64encode}

View File

@ -11,8 +11,8 @@ sidebar_label: "Манипуляции с ограничениями"
A constraint can be added or deleted using the following queries:
``` sql
ALTER TABLE [db].name ADD CONSTRAINT constraint_name CHECK expression;
ALTER TABLE [db].name DROP CONSTRAINT constraint_name;
ALTER TABLE [db].name [ON CLUSTER cluster] ADD CONSTRAINT constraint_name CHECK expression;
ALTER TABLE [db].name [ON CLUSTER cluster] DROP CONSTRAINT constraint_name;
```
These queries add or remove metadata about the constraints of the `[db].name` table, so they are executed instantly.

View File

@ -1,5 +1,5 @@
---
slug: /ru/sql-reference/statements/alter/
slug: /ru/sql-reference/statements/alter/index
toc_hidden_folder: true
sidebar_position: 42
sidebar_label: "Manipulating Indexes"

View File

@ -11,7 +11,7 @@ sidebar_label: TTL
You can change the [table TTL](../../../engines/table-engines/mergetree-family/mergetree.md#mergetree-column-ttl) with a query of the following form:
``` sql
ALTER TABLE table-name MODIFY TTL ttl-expression
ALTER TABLE [db.]table-name [ON CLUSTER cluster] MODIFY TTL ttl-expression
```
## REMOVE TTL {#remove-ttl}
@ -19,7 +19,7 @@ ALTER TABLE table-name MODIFY TTL ttl-expression
The table TTL can be removed with a query of the following form:
```sql
ALTER TABLE table_name REMOVE TTL
ALTER TABLE [db.]table_name [ON CLUSTER cluster] REMOVE TTL
```
**Example**
@ -83,4 +83,4 @@ SELECT * FROM table_with_ttl;
### See Also
- More about the [TTL property](../../../engines/table-engines/mergetree-family/mergetree.md#mergetree-column-ttl).
- Changing a column [with TTL](../../../sql-reference/statements/alter/column.md#alter_modify-column).
- Changing a column [with TTL](../../../sql-reference/statements/alter/column.md#alter_modify-column).

View File

@ -153,8 +153,8 @@ ClickHouse只有一个物理排序由 `order by` 条件决定。要创建一
* Modify the column type. It must be compatible with the original type, otherwise replication fails. For example, a `UInt32` column can be changed to `UInt64`, but a `String` column cannot be changed to `Array(String)`.
* Modify [column TTL](../table-engines/mergetree-family/mergetree/#mergetree-column-ttl).
* Modify [column compression codec](../../sql-reference/statements/create/table.md/#codecs).
* Add [ALIAS columns](../../sql-reference/statements/create/table.md/#alias).
* Modify [column compression codec](../../sql-reference/statements/create/table.mdx#codecs).
* Add [ALIAS columns](../../sql-reference/statements/create/table.mdx#alias).
* Add [skipping indexes](../table-engines/mergetree-family/mergetree/#table_engine-mergetree-data_skipping-indexes)
* Add [projections](../table-engines/mergetree-family/mergetree/#projections).
Note that projection optimization is disabled when `SELECT ... FINAL` is used (which MaterializedMySQL does by default), so its usefulness is limited here; `INDEX ... TYPE hypothesis`, [described in the v21.12 blog post](https://clickhouse.com/blog/en/2021/clickhouse-v21.12-released/), may be more useful in this case.

View File

@ -34,7 +34,7 @@ CREATE DATABASE testdb ENGINE = Replicated('zoo_path', 'shard_name', 'replica_na
When a new replica of the database is created, that replica creates the tables by itself. If the replica has been unavailable for a long time and has lagged behind the replication log, it checks its local metadata against the current metadata in ZooKeeper, moves extra tables containing data into a separate non-replicated database (so as not to accidentally delete anything superfluous), creates the missing tables, and updates table names if they have been renamed. Data is replicated at the `ReplicatedMergeTree` level: if a table is not replicated, its data will not be replicated (the database is responsible only for metadata).
[`ALTER TABLE ATTACH|FETCH|DROP|DROP DETACHED|DETACH PARTITION|PART`](../../sql-reference/statements/alter/partition.md) queries are allowed but not replicated. The database engine only adds/fetches/removes partitions/parts on the current replica. However, if the table itself uses a Replicated table engine, the data will be replicated after `ATTACH` is used.
[`ALTER TABLE ATTACH|FETCH|DROP|DROP DETACHED|DETACH PARTITION|PART`](../../sql-reference/statements/alter/partition.mdx) queries are allowed but not replicated. The database engine only adds/fetches/removes partitions/parts on the current replica. However, if the table itself uses a Replicated table engine, the data will be replicated after `ATTACH` is used.
## Usage Example {#usage-example}
Create a cluster of three hosts:

View File

@ -1 +0,0 @@
../../../../en/engines/table-engines/integrations/ExternalDistributed.md

View File

@ -0,0 +1,10 @@
---
slug: /zh/engines/table-engines/integrations/ExternalDistributed
sidebar_position: 12
sidebar_label: ExternalDistributed
title: ExternalDistributed
---
import Content from '@site/docs/en/engines/table-engines/integrations/ExternalDistributed.md';
<Content />

View File

@ -25,7 +25,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
) ENGINE = Hive('thrift://host:port', 'database', 'table');
PARTITION BY expr
```
See the detailed description of the [CREATE TABLE](../../../sql-reference/statements/create/table.md#create-table-query) query.
See the detailed description of the [CREATE TABLE](../../../sql-reference/statements/create/table.mdx#create-table-query) query.
The table structure can differ from the original Hive table structure:
- Column names should be the same as in the original Hive table, but you can use just some of these columns, in any order; you can also use alias columns computed from other columns.

View File

@ -1 +0,0 @@
../../../../en/engines/table-engines/integrations/materialized-postgresql.md

View File

@ -0,0 +1,10 @@
---
slug: /zh/engines/table-engines/integrations/materialized-postgresql
sidebar_position: 12
sidebar_label: MaterializedPostgreSQL
title: MaterializedPostgreSQL
---
import Content from '@site/docs/en/engines/table-engines/integrations/materialized-postgresql.md';
<Content />

View File

@ -19,7 +19,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
) ENGINE = PostgreSQL('host:port', 'database', 'table', 'user', 'password'[, `schema`]);
```
<!-- See the [CREATE TABLE](../../../sql-reference/statements/create/table.md#create-table-query) query for details. -->
<!-- See the [CREATE TABLE](../../../sql-reference/statements/create/table.mdx#create-table-query) query for details. -->
The table structure can differ from the source PostgreSQL table structure:

View File

@ -57,4 +57,4 @@ SELECT * FROM sqlite_db.table2 ORDER BY col1;
**See Also**
- [SQLite](../../../engines/database-engines/sqlite.md) engine
- [sqlite](../../../sql-reference/table-functions/sqlite.md) table function
- [sqlite](../../../sql-reference/table-functions/sqlite.mdx) table function

Some files were not shown because too many files have changed in this diff