Merge branch 'master' into zookeeper_client_fault_injection

Commit 6a50c20734 by Alexander Tokmakov, 2022-08-30 15:43:30 +03:00, committed by GitHub.
290 changed files with 1628 additions and 644 deletions.

@@ -0,0 +1,22 @@
#define _GNU_SOURCE
#include <unistd.h>
#include <errno.h>
#include <fcntl.h>
#include "syscall.h"

int dup3(int old, int new, int flags)
{
    int r;
#ifdef SYS_dup2
    /* dup3 differs from dup2 here: equal descriptors are an error. */
    if (old==new) return __syscall_ret(-EINVAL);
    if (flags & O_CLOEXEC) {
        /* Prefer the atomic SYS_dup3; retry while the descriptor is busy. */
        while ((r=__syscall(SYS_dup3, old, new, flags))==-EBUSY);
        if (r!=-ENOSYS) return __syscall_ret(r);
    }
    /* Fall back to SYS_dup2 and set FD_CLOEXEC separately (non-atomic). */
    while ((r=__syscall(SYS_dup2, old, new))==-EBUSY);
    if (flags & O_CLOEXEC) __syscall(SYS_fcntl, new, F_SETFD, FD_CLOEXEC);
#else
    while ((r=__syscall(SYS_dup3, old, new, flags))==-EBUSY);
#endif
    return __syscall_ret(r);
}
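A brief usage sketch of the `dup3` above (an editor's illustration, not part of the commit; the target descriptor number 10 is arbitrary):

```c
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    int fd = open("/etc/hosts", O_RDONLY);
    if (fd < 0) return 1;
    /* Duplicate fd onto descriptor 10 with FD_CLOEXEC set atomically;
       unlike dup2, dup3 with equal descriptors fails with EINVAL. */
    if (dup3(fd, 10, O_CLOEXEC) < 0) {
        perror("dup3");
        return 1;
    }
    printf("fd %d duplicated to 10 with close-on-exec\n", fd);
    return 0;
}
```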

@@ -0,0 +1,26 @@
#include <sys/inotify.h>
#include <errno.h>
#include "syscall.h"

int inotify_init()
{
    return inotify_init1(0);
}

int inotify_init1(int flags)
{
    int r = __syscall(SYS_inotify_init1, flags);
#ifdef SYS_inotify_init
    /* Fall back to the legacy syscall on kernels without inotify_init1,
       but only when no flags are requested. */
    if (r==-ENOSYS && !flags) r = __syscall(SYS_inotify_init);
#endif
    return __syscall_ret(r);
}

int inotify_add_watch(int fd, const char *pathname, uint32_t mask)
{
    return syscall(SYS_inotify_add_watch, fd, pathname, mask);
}

int inotify_rm_watch(int fd, int wd)
{
    return syscall(SYS_inotify_rm_watch, fd, wd);
}
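A minimal usage sketch of the inotify wrappers above (an editor's illustration, not part of the commit), watching `/tmp` for file creation:

```c
#include <stdio.h>
#include <sys/inotify.h>
#include <unistd.h>

int main(void)
{
    /* Aligned buffer large enough for one batch of events. */
    char buf[4096] __attribute__((aligned(__alignof__(struct inotify_event))));
    int fd = inotify_init1(0); /* falls back to SYS_inotify_init on old kernels */
    if (fd < 0) { perror("inotify_init1"); return 1; }
    int wd = inotify_add_watch(fd, "/tmp", IN_CREATE);
    if (wd < 0) { perror("inotify_add_watch"); return 1; }
    ssize_t n = read(fd, buf, sizeof buf); /* blocks until an event arrives */
    if (n > 0) {
        const struct inotify_event *ev = (const struct inotify_event *) buf;
        printf("mask 0x%x name %s\n", ev->mask, ev->len ? ev->name : "(none)");
    }
    inotify_rm_watch(fd, wd);
    close(fd);
    return 0;
}
```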

contrib/libuv (vendored submodule)

@@ -1 +1 @@
-Subproject commit 95081e7c16c9857babe6d4e2bc1c779198ea89ae
+Subproject commit 3a85b2eb3d83f369b8a8cafd329d7e9dc28f60cf

@@ -15,6 +15,7 @@ set(uv_sources
    src/inet.c
    src/random.c
    src/strscpy.c
+   src/strtok.c
    src/threadpool.c
    src/timer.c
    src/uv-common.c
@@ -75,13 +76,13 @@ if(CMAKE_SYSTEM_NAME STREQUAL "Linux")
    list(APPEND uv_defines _GNU_SOURCE _POSIX_C_SOURCE=200112)
    list(APPEND uv_libraries rt)
    list(APPEND uv_sources
+       src/unix/epoll.c
        src/unix/linux-core.c
        src/unix/linux-inotify.c
        src/unix/linux-syscalls.c
        src/unix/procfs-exepath.c
        src/unix/random-getrandom.c
-       src/unix/random-sysctl-linux.c
-       src/unix/sysinfo-loadavg.c)
+       src/unix/random-sysctl-linux.c)
endif()
if(CMAKE_SYSTEM_NAME STREQUAL "NetBSD")
@@ -111,6 +112,7 @@ if(CMAKE_SYSTEM_NAME STREQUAL "OS/390")
    src/unix/pthread-fixes.c
    src/unix/pthread-barrier.c
    src/unix/os390.c
+   src/unix/os390-proctitle.c
    src/unix/os390-syscalls.c)
endif()

@@ -0,0 +1,25 @@
---
sidebar_position: 1
sidebar_label: 2022
---
# 2022 Changelog
### ClickHouse release v22.3.12.19-lts (4a08f8a073b) FIXME as compared to v22.3.11.12-lts (137c5f72657)
#### Build/Testing/Packaging Improvement
* Backported in [#40695](https://github.com/ClickHouse/ClickHouse/issues/40695): Fix TGZ packages. [#40681](https://github.com/ClickHouse/ClickHouse/pull/40681) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
#### Bug Fix (user-visible misbehavior in official stable or prestable release)
* Backported in [#40160](https://github.com/ClickHouse/ClickHouse/issues/40160): Fix `HashMethodOneNumber` returning a wrong key value when the column is const. [#40020](https://github.com/ClickHouse/ClickHouse/pull/40020) ([Duc Canh Le](https://github.com/canhld94)).
* Backported in [#40122](https://github.com/ClickHouse/ClickHouse/issues/40122): Fix a bug in `collectFilesToSkip()` by adding the correct file extension (`.idx` or `.idx2`) for indexes to be recalculated, avoiding wrong hard links. Fixed [#39896](https://github.com/ClickHouse/ClickHouse/issues/39896). [#40095](https://github.com/ClickHouse/ClickHouse/pull/40095) ([Jianmei Zhang](https://github.com/zhangjmruc)).
* Backported in [#40207](https://github.com/ClickHouse/ClickHouse/issues/40207): Fix insufficient argument check for encryption functions (found by query fuzzer). This closes [#39987](https://github.com/ClickHouse/ClickHouse/issues/39987). [#40194](https://github.com/ClickHouse/ClickHouse/pull/40194) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Backported in [#40270](https://github.com/ClickHouse/ClickHouse/issues/40270): Fix possible segfault in the CapnProto input format. This bug was found and sent through the ClickHouse bug-bounty [program](https://github.com/ClickHouse/ClickHouse/issues/38986) by *kiojj*. [#40241](https://github.com/ClickHouse/ClickHouse/pull/40241) ([Kruglov Pavel](https://github.com/Avogar)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Fix heap buffer overflow by limiting HTTP chunk size [#40292](https://github.com/ClickHouse/ClickHouse/pull/40292) ([Sema Checherinda](https://github.com/CheSema)).
* Reduce changelog verbosity in CI [#40360](https://github.com/ClickHouse/ClickHouse/pull/40360) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Backport the upstream clickhouse_helper.py [#40490](https://github.com/ClickHouse/ClickHouse/pull/40490) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

@@ -0,0 +1,17 @@
---
sidebar_position: 1
sidebar_label: 2022
---
# 2022 Changelog
### ClickHouse release v22.6.7.7-stable (8eae2af3b9a) FIXME as compared to v22.6.6.16-stable (d2a33ebc822)
#### Build/Testing/Packaging Improvement
* Backported in [#40692](https://github.com/ClickHouse/ClickHouse/issues/40692): Fix TGZ packages. [#40681](https://github.com/ClickHouse/ClickHouse/pull/40681) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
#### Bug Fix (user-visible misbehavior in official stable or prestable release)
* Backported in [#40531](https://github.com/ClickHouse/ClickHouse/issues/40531): Make the proxy resolver stop on the first successful request to the endpoint. [#40353](https://github.com/ClickHouse/ClickHouse/pull/40353) ([Maksim Kita](https://github.com/kitaisreal)).
* Backported in [#40623](https://github.com/ClickHouse/ClickHouse/issues/40623): Fix potential data loss due to a bug in the AWS SDK (https://github.com/aws/aws-sdk-cpp/issues/658). The bug can be triggered only when ClickHouse is used over S3. [#40506](https://github.com/ClickHouse/ClickHouse/pull/40506) ([alesapin](https://github.com/alesapin)).

@@ -0,0 +1,23 @@
---
sidebar_position: 1
sidebar_label: 2022
---
# 2022 Changelog
### ClickHouse release v22.7.5.13-stable (6f48d2d1f59) FIXME as compared to v22.7.4.16-stable (0b9272f8fdc)
#### Build/Testing/Packaging Improvement
* Backported in [#40693](https://github.com/ClickHouse/ClickHouse/issues/40693): Fix TGZ packages. [#40681](https://github.com/ClickHouse/ClickHouse/pull/40681) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
#### Bug Fix (user-visible misbehavior in official stable or prestable release)
* Backported in [#40542](https://github.com/ClickHouse/ClickHouse/issues/40542): Fix potential deadlock in WriteBufferFromS3 during task scheduling failure. [#40070](https://github.com/ClickHouse/ClickHouse/pull/40070) ([Maksim Kita](https://github.com/kitaisreal)).
* Backported in [#40450](https://github.com/ClickHouse/ClickHouse/issues/40450): Fix a rare bug with column TTL for the MergeTree engine family: in case of a repeated vertical merge, the error `Cannot unlink file ColumnName.bin ... No such file or directory.` could occur. [#40346](https://github.com/ClickHouse/ClickHouse/pull/40346) ([alesapin](https://github.com/alesapin)).
* Backported in [#40532](https://github.com/ClickHouse/ClickHouse/issues/40532): Make the proxy resolver stop on the first successful request to the endpoint. [#40353](https://github.com/ClickHouse/ClickHouse/pull/40353) ([Maksim Kita](https://github.com/kitaisreal)).
* Backported in [#40624](https://github.com/ClickHouse/ClickHouse/issues/40624): Fix potential data loss due to a bug in the AWS SDK (https://github.com/aws/aws-sdk-cpp/issues/658). The bug can be triggered only when ClickHouse is used over S3. [#40506](https://github.com/ClickHouse/ClickHouse/pull/40506) ([alesapin](https://github.com/alesapin)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* DNSResolver remove AI_V4MAPPED, AI_ALL hints [#40502](https://github.com/ClickHouse/ClickHouse/pull/40502) ([Maksim Kita](https://github.com/kitaisreal)).

@@ -0,0 +1,25 @@
---
sidebar_position: 1
sidebar_label: 2022
---
# 2022 Changelog
### ClickHouse release v22.8.3.13-lts (6a15b73faea) FIXME as compared to v22.8.2.11-lts (b4ed6d744ff)
#### Improvement
* Backported in [#40550](https://github.com/ClickHouse/ClickHouse/issues/40550): Improve schema inference cache, respect format settings that can change the schema. [#40414](https://github.com/ClickHouse/ClickHouse/pull/40414) ([Kruglov Pavel](https://github.com/Avogar)).
#### Build/Testing/Packaging Improvement
* Backported in [#40694](https://github.com/ClickHouse/ClickHouse/issues/40694): Fix TGZ packages. [#40681](https://github.com/ClickHouse/ClickHouse/pull/40681) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
#### Bug Fix (user-visible misbehavior in official stable or prestable release)
* Backported in [#40451](https://github.com/ClickHouse/ClickHouse/issues/40451): Fix a rare bug with column TTL for the MergeTree engine family: in case of a repeated vertical merge, the error `Cannot unlink file ColumnName.bin ... No such file or directory.` could occur. [#40346](https://github.com/ClickHouse/ClickHouse/pull/40346) ([alesapin](https://github.com/alesapin)).
* Backported in [#40533](https://github.com/ClickHouse/ClickHouse/issues/40533): Make the proxy resolver stop on the first successful request to the endpoint. [#40353](https://github.com/ClickHouse/ClickHouse/pull/40353) ([Maksim Kita](https://github.com/kitaisreal)).
* Backported in [#40625](https://github.com/ClickHouse/ClickHouse/issues/40625): Fix potential data loss due to a bug in the AWS SDK (https://github.com/aws/aws-sdk-cpp/issues/658). The bug can be triggered only when ClickHouse is used over S3. [#40506](https://github.com/ClickHouse/ClickHouse/pull/40506) ([alesapin](https://github.com/alesapin)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* DNSResolver remove AI_V4MAPPED, AI_ALL hints [#40502](https://github.com/ClickHouse/ClickHouse/pull/40502) ([Maksim Kita](https://github.com/kitaisreal)).

@@ -4,7 +4,7 @@ sidebar_position: 50
sidebar_label: MySQL
---
# MySQL
Allows to connect to databases on a remote MySQL server and perform `INSERT` and `SELECT` queries to exchange data between ClickHouse and MySQL.
@@ -99,7 +99,7 @@ mysql> select * from mysql_table;
Database in ClickHouse, exchanging data with the MySQL server:
``` sql
-CREATE DATABASE mysql_db ENGINE = MySQL('localhost:3306', 'test', 'my_user', 'user_password')
+CREATE DATABASE mysql_db ENGINE = MySQL('localhost:3306', 'test', 'my_user', 'user_password') SETTINGS read_write_timeout=10000, connect_timeout=100;
```
``` sql

@@ -2,10 +2,9 @@
slug: /en/engines/table-engines/integrations/ExternalDistributed
sidebar_position: 12
sidebar_label: ExternalDistributed
+title: ExternalDistributed
---
-# ExternalDistributed
The `ExternalDistributed` engine allows to perform `SELECT` queries on data that is stored on a remote servers MySQL or PostgreSQL. Accepts [MySQL](../../../engines/table-engines/integrations/mysql.md) or [PostgreSQL](../../../engines/table-engines/integrations/postgresql.md) engines as an argument so sharding is possible.
## Creating a Table {#creating-a-table}

@@ -2,10 +2,9 @@
slug: /en/engines/table-engines/integrations/materialized-postgresql
sidebar_position: 12
sidebar_label: MaterializedPostgreSQL
+title: MaterializedPostgreSQL
---
-# MaterializedPostgreSQL
Creates ClickHouse table with an initial data dump of PostgreSQL table and starts replication process, i.e. executes background job to apply new changes as they happen on PostgreSQL table in the remote PostgreSQL database.
If more than one table is required, it is highly recommended to use the [MaterializedPostgreSQL](../../../engines/database-engines/materialized-postgresql.md) database engine instead of the table engine and use the `materialized_postgresql_tables_list` setting, which specifies the tables to be replicated (will also be possible to add database `schema`). It will be much better in terms of CPU, fewer connections and fewer replication slots inside the remote PostgreSQL database.

@@ -2,10 +2,9 @@
slug: /en/getting-started/example-datasets/brown-benchmark
sidebar_label: Brown University Benchmark
description: A new analytical benchmark for machine-generated log data
+title: "Brown University Benchmark"
---
-# Brown University Benchmark
`MgBench` is a new analytical benchmark for machine-generated log data, [Andrew Crotty](http://cs.brown.edu/people/acrotty/).
Download the data:

@@ -1,10 +1,9 @@
---
slug: /en/getting-started/example-datasets/cell-towers
sidebar_label: Cell Towers
+title: "Cell Towers"
---
-# Cell Towers
This dataset is from [OpenCellid](https://www.opencellid.org/) - The world's largest Open Database of Cell Towers.
As of 2021, it contains more than 40 million records about cell towers (GSM, LTE, UMTS, etc.) around the world with their geographical coordinates and metadata (country code, network, etc).

@@ -1,10 +1,9 @@
---
slug: /en/getting-started/example-datasets/menus
sidebar_label: New York Public Library "What's on the Menu?" Dataset
+title: "New York Public Library \"What's on the Menu?\" Dataset"
---
-# New York Public Library "What's on the Menu?" Dataset
The dataset is created by the New York Public Library. It contains historical data on the menus of hotels, restaurants and cafes with the dishes along with their prices.
Source: http://menus.nypl.org/data

@@ -2,10 +2,9 @@
slug: /en/getting-started/example-datasets/opensky
sidebar_label: Air Traffic Data
description: The data in this dataset is derived and cleaned from the full OpenSky dataset to illustrate the development of air traffic during the COVID-19 pandemic.
+title: "Crowdsourced air traffic data from The OpenSky Network 2020"
---
-# Crowdsourced air traffic data from The OpenSky Network 2020
The data in this dataset is derived and cleaned from the full OpenSky dataset to illustrate the development of air traffic during the COVID-19 pandemic. It spans all flights seen by the network's more than 2500 members since 1 January 2019. More data will be periodically included in the dataset until the end of the COVID-19 pandemic.
Source: https://zenodo.org/record/5092942#.YRBCyTpRXYd

@@ -1,10 +1,9 @@
---
slug: /en/getting-started/example-datasets/recipes
sidebar_label: Recipes Dataset
+title: "Recipes Dataset"
---
-# Recipes Dataset
RecipeNLG dataset is available for download [here](https://recipenlg.cs.put.poznan.pl/dataset). It contains 2.2 million recipes. The size is slightly less than 1 GB.
## Download and Unpack the Dataset

@@ -2,10 +2,9 @@
slug: /en/getting-started/example-datasets/uk-price-paid
sidebar_label: UK Property Price Paid
sidebar_position: 1
+title: "UK Property Price Paid"
---
-# UK Property Price Paid
The dataset contains data about prices paid for real-estate property in England and Wales. The data is available since year 1995.
The size of the dataset in uncompressed form is about 4 GiB and it will take about 278 MiB in ClickHouse.

@@ -2,10 +2,9 @@
slug: /en/operations/caches
sidebar_position: 65
sidebar_label: Caches
+title: "Cache Types"
---
-# Cache Types
When performing queries, ClickHouse uses different caches.
Main cache types:

@@ -2,10 +2,9 @@
slug: /en/operations/external-authenticators/
sidebar_position: 48
sidebar_label: External User Authenticators and Directories
+title: "External User Authenticators and Directories"
---
-# External User Authenticators and Directories
ClickHouse supports authenticating and managing users using external services.
The following external authenticators and directories are supported:

@@ -1,7 +1,7 @@
---
slug: /en/operations/external-authenticators/ldap
+title: "LDAP"
---
-# LDAP
LDAP server can be used to authenticate ClickHouse users. There are two different approaches for doing this:

@@ -1,7 +1,7 @@
---
slug: /en/operations/external-authenticators/ssl-x509
+title: "SSL X.509 certificate authentication"
---
-# SSL X.509 certificate authentication
[SSL 'strict' option](../server-configuration-parameters/settings.md#server_configuration_parameters-openssl) enables mandatory certificate validation for the incoming connections. In this case, only connections with trusted certificates can be established. Connections with untrusted certificates will be rejected. Thus, certificate validation allows to uniquely authenticate an incoming connection. `Common Name` field of the certificate is used to identify connected user. This allows to associate multiple certificates with the same user. Additionally, reissuing and revoking of the certificates does not affect the ClickHouse configuration.
@@ -24,4 +24,4 @@ To enable SSL certificate authentication, a list of `Common Name`'s for each Cli
</clickhouse>
```
For the SSL [`chain of trust`](https://en.wikipedia.org/wiki/Chain_of_trust) to work correctly, it is also important to make sure that the [`caConfig`](../server-configuration-parameters/settings.md#server_configuration_parameters-openssl) parameter is configured properly.

@@ -2,10 +2,9 @@
slug: /en/operations/opentelemetry
sidebar_position: 62
sidebar_label: OpenTelemetry Support
+title: "[experimental] OpenTelemetry Support"
---
-# [experimental] OpenTelemetry Support
[OpenTelemetry](https://opentelemetry.io/) is an open standard for collecting traces and metrics from the distributed application. ClickHouse has some support for OpenTelemetry.
:::warning

@@ -1,7 +1,7 @@
---
slug: /en/operations/settings/merge-tree-settings
+title: "MergeTree tables settings"
---
-# MergeTree tables settings
The values of `merge_tree` settings (for all MergeTree tables) can be viewed in the table `system.merge_tree_settings`, they can be overridden in `config.xml` in the `merge_tree` section, or set in the `SETTINGS` section of each table.

@@ -1,11 +1,10 @@
---
slug: /en/operations/storing-data
sidebar_position: 68
-sidebar_label: External Disks for Storing Data
+sidebar_label: "External Disks for Storing Data"
+title: "External Disks for Storing Data"
---
-# External Disks for Storing Data
Data, processed in ClickHouse, is usually stored in the local file system — on the same machine with the ClickHouse server. That requires large-capacity disks, which can be expensive enough. To avoid that you can store the data remotely — on [Amazon S3](https://aws.amazon.com/s3/) disks or in the Hadoop Distributed File System ([HDFS](https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-hdfs/HdfsDesign.html)).
To work with data stored on `Amazon S3` disks use [S3](../engines/table-engines/integrations/s3.md) table engine, and to work with data in the Hadoop Distributed File System — [HDFS](../engines/table-engines/integrations/hdfs.md) table engine.
@@ -321,4 +320,4 @@ Zero-copy replication is possible, but not recommended, with `S3` and `HDFS` di
:::warning Zero-copy replication is not ready for production
Zero-copy replication is disabled by default in ClickHouse version 22.8 and higher. This feature is not recommended for production use.
:::

@@ -1,9 +1,8 @@
---
slug: /en/operations/utilities/clickhouse-compressor
+title: clickhouse-compressor
---
-# clickhouse-compressor
Simple program for data compression and decompression.
### Examples

@@ -1,112 +1,112 @@
---
slug: /en/operations/utilities/clickhouse-format
+title: clickhouse-format
---
-# clickhouse-format
Allows formatting input queries.

Keys:

- `--help` or `-h` — Produce help message.
- `--query` — Format queries of any length and complexity.
- `--hilite` — Add syntax highlight with ANSI terminal escape sequences.
- `--oneline` — Format in single line.
- `--quiet` or `-q` — Just check syntax, no output on success.
- `--multiquery` or `-n` — Allow multiple queries in the same file.
- `--obfuscate` — Obfuscate instead of formatting.
- `--seed <string>` — Seed arbitrary string that determines the result of obfuscation.
- `--backslash` — Add a backslash at the end of each line of the formatted query. Can be useful when you copy a query from web or somewhere else with multiple lines, and want to execute it in command line.

## Examples {#examples}

1. Formatting a query:

```bash
$ clickhouse-format --query "select number from numbers(10) where number%2 order by number desc;"
```

Result:

```text
SELECT number
FROM numbers(10)
WHERE number % 2
ORDER BY number DESC
```

2. Highlighting and single line:

```bash
$ clickhouse-format --oneline --hilite <<< "SELECT sum(number) FROM numbers(5);"
```

Result:

```sql
SELECT sum(number) FROM numbers(5)
```

3. Multiqueries:

```bash
$ clickhouse-format -n <<< "SELECT * FROM (SELECT 1 AS x UNION ALL SELECT 1 UNION DISTINCT SELECT 3);"
```

Result:

```text
SELECT *
FROM
(
    SELECT 1 AS x
    UNION ALL
    SELECT 1
    UNION DISTINCT
    SELECT 3
)
;
```

4. Obfuscating:

```bash
$ clickhouse-format --seed Hello --obfuscate <<< "SELECT cost_first_screen BETWEEN a AND b, CASE WHEN x >= 123 THEN y ELSE NULL END;"
```

Result:

```text
SELECT treasury_mammoth_hazelnut BETWEEN nutmeg AND span, CASE WHEN chive >= 116 THEN switching ELSE ANYTHING END;
```

Same query and another seed string:

```bash
$ clickhouse-format --seed World --obfuscate <<< "SELECT cost_first_screen BETWEEN a AND b, CASE WHEN x >= 123 THEN y ELSE NULL END;"
```

Result:

```text
SELECT horse_tape_summer BETWEEN folklore AND moccasins, CASE WHEN intestine >= 116 THEN nonconformist ELSE FORESTRY END;
```

5. Adding backslash:

```bash
$ clickhouse-format --backslash <<< "SELECT * FROM (SELECT 1 AS x UNION ALL SELECT 1 UNION DISTINCT SELECT 3);"
```

Result:

```text
SELECT * \
FROM \
( \
    SELECT 1 AS x \
    UNION ALL \
    SELECT 1 \
    UNION DISTINCT \
    SELECT 3 \
)
```

@@ -1,7 +1,7 @@
---
slug: /en/operations/utilities/clickhouse-obfuscator
+title: clickhouse-obfuscator
---
-# clickhouse-obfuscator
A simple tool for table data obfuscation.

@@ -1,7 +1,7 @@
---
slug: /en/operations/utilities/odbc-bridge
+title: clickhouse-odbc-bridge
---
-# clickhouse-odbc-bridge
Simple HTTP-server which works like a proxy for the ODBC driver. The main motivation
was possible segfaults or other faults in ODBC implementations, which can

@@ -1,10 +1,9 @@
---
slug: /en/sql-reference/aggregate-functions/reference/deltasumtimestamp
sidebar_position: 141
+title: deltaSumTimestamp
---
-# deltaSumTimestamp
Adds the difference between consecutive rows. If the difference is negative, it is ignored.
This function is primarily for [materialized views](../../../sql-reference/statements/create/view.md#materialized) that are ordered by some time bucket-aligned timestamp, for example, a `toStartOfMinute` bucket. Because the rows in such a materialized view will all have the same timestamp, it is impossible for them to be merged in the "right" order. This function keeps track of the `timestamp` of the values it's seen, so it's possible to order the states correctly during merging.
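The merge behaviour described above can be sketched as follows (an editor's illustration with hypothetical field names, not ClickHouse's internal state):

```c
#include <stdint.h>

/* Hypothetical partial-aggregation state: besides the running sum it keeps
   the first/last value and timestamp so states can be ordered when merged. */
typedef struct {
    double   sum;                 /* accumulated non-negative deltas */
    double   first, last;         /* boundary values of this state   */
    uint64_t first_ts, last_ts;   /* boundary timestamps             */
} DeltaSumTsState;

/* Merge `b` into `a`, assuming a->last_ts <= b->first_ts. The delta that
   spans the two states is added only if it is positive, matching the
   "negative differences are ignored" rule. */
static void delta_sum_ts_merge(DeltaSumTsState *a, const DeltaSumTsState *b)
{
    double gap = b->first - a->last;
    a->sum += b->sum + (gap > 0 ? gap : 0);
    a->last = b->last;
    a->last_ts = b->last_ts;
}
```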

@@ -2,10 +2,9 @@
slug: /en/sql-reference/aggregate-functions/reference/intervalLengthSum
sidebar_position: 146
sidebar_label: intervalLengthSum
+title: intervalLengthSum
---
-# intervalLengthSum
Calculates the total length of union of all ranges (segments on numeric axis).
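The union-of-segments computation can be sketched with a sort-and-sweep pass (an editor's illustration, not ClickHouse's implementation):

```c
#include <math.h>
#include <stdlib.h>

typedef struct { double begin, end; } Segment;

static int cmp_begin(const void *a, const void *b)
{
    double d = ((const Segment *)a)->begin - ((const Segment *)b)->begin;
    return (d > 0) - (d < 0);
}

/* Total length covered by the union of all segments: sort by start,
   then count only the portion of each segment not already covered. */
static double union_length(Segment *s, size_t n)
{
    qsort(s, n, sizeof *s, cmp_begin);
    double total = 0.0, covered_to = -HUGE_VAL;
    for (size_t i = 0; i < n; i++) {
        double from = s[i].begin > covered_to ? s[i].begin : covered_to;
        if (s[i].end > from) total += s[i].end - from;
        if (s[i].end > covered_to) covered_to = s[i].end;
    }
    return total;
}
```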
**Syntax**

@@ -1,10 +1,9 @@
---
slug: /en/sql-reference/aggregate-functions/reference/quantilebfloat16
sidebar_position: 209
+title: quantileBFloat16
---
-# quantileBFloat16
Computes an approximate [quantile](https://en.wikipedia.org/wiki/Quantile) of a sample consisting of [bfloat16](https://en.wikipedia.org/wiki/Bfloat16_floating-point_format) numbers. `bfloat16` is a floating-point data type with 1 sign bit, 8 exponent bits and 7 fraction bits.
The function converts input values to 32-bit floats and takes the most significant 16 bits. Then it calculates `bfloat16` quantile value and converts the result to a 64-bit float by appending zero bits.
The function is a fast quantile estimator with a relative error no more than 0.390625%.
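The conversion step described above amounts to truncating a 32-bit float to its high 16 bits; a sketch (editor's illustration):

```c
#include <stdint.h>
#include <string.h>

/* Keep only the most significant 16 bits of a float:
   1 sign bit, 8 exponent bits, 7 fraction bits = bfloat16. */
static uint16_t float_to_bfloat16(float x)
{
    uint32_t bits;
    memcpy(&bits, &x, sizeof bits);
    return (uint16_t)(bits >> 16);
}

/* Widen back by appending zero bits, as the quantile result is. */
static float bfloat16_to_float(uint16_t b)
{
    uint32_t bits = (uint32_t)b << 16;
    float x;
    memcpy(&x, &bits, sizeof x);
    return x;
}
```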

@@ -1,10 +1,9 @@
---
slug: /en/sql-reference/aggregate-functions/reference/sumcount
sidebar_position: 144
+title: sumCount
---
-# sumCount
Calculates the sum of the numbers and counts the number of rows at the same time. The function is used by the ClickHouse query optimizer: if there are multiple `sum`, `count` or `avg` functions in a query, they can be replaced by a single `sumCount` function to reuse the calculations. The function is rarely needed explicitly.
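The fused pass the optimizer relies on can be sketched as one accumulator updated per row (editor's illustration):

```c
#include <stdint.h>

typedef struct { double sum; uint64_t count; } SumCountState;

/* One scan maintains both aggregates; avg is then sum / count,
   so multiple sum/count/avg calls can share this single state. */
static void sum_count_add(SumCountState *s, double x)
{
    s->sum += x;
    s->count += 1;
}
```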
**Syntax**

@@ -1,10 +1,9 @@
---
slug: /en/sql-reference/aggregate-functions/reference/sumkahan
sidebar_position: 145
+title: sumKahan
---
-# sumKahan
Calculates the sum of the numbers with [Kahan compensated summation algorithm](https://en.wikipedia.org/wiki/Kahan_summation_algorithm)
Slower than [sum](./sum.md) function.
The compensation works only for [Float](../../../sql-reference/data-types/float.md) types.
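For reference, a compact sketch of Kahan compensated summation (editor's illustration, not ClickHouse's code):

```c
#include <stddef.h>

/* The compensation term c recovers low-order bits that plain
   floating-point addition would drop at each step. */
static double sum_kahan(const double *v, size_t n)
{
    double sum = 0.0, c = 0.0;
    for (size_t i = 0; i < n; i++) {
        double y = v[i] - c;   /* apply previous compensation    */
        double t = sum + y;    /* low bits of y may be lost here */
        c = (t - sum) - y;     /* capture what was lost          */
        sum = t;
    }
    return sum;
}
```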
@@ -38,4 +37,4 @@ Result:
┌───────────sum(0.1)─┬─sumKahan(0.1)─┐
│ 0.9999999999999999 │ 1 │
└────────────────────┴───────────────┘
```

@@ -2,10 +2,9 @@
slug: /en/sql-reference/data-types/geo
sidebar_position: 62
sidebar_label: Geo
+title: "Geo Data Types"
---
-# Geo Data Types
ClickHouse supports data types for representing geographical objects — locations, lands, etc.
:::warning

@@ -2,10 +2,9 @@
slug: /en/sql-reference/data-types/multiword-types
sidebar_position: 61
sidebar_label: Multiword Type Names
+title: "Multiword Types"
---
-# Multiword Types
When creating tables, you can use data types with a name consisting of several words. This is implemented for better SQL compatibility.
## Multiword Types Support

@@ -411,7 +411,7 @@ If setting `allow_read_expired_keys` is set to 1, by default 0. Then dictionary
To improve cache performance, use a subquery with `LIMIT`, and call the function with the dictionary externally.
-Supported [sources](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md): MySQL, ClickHouse, executable, HTTP.
+All types of sources are supported.
Example of settings:

@@ -2,10 +2,9 @@
slug: /en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-polygon
sidebar_position: 46
sidebar_label: Polygon Dictionaries With Grids
+title: "Polygon dictionaries"
---
-# Polygon dictionaries
Polygon dictionaries allow you to efficiently search for the polygon containing specified points.
For example: defining a city area by geographical coordinates.

@@ -2,10 +2,9 @@
slug: /en/sql-reference/functions/encryption-functions
sidebar_position: 67
sidebar_label: Encryption
+title: "Encryption functions"
---
-# Encryption functions
These functions implement encryption and decryption of data with AES (Advanced Encryption Standard) algorithm.
Key length depends on encryption mode. It is 16, 24, and 32 bytes long for `-128-`, `-192-`, and `-256-` modes respectively.

@@ -2,10 +2,9 @@
slug: /en/sql-reference/functions/files
sidebar_position: 43
sidebar_label: Files
+title: "Functions for Working with Files"
---
-# Functions for Working with Files
## file
Reads file as a String. The file content is not parsed, so any information is read as one string and placed into the specified column.

@@ -2,11 +2,9 @@
slug: /en/sql-reference/functions/geo/coordinates
sidebar_label: Geographical Coordinates
sidebar_position: 62
+title: "Functions for Working with Geographical Coordinates"
---
-# Functions for Working with Geographical Coordinates
## greatCircleDistance
Calculates the distance between two points on the Earth's surface using [the great-circle formula](https://en.wikipedia.org/wiki/Great-circle_distance).
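A haversine-style sketch of the great-circle formula (editor's illustration; ClickHouse's exact constants and precision tricks may differ):

```c
#include <math.h>

/* Distance in meters between (lon1, lat1) and (lon2, lat2), in degrees. */
static double great_circle_distance_m(double lon1, double lat1,
                                      double lon2, double lat2)
{
    const double R = 6371000.0;     /* mean Earth radius, meters */
    const double rad = M_PI / 180.0;
    double dlat = (lat2 - lat1) * rad;
    double dlon = (lon2 - lon1) * rad;
    double a = sin(dlat / 2) * sin(dlat / 2)
             + cos(lat1 * rad) * cos(lat2 * rad) * sin(dlon / 2) * sin(dlon / 2);
    return 2.0 * R * atan2(sqrt(a), sqrt(1.0 - a));
}
```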

@@ -1,10 +1,9 @@
---
slug: /en/sql-reference/functions/geo/geohash
sidebar_label: Geohash
+title: "Functions for Working with Geohash"
---
-# Functions for Working with Geohash
[Geohash](https://en.wikipedia.org/wiki/Geohash) is the geocode system, which subdivides Earth's surface into buckets of grid shape and encodes each cell into a short string of letters and digits. It is a hierarchical data structure, so the longer is the geohash string, the more precise is the geographic location.
If you need to manually convert geographic coordinates to geohash strings, you can use [geohash.org](http://geohash.org/).

@@ -1,10 +1,9 @@
---
slug: /en/sql-reference/functions/geo/h3
sidebar_label: H3 Indexes
+title: "Functions for Working with H3 Indexes"
---
-# Functions for Working with H3 Indexes
[H3](https://eng.uber.com/h3/) is a geographical indexing system where Earth's surface is divided into a grid of even hexagonal cells. This system is hierarchical, i.e. each hexagon on the top level ("parent") can be split into seven even but smaller ones ("children"), and so on.
The level of the hierarchy is called `resolution` and can receive a value from `0` till `15`, where `0` is the `base` level with the largest and coarsest cells.

@@ -2,9 +2,9 @@
slug: /en/sql-reference/functions/geo/
sidebar_label: Geo
sidebar_position: 62
+title: "Geo Functions"
---
-# Geo Functions
## Geographical Coordinates Functions

@@ -2,10 +2,9 @@
slug: /en/sql-reference/functions/nlp-functions
sidebar_position: 67
sidebar_label: NLP
+title: "[experimental] Natural Language Processing functions"
---
-# [experimental] Natural Language Processing functions
:::warning
This is an experimental feature that is currently in development and is not ready for general use. It will change in unpredictable backwards-incompatible ways in future releases. Set `allow_experimental_nlp_functions = 1` to enable it.
:::

@@ -495,7 +495,7 @@ If the s string is non-empty and does not contain the c character at
Returns the string s that was converted from the encoding in from to the encoding in to.
-## Base58Encode(plaintext), Base58Decode(encoded_text)
+## base58Encode(plaintext), base58Decode(encoded_text)
Accepts a String and encodes/decodes it using [Base58](https://tools.ietf.org/id/draft-msporny-base58-01.html) encoding scheme using "Bitcoin" alphabet.
@@ -523,7 +523,7 @@ Query:
``` sql
SELECT base58Encode('Encoded');
-SELECT base58Encode('3dc8KtHrwM');
+SELECT base58Decode('3dc8KtHrwM');
```
Result:

@@ -2,10 +2,9 @@
slug: /en/sql-reference/functions/tuple-functions
sidebar_position: 66
sidebar_label: Tuples
+title: "Functions for Working with Tuples"
---
-# Functions for Working with Tuples
## tuple
A function that allows grouping multiple columns.

@@ -2,10 +2,9 @@
slug: /en/sql-reference/functions/tuple-map-functions
sidebar_position: 46
sidebar_label: Working with maps
+title: "Functions for maps"
---
-# Functions for maps
## map
Arranges `key:value` pairs into [Map(key, value)](../../sql-reference/data-types/map.md) data type.

@@ -2,10 +2,9 @@
slug: /en/sql-reference/statements/alter/column
sidebar_position: 37
sidebar_label: COLUMN
+title: "Column Manipulations"
---
-# Column Manipulations
A set of queries that allow changing the table structure.
Syntax:

@@ -2,10 +2,9 @@
slug: /en/sql-reference/statements/alter/partition
sidebar_position: 38
sidebar_label: PARTITION
+title: "Manipulating Partitions and Parts"
---
-# Manipulating Partitions and Parts
The following operations with [partitions](../../../engines/table-engines/mergetree-family/custom-partitioning-key.md) are available:
- [DETACH PARTITION\|PART](#detach-partitionpart) — Moves a partition or part to the `detached` directory and forget it.

@@ -2,10 +2,9 @@
slug: /en/sql-reference/statements/alter/projection
sidebar_position: 49
sidebar_label: PROJECTION
+title: "Manipulating Projections"
---
-# Manipulating Projections
The following operations with [projections](../../../engines/table-engines/mergetree-family/mergetree.md#projections) are available:
- `ALTER TABLE [db].name ADD PROJECTION name ( SELECT <COLUMN LIST EXPR> [GROUP BY] [ORDER BY] )` - Adds projection description to tables metadata.
@@ -23,4 +22,4 @@ Also, they are replicated, syncing projections metadata via ZooKeeper.
:::note
Projection manipulation is supported only for tables with [`*MergeTree`](../../../engines/table-engines/mergetree-family/mergetree.md) engine (including [replicated](../../../engines/table-engines/mergetree-family/replication.md) variants).
:::

@@ -2,10 +2,9 @@
slug: /en/sql-reference/statements/alter/quota
sidebar_position: 46
sidebar_label: QUOTA
+title: "ALTER QUOTA"
---
-# ALTER QUOTA
Changes quotas.
Syntax:
@@ -37,4 +36,4 @@ For the default user limit the maximum execution time with half a second in 30 m
``` sql
ALTER QUOTA IF EXISTS qB FOR INTERVAL 30 minute MAX execution_time = 0.5, FOR INTERVAL 5 quarter MAX queries = 321, errors = 10 TO default;
```

@@ -2,10 +2,9 @@
slug: /en/sql-reference/statements/alter/sample-by
sidebar_position: 41
sidebar_label: SAMPLE BY
+title: "Manipulating Sampling-Key Expressions"
---
-# Manipulating Sampling-Key Expressions
Syntax:
``` sql
@@ -18,4 +17,4 @@ The command is lightweight in the sense that it only changes metadata. The prima
:::note
It only works for tables in the [MergeTree](../../../engines/table-engines/mergetree-family/mergetree.md) family (including [replicated](../../../engines/table-engines/mergetree-family/replication.md) tables).
:::

@@ -2,10 +2,9 @@
slug: /en/sql-reference/statements/alter/user
sidebar_position: 45
sidebar_label: USER
+title: "ALTER USER"
---
-# ALTER USER
Changes ClickHouse user accounts.
Syntax:

@@ -2,10 +2,9 @@
slug: /en/sql-reference/statements/attach
sidebar_position: 40
sidebar_label: ATTACH
+title: "ATTACH Statement"
---
-# ATTACH Statement
Attaches a table or a dictionary, for example, when moving a database to another server.
**Syntax**

@@ -2,10 +2,9 @@
slug: /en/sql-reference/statements/check-table
sidebar_position: 41
sidebar_label: CHECK
+title: "CHECK TABLE Statement"
---
-# CHECK TABLE Statement
Checks if the data in the table is corrupted.
``` sql

@@ -2,10 +2,9 @@
slug: /en/sql-reference/statements/create/dictionary
sidebar_position: 38
sidebar_label: DICTIONARY
+title: "CREATE DICTIONARY"
---
-# CREATE DICTIONARY
Creates a new [external dictionary](../../../sql-reference/dictionaries/external-dictionaries/external-dicts.md) with given [structure](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md), [source](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md), [layout](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md) and [lifetime](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md).
**Syntax**

@@ -2,10 +2,9 @@
slug: /en/sql-reference/statements/create/quota
sidebar_position: 42
sidebar_label: QUOTA
+title: "CREATE QUOTA"
---
-# CREATE QUOTA
Creates a [quota](../../../operations/access-rights.md#quotas-management) that can be assigned to a user or a role.
Syntax:

@@ -2,10 +2,9 @@
slug: /en/sql-reference/statements/create/role
sidebar_position: 40
sidebar_label: ROLE
+title: "CREATE ROLE"
---
-# CREATE ROLE
Creates new [roles](../../../operations/access-rights.md#role-management). Role is a set of [privileges](../../../sql-reference/statements/grant.md#grant-privileges). A [user](../../../sql-reference/statements/create/user.md) assigned a role gets all the privileges of this role.
Syntax:

@@ -2,10 +2,9 @@
slug: /en/sql-reference/statements/create/row-policy
sidebar_position: 41
sidebar_label: ROW POLICY
+title: "CREATE ROW POLICY"
---
-# CREATE ROW POLICY
Creates a [row policy](../../../operations/access-rights.md#row-policy-management), i.e. a filter used to determine which rows a user can read from a table.
:::warning

@@ -2,10 +2,9 @@
slug: /en/sql-reference/statements/create/settings-profile
sidebar_position: 43
sidebar_label: SETTINGS PROFILE
+title: "CREATE SETTINGS PROFILE"
---
-# CREATE SETTINGS PROFILE
Creates [settings profiles](../../../operations/access-rights.md#settings-profiles-management) that can be assigned to a user or a role.
Syntax:

View File

@ -2,10 +2,9 @@
slug: /en/sql-reference/statements/create/table
sidebar_position: 36
sidebar_label: TABLE
+title: "CREATE TABLE"
---
-# CREATE TABLE
Creates a new table. This query can have various syntax forms depending on the use case.
By default, tables are created only on the current server. Distributed DDL queries are implemented as the `ON CLUSTER` clause, which is [described separately](../../../sql-reference/distributed-ddl.md).
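A minimal sketch of the common explicit-schema form (table, columns, and sorting key are illustrative):

``` sql
CREATE TABLE IF NOT EXISTS events
(
    `date` Date,
    `user_id` UInt64,
    `message` String
)
ENGINE = MergeTree
ORDER BY (date, user_id);
```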

View File

@ -2,13 +2,9 @@
slug: /en/sql-reference/statements/create/user
sidebar_position: 39
sidebar_label: USER
-tags:
-  - create user
-  - add user
+title: "CREATE USER"
---
-# CREATE USER
Creates [user accounts](../../../operations/access-rights.md#user-account-management).
Syntax:
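As an illustration of the statement in question (the user name, host restriction, and password are made up):

``` sql
CREATE USER mira HOST IP '127.0.0.1' IDENTIFIED WITH sha256_password BY 'qwerty';
```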

View File

@ -2,10 +2,9 @@
slug: /en/sql-reference/statements/describe-table
sidebar_position: 42
sidebar_label: DESCRIBE
+title: "DESCRIBE TABLE"
---
-# DESCRIBE TABLE
Returns information about table columns.
**Syntax**
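A minimal usage sketch against a system table:

``` sql
DESCRIBE TABLE system.numbers;
```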

View File

@ -2,10 +2,9 @@
slug: /en/sql-reference/statements/detach
sidebar_position: 43
sidebar_label: DETACH
+title: "DETACH Statement"
---
-# DETACH Statement
Makes the server "forget" about the existence of a table, a materialized view, or a dictionary.
**Syntax**
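A minimal sketch of the statement described above (the table name is illustrative):

``` sql
DETACH TABLE events;
-- or, to keep the table detached across server restarts:
DETACH TABLE events PERMANENTLY;
```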

View File

@ -2,10 +2,9 @@
slug: /en/sql-reference/statements/explain
sidebar_position: 39
sidebar_label: EXPLAIN
+title: "EXPLAIN Statement"
---
-# EXPLAIN Statement
Shows the execution plan of a statement.
Syntax:
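For orientation, a simple query to inspect (the grouping expression is arbitrary):

``` sql
EXPLAIN SELECT sum(number) FROM numbers(10) GROUP BY number % 4;
```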

View File

@ -2,10 +2,9 @@
slug: /en/sql-reference/statements/kill
sidebar_position: 46
sidebar_label: KILL
+title: "KILL Statements"
---
-# KILL Statements
There are two kinds of kill statements: to kill a query and to kill a mutation.
## KILL QUERY
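For reference, the query form might look like this (the `query_id` value is illustrative):

``` sql
-- Kill all queries with the specified query_id:
KILL QUERY WHERE query_id = '2-857d-4a57-9ee0-327da5d60a90';
```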

View File

@ -2,10 +2,9 @@
slug: /en/sql-reference/statements/optimize
sidebar_position: 47
sidebar_label: OPTIMIZE
+title: "OPTIMIZE Statement"
---
-# OPTIMIZE Statement
This query tries to initialize an unscheduled merge of data parts for tables.
:::warning

View File

@ -1,10 +1,9 @@
---
slug: /en/sql-reference/statements/select/offset
sidebar_label: OFFSET
+title: "OFFSET FETCH Clause"
---
-# OFFSET FETCH Clause
`OFFSET` and `FETCH` allow you to retrieve data by portions. They specify a row block which you want to get by a single query.
``` sql

View File

@ -2,10 +2,9 @@
slug: /en/sql-reference/statements/set-role
sidebar_position: 51
sidebar_label: SET ROLE
+title: "SET ROLE Statement"
---
-# SET ROLE Statement
Activates roles for the current user.
``` sql

View File

@ -2,10 +2,9 @@
slug: /en/sql-reference/table-functions/cluster
sidebar_position: 50
sidebar_label: cluster
+title: "cluster, clusterAllReplicas"
---
-# cluster, clusterAllReplicas
Allows accessing all shards of an existing cluster configured in the `remote_servers` section without creating a [Distributed](../../engines/table-engines/special/distributed.md) table. One replica of each shard is queried.
The `clusterAllReplicas` function is the same as `cluster`, but all replicas are queried. Each replica in the cluster is used as a separate shard/connection.
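A minimal usage sketch, assuming a cluster named `my_cluster` exists in `remote_servers` (the name is illustrative):

``` sql
SELECT * FROM cluster('my_cluster', system.one);
SELECT count() FROM clusterAllReplicas('my_cluster', system.one);
```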

View File

@ -2,10 +2,9 @@
slug: /en/sql-reference/table-functions/dictionary
sidebar_position: 54
sidebar_label: dictionary function
+title: dictionary
---
-# dictionary
Displays the [dictionary](../../sql-reference/dictionaries/external-dictionaries/external-dicts.md) data as a ClickHouse table. Works the same way as the [Dictionary](../../engines/table-engines/special/dictionary.md) engine.
**Syntax**
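A minimal usage sketch (the dictionary name is illustrative and must already exist):

``` sql
SELECT * FROM dictionary('my_dict');
```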

View File

@ -2,10 +2,9 @@
slug: /en/sql-reference/table-functions/null
sidebar_position: 53
sidebar_label: null function
+title: 'null'
---
-# null
Creates a temporary table of the specified structure with the [Null](../../engines/table-engines/special/null.md) table engine. According to the `Null`-engine properties, the table data is ignored and the table itself is immediately dropped after the query execution. The function exists for the convenience of test writing and demonstrations.
**Syntax**
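For orientation, a common pattern is inserting into the function to discard data, e.g. for benchmarking (the structure string is illustrative):

``` sql
INSERT INTO FUNCTION null('x UInt64') SELECT * FROM numbers(10);
```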

View File

@ -2,10 +2,9 @@
slug: /en/sql-reference/table-functions/s3Cluster
sidebar_position: 55
sidebar_label: s3Cluster
+title: "s3Cluster Table Function"
---
-# s3Cluster Table Function
Allows processing files from [Amazon S3](https://aws.amazon.com/s3/) in parallel from many nodes in a specified cluster. On the initiator it creates a connection to all nodes in the cluster, expands asterisks in the S3 file path, and dispatches each file dynamically. On a worker node it asks the initiator about the next task to process and processes it. This is repeated until all tasks are finished.
**Syntax**
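A hedged sketch of a call (the cluster name, bucket URL, and structure string are made up):

``` sql
SELECT * FROM s3Cluster(
    'my_cluster',
    'https://my-bucket.s3.amazonaws.com/data/*.csv',
    'CSV',
    'name String, value UInt32'
);
```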

View File

@ -2,10 +2,9 @@
slug: /en/sql-reference/table-functions/sqlite
sidebar_position: 55
sidebar_label: sqlite
+title: sqlite
---
-## sqlite
Allows performing queries on data stored in an [SQLite](../../engines/database-engines/sqlite.md) database.
**Syntax**
@ -43,4 +42,4 @@ Result:
**See Also**
- [SQLite](../../engines/table-engines/integrations/sqlite.md) table engine
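A minimal usage sketch of the function covered above (the database path and table name are illustrative):

``` sql
SELECT * FROM sqlite('data.sqlite', 'table1');
```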

View File

@ -2,10 +2,9 @@
slug: /en/sql-reference/table-functions/view
sidebar_position: 51
sidebar_label: view
+title: view
---
-## view
Turns a subquery into a table. The function implements views (see [CREATE VIEW](https://clickhouse.com/docs/en/sql-reference/statements/create/view/#create-view)). The resulting table does not store data, but only stores the specified `SELECT` query. When reading from the table, ClickHouse executes the query and deletes all unnecessary columns from the result.
**Syntax**
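A minimal usage sketch (the subquery is arbitrary):

``` sql
SELECT * FROM view(SELECT number AS n FROM numbers(5));
```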

View File

@ -344,7 +344,7 @@ If the total number of active parts in all
**Usage**
-The value of the `min_bytes_to_rebalance_partition_over_jbod` setting must be less than the value of the [max_bytes_to_merge_at_max_space_in_pool](../../operations/settings/merge-tree-settings.md#max-bytes-to-merge-at-max-space-in-pool) setting. Otherwise, ClickHouse throws an exception.
+The value of the `min_bytes_to_rebalance_partition_over_jbod` setting must be no less than the value of the [max_bytes_to_merge_at_max_space_in_pool](../../operations/settings/merge-tree-settings.md#max-bytes-to-merge-at-max-space-in-pool) setting / 1024. Otherwise, ClickHouse throws an exception.
## detach_not_byte_identical_parts {#detach_not_byte_identical_parts}

View File

@ -407,7 +407,7 @@ RANGE(MIN StartDate MAX EndDate);
To increase cache performance, use a subquery with `LIMIT` and call the function with the dictionary on the outside.
-Supported [sources](external-dicts-dict-sources.md): MySQL, ClickHouse, executable, HTTP.
+All kinds of sources are supported.
Configuration example:

View File

@ -16,7 +16,7 @@ sidebar_label: "Функции для работы со строками"
empty(x)
```
A string is considered non-empty if it contains at least one byte, even if that byte is a space or a null byte.
The function also supports the [Array](array-functions.md#function-empty) and [UUID](uuid-functions.md#empty) types.
@ -56,7 +56,7 @@ SELECT empty('text');
notEmpty(x)
```
A string is considered non-empty if it contains at least one byte, even if that byte is a space or a null byte.
The function also supports the [Array](array-functions.md#function-notempty) and [UUID](uuid-functions.md#notempty) types.
@ -491,21 +491,21 @@ SELECT concat(key1, key2), sum(value) FROM key_val GROUP BY (key1, key2);
Returns the string `s` converted from the `from` encoding to the `to` encoding.
-## Base58Encode(plaintext), Base58Decode(encoded_text) {#base58}
+## base58Encode(plaintext), base58Decode(encoded_text) {#base58}
Accepts a string or a column of strings and encodes/decodes it using the [Base58](https://tools.ietf.org/id/draft-msporny-base58-01.html) coding scheme with the standard Bitcoin alphabet.
**Syntax**
```sql
-encodeBase58(decoded)
+base58Encode(decoded)
-decodeBase58(encoded)
+base58Decode(encoded)
```
**Arguments**
- `decoded` — A column or a string of the [String](../../sql-reference/data-types/string.md) type.
-- `encoded` — A column or a string of the [String](../../sql-reference/data-types/string.md) type. If the input string is not a valid code for some other string, exception `1001` is thrown.
+- `encoded` — A column or a string of the [String](../../sql-reference/data-types/string.md) type. If the input string is not a valid code for some other string, an exception is thrown.
**Returned value**
@ -518,18 +518,18 @@ decodeBase58(encoded)
Query:
``` sql
-SELECT encodeBase58('encode');
+SELECT base58Encode('Encoded');
-SELECT decodeBase58('izCFiDUY');
+SELECT base58Decode('3dc8KtHrwM');
```
Result:
```text
-┌─encodeBase58('encode', 'flickr')─┐
+┌─base58Encode('Encoded')─┐
-│ SvyTHb1D │
+│ 3dc8KtHrwM │
-└──────────────────────────────────┘
+└─────────────────────────┘
-┌─decodeBase58('izCFiDUY', 'ripple')─┐
+┌─base58Decode('3dc8KtHrwM')─┐
-│ decode │
+│ Encoded │
-└────────────────────────────────────┘
+└────────────────────────────┘
```
## base64Encode(s) {#base64encode}

View File

@ -1,5 +1,5 @@
---
-slug: /ru/sql-reference/statements/alter/
+slug: /ru/sql-reference/statements/alter/index
toc_hidden_folder: true
sidebar_position: 42
sidebar_label: "Манипуляции с индексами"

View File

@ -153,8 +153,8 @@ ClickHouse has only one physical ordering, determined by the `order by` clause. To create a
* Modifying the column type. It must be compatible with the original type, otherwise replication fails. For example, a `UInt32` column can be modified to `UInt64`, but a `String` column cannot be modified to `Array(String)`.
* Modifying [column TTL](../table-engines/mergetree-family/mergetree/#mergetree-column-ttl).
-* Modifying [column compression codec](../../sql-reference/statements/create/table.md/#codecs).
+* Modifying [column compression codec](../../sql-reference/statements/create/table.mdx#codecs).
-* Adding [ALIAS columns](../../sql-reference/statements/create/table.md/#alias).
+* Adding [ALIAS columns](../../sql-reference/statements/create/table.mdx#alias).
* Adding [skipping indexes](../table-engines/mergetree-family/mergetree/#table_engine-mergetree-data_skipping-indexes).
* Adding [projections](../table-engines/mergetree-family/mergetree/#projections).
Note that projection optimization is disabled when using `SELECT ... FINAL` (which MaterializedMySQL does by default), so its use is limited here; `INDEX ... TYPE hypothesis`, [described in the v21.12 blog post](https://clickhouse.com/blog/en/2021/clickhouse-v21.12-released/), may be more useful in this case.

View File

@ -34,7 +34,7 @@ CREATE DATABASE testdb ENGINE = Replicated('zoo_path', 'shard_name', 'replica_na
When a new replica of the database is created, the replica creates the tables by itself. If the replica has been unavailable for a long time and has lagged behind the replication log, it checks its local metadata against the current metadata in ZooKeeper, moves extra tables containing data into a separate non-replicated database (so as not to accidentally delete anything superfluous), creates the missing tables, and updates table names if they have been renamed. Data is replicated at the `ReplicatedMergeTree` level, i.e. if a table is not replicated, its data is not replicated either (the database is responsible only for metadata).
-[`ALTER TABLE ATTACH|FETCH|DROP|DROP DETACHED|DETACH PARTITION|PART`](../../sql-reference/statements/alter/partition.md) queries are allowed but not replicated. The database engine only adds/fetches/removes partitions and parts on the current replica. However, if the table itself uses a Replicated table engine, the data will be replicated after `ATTACH` is used.
+[`ALTER TABLE ATTACH|FETCH|DROP|DROP DETACHED|DETACH PARTITION|PART`](../../sql-reference/statements/alter/partition.mdx) queries are allowed but not replicated. The database engine only adds/fetches/removes partitions and parts on the current replica. However, if the table itself uses a Replicated table engine, the data will be replicated after `ATTACH` is used.
## Usage example {#usage-example}
Create a cluster of three hosts:
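For orientation, creating one replica of such a database might look like this sketch (the ZooKeeper path, shard, and replica names are illustrative):

``` sql
CREATE DATABASE r ENGINE = Replicated('/clickhouse/databases/r', 'shard1', 'replica1');
```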

View File

@ -1 +0,0 @@
../../../../en/engines/table-engines/integrations/ExternalDistributed.md

View File

@ -0,0 +1,10 @@
---
slug: /zh/engines/table-engines/integrations/ExternalDistributed
sidebar_position: 12
sidebar_label: ExternalDistributed
title: ExternalDistributed
---
import Content from '@site/docs/en/engines/table-engines/integrations/ExternalDistributed.md';
<Content />

View File

@ -25,7 +25,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
) ENGINE = Hive('thrift://host:port', 'database', 'table');
PARTITION BY expr
```
-See the detailed description of the [CREATE TABLE](../../../sql-reference/statements/create/table.md#create-table-query) query.
+See the detailed description of the [CREATE TABLE](../../../sql-reference/statements/create/table.mdx#create-table-query) query.
The table structure can differ from the original Hive table structure:
- Column names should be the same as in the original Hive table, but you can use only some of them, in any order; you can also use alias columns computed from other columns.

View File

@ -1 +0,0 @@
../../../../en/engines/table-engines/integrations/materialized-postgresql.md

View File

@ -0,0 +1,10 @@
---
slug: /zh/engines/table-engines/integrations/materialized-postgresql
sidebar_position: 12
sidebar_label: MaterializedPostgreSQL
title: MaterializedPostgreSQL
---
import Content from '@site/docs/en/engines/table-engines/integrations/materialized-postgresql.md';
<Content />

View File

@ -19,7 +19,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
) ENGINE = PostgreSQL('host:port', 'database', 'table', 'user', 'password'[, `schema`]);
```
-<!-- For details, see the [CREATE TABLE](../../../sql-reference/statements/create/table.md#create-table-query) query. -->
+<!-- For details, see the [CREATE TABLE](../../../sql-reference/statements/create/table.mdx#create-table-query) query. -->
The table structure can differ from the source PostgreSQL table structure:

View File

@ -57,4 +57,4 @@ SELECT * FROM sqlite_db.table2 ORDER BY col1;
**See Also**
- [SQLite](../../../engines/database-engines/sqlite.md) engine
-- [sqlite](../../../sql-reference/table-functions/sqlite.md) table function
+- [sqlite](../../../sql-reference/table-functions/sqlite.mdx) table function

View File

@ -285,7 +285,7 @@ sudo -u clickhouse touch /var/lib/clickhouse/flags/force_restore_data
- [background_schedule_pool_size](../../../operations/settings/settings.md#background_schedule_pool_size)
- [background_fetches_pool_size](../../../operations/settings/settings.md#background_fetches_pool_size)
- [execute_merges_on_single_replica_time_threshold](../../../operations/settings/settings.md#execute-merges-on-single-replica-time-threshold)
-- [max_replicated_fetches_network_bandwidth](../../../operations/settings/merge-tree-settings.md#max_replicated_fetches_network_bandwidth)
+- [max_replicated_fetches_network_bandwidth](../../../operations/settings/merge-tree-settings.mdx#max_replicated_fetches_network_bandwidth)
- [max_replicated_sends_network_bandwidth](../../../operations/settings/merge-tree-settings.md#max_replicated_sends_network_bandwidth)
[Original article](https://clickhouse.com/docs/en/operations/table_engines/replication/) <!--hide-->

View File

@ -226,7 +226,7 @@ SELECT queries are sent to all shards and, no matter how the data is distributed across the shards
- `_shard_num` — the `shard_num` value from the `system.clusters` table. Data type: [UInt32](../../../sql-reference/data-types/int-uint.md).
!!! note "Note"
-    Since the [remote](../../../sql-reference/table-functions/remote.md) and [cluster](../../../sql-reference/table-functions/cluster.md) table functions internally create a distributed table, `_shard_num` is available for them as well.
+    Since the [remote](../../../sql-reference/table-functions/remote.md) and [cluster](../../../sql-reference/table-functions/cluster.mdx) table functions internally create a distributed table, `_shard_num` is available for them as well.
**See Also**
- A description of [virtual columns](../../../engines/table-engines/index.md#table_engines-virtual_columns)

View File

@ -18,7 +18,7 @@ Indexes
: ClickHouse keeps data structures in memory that allow reading only the columns a query uses, and only the necessary row ranges of those columns.
Data compression
-: Storing different values of the same column together often leads to better compression ratios (compared to row-oriented systems) because in real data a column often has the same or not so many different values for neighboring rows. In addition to general-purpose compression, ClickHouse supports [specialized codecs](../../sql-reference/statements/create/table.md/#create-query-specialized-codecs) that can make data even more compact.
+: Storing different values of the same column together often leads to better compression ratios (compared to row-oriented systems) because in real data a column often has the same or not so many different values for neighboring rows. In addition to general-purpose compression, ClickHouse supports [specialized codecs](../../sql-reference/statements/create/table.mdx/#create-query-specialized-codecs) that can make data even more compact.
Vectorized query execution
: ClickHouse not only stores data in columns but also processes data in columns. This leads to better CPU cache utilization and enables the use of [SIMD](https://en.wikipedia.org/wiki/SIMD) CPU instructions.

View File

@ -35,7 +35,7 @@ More details on [mutations](../../sql-reference/statements/alter.md/#alter-mutat
`ALTER TABLE ... DROP PARTITION` provides a cost-efficient way to drop a whole partition. It's not that flexible and needs a proper partitioning scheme configured on table creation, but it still covers most common cases. Like mutations, it needs to be executed from an external system for regular use.
-More details on [manipulating partitions](../../sql-reference/statements/alter/partition.md/#alter_drop-partition).
+More details on [manipulating partitions](../../sql-reference/statements/alter/partition.mdx/#alter_drop-partition).
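As a hedged illustration (the table name and partition value are made up, and assume the table is partitioned by `toYYYYMM(date)`):

``` sql
ALTER TABLE events DROP PARTITION 202208;
```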
## TRUNCATE {#truncate}

View File

@ -11,7 +11,7 @@ ClickHouse is a general-purpose data storage solution for [OLAP](../../faq/general/ola
-First, there are **[specialized codecs](../../sql-reference/statements/create/table.md#create-query-specialized-codecs)** typical for time series: either common algorithms such as `DoubleDelta` and `Gorilla`, or ClickHouse-specific ones such as `T64`.
+First, there are **[specialized codecs](../../sql-reference/statements/create/table.mdx#create-query-specialized-codecs)** typical for time series: either common algorithms such as `DoubleDelta` and `Gorilla`, or ClickHouse-specific ones such as `T64`.
@ -19,4 +19,4 @@ ClickHouse is a general-purpose data storage solution for [OLAP](../../faq/general/ola
Although this goes against ClickHouse's philosophy of storing and processing raw data, you can use [materialized views](../../sql-reference/statements/create/view.md) to meet tighter latency or cost requirements.

View File

@ -1 +0,0 @@
../../../en/getting-started/example-datasets/brown-benchmark.md

View File

@ -0,0 +1,10 @@
---
slug: /zh/getting-started/example-datasets/brown-benchmark
sidebar_label: Brown University Benchmark
description: A new analytical benchmark for machine-generated log data
title: "Brown University Benchmark"
---
import Content from '@site/docs/en/getting-started/example-datasets/brown-benchmark.md';
<Content />

View File

@ -1 +0,0 @@
../../../en/getting-started/example-datasets/cell-towers.md

View File

@ -0,0 +1,9 @@
---
slug: /zh/getting-started/example-datasets/cell-towers
sidebar_label: Cell Towers
title: "Cell Towers"
---
import Content from '@site/docs/en/getting-started/example-datasets/cell-towers.md';
<Content />

View File

@ -1 +0,0 @@
../../../en/getting-started/example-datasets/menus.md

View File

@ -0,0 +1,9 @@
---
slug: /zh/getting-started/example-datasets/menus
sidebar_label: New York Public Library "What's on the Menu?" Dataset
title: "New York Public Library \"What's on the Menu?\" Dataset"
---
import Content from '@site/docs/en/getting-started/example-datasets/menus.md';
<Content />

View File

@ -1 +0,0 @@
../../../en/getting-started/example-datasets/opensky.md

Some files were not shown because too many files have changed in this diff.