Merge branch 'master' into fix-install-snapshot (commit 97fd3d9123)
@@ -368,7 +368,8 @@ endif()
 set (COMPILER_FLAGS "${COMPILER_FLAGS}")
-set (DEBUG_INFO_FLAGS "-g")
+# Our built-in unwinder only supports DWARF version up to 4.
+set (DEBUG_INFO_FLAGS "-g -gdwarf-4")
 set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COMPILER_FLAGS}")
 set (CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -O3 ${DEBUG_INFO_FLAGS} ${CMAKE_CXX_FLAGS_ADD}")
base/glibc-compatibility/musl/dup3.c (new file, 22 lines)
@@ -0,0 +1,22 @@
#define _GNU_SOURCE
#include <unistd.h>
#include <errno.h>
#include <fcntl.h>
#include "syscall.h"

int dup3(int old, int new, int flags)
{
    int r;
#ifdef SYS_dup2
    /* Unlike dup2, dup3 requires distinct descriptors. */
    if (old==new) return __syscall_ret(-EINVAL);
    if (flags & O_CLOEXEC) {
        /* Prefer the real dup3 syscall; retry while the target descriptor is busy. */
        while ((r=__syscall(SYS_dup3, old, new, flags))==-EBUSY);
        if (r!=-ENOSYS) return __syscall_ret(r);
    }
    /* Fallback for kernels without dup3: dup2, then set close-on-exec separately. */
    while ((r=__syscall(SYS_dup2, old, new))==-EBUSY);
    if (flags & O_CLOEXEC) __syscall(SYS_fcntl, new, F_SETFD, FD_CLOEXEC);
#else
    while ((r=__syscall(SYS_dup3, old, new, flags))==-EBUSY);
#endif
    return __syscall_ret(r);
}
base/glibc-compatibility/musl/inotify.c (new file, 26 lines)
@@ -0,0 +1,26 @@
#include <sys/inotify.h>
#include <errno.h>
#include "syscall.h"

int inotify_init()
{
    return inotify_init1(0);
}

int inotify_init1(int flags)
{
    int r = __syscall(SYS_inotify_init1, flags);
#ifdef SYS_inotify_init
    /* Fall back to the legacy syscall on old kernels, but only when no flags were requested. */
    if (r==-ENOSYS && !flags) r = __syscall(SYS_inotify_init);
#endif
    return __syscall_ret(r);
}

int inotify_add_watch(int fd, const char *pathname, uint32_t mask)
{
    return syscall(SYS_inotify_add_watch, fd, pathname, mask);
}

int inotify_rm_watch(int fd, int wd)
{
    return syscall(SYS_inotify_rm_watch, fd, wd);
}
contrib/libuv (vendored submodule)
@@ -1 +1 @@
-Subproject commit 95081e7c16c9857babe6d4e2bc1c779198ea89ae
+Subproject commit 3a85b2eb3d83f369b8a8cafd329d7e9dc28f60cf
@@ -15,6 +15,7 @@ set(uv_sources
     src/inet.c
     src/random.c
     src/strscpy.c
+    src/strtok.c
     src/threadpool.c
     src/timer.c
     src/uv-common.c
@@ -75,13 +76,13 @@ if(CMAKE_SYSTEM_NAME STREQUAL "Linux")
     list(APPEND uv_defines _GNU_SOURCE _POSIX_C_SOURCE=200112)
     list(APPEND uv_libraries rt)
     list(APPEND uv_sources
+        src/unix/epoll.c
         src/unix/linux-core.c
         src/unix/linux-inotify.c
         src/unix/linux-syscalls.c
         src/unix/procfs-exepath.c
         src/unix/random-getrandom.c
-        src/unix/random-sysctl-linux.c
-        src/unix/sysinfo-loadavg.c)
+        src/unix/random-sysctl-linux.c)
 endif()

 if(CMAKE_SYSTEM_NAME STREQUAL "NetBSD")
@@ -111,6 +112,7 @@ if(CMAKE_SYSTEM_NAME STREQUAL "OS/390")
         src/unix/pthread-fixes.c
         src/unix/pthread-barrier.c
         src/unix/os390.c
+        src/unix/os390-proctitle.c
         src/unix/os390-syscalls.c)
 endif()
docs/changelogs/v22.3.12.19-lts.md (new file, 25 lines)
@@ -0,0 +1,25 @@
---
sidebar_position: 1
sidebar_label: 2022
---

# 2022 Changelog

### ClickHouse release v22.3.12.19-lts (4a08f8a073b) FIXME as compared to v22.3.11.12-lts (137c5f72657)

#### Build/Testing/Packaging Improvement
* Backported in [#40695](https://github.com/ClickHouse/ClickHouse/issues/40695): Fix TGZ packages. [#40681](https://github.com/ClickHouse/ClickHouse/pull/40681) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

#### Bug Fix (user-visible misbehavior in official stable or prestable release)

* Backported in [#40160](https://github.com/ClickHouse/ClickHouse/issues/40160): Fix `HashMethodOneNumber` getting a wrong key value when the column is const. [#40020](https://github.com/ClickHouse/ClickHouse/pull/40020) ([Duc Canh Le](https://github.com/canhld94)).
* Backported in [#40122](https://github.com/ClickHouse/ClickHouse/issues/40122): Fix a bug in collectFilesToSkip() by adding the correct file extension (.idx or .idx2) for indexes to be recalculated, avoiding wrong hard links. Fixed [#39896](https://github.com/ClickHouse/ClickHouse/issues/39896). [#40095](https://github.com/ClickHouse/ClickHouse/pull/40095) ([Jianmei Zhang](https://github.com/zhangjmruc)).
* Backported in [#40207](https://github.com/ClickHouse/ClickHouse/issues/40207): Fix insufficient argument check for encryption functions (found by query fuzzer). This closes [#39987](https://github.com/ClickHouse/ClickHouse/issues/39987). [#40194](https://github.com/ClickHouse/ClickHouse/pull/40194) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Backported in [#40270](https://github.com/ClickHouse/ClickHouse/issues/40270): Fix possible segfault in CapnProto input format. This bug was found and sent through the ClickHouse bug-bounty [program](https://github.com/ClickHouse/ClickHouse/issues/38986) by *kiojj*. [#40241](https://github.com/ClickHouse/ClickHouse/pull/40241) ([Kruglov Pavel](https://github.com/Avogar)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Fix heap buffer overflow by limiting HTTP chunk size [#40292](https://github.com/ClickHouse/ClickHouse/pull/40292) ([Sema Checherinda](https://github.com/CheSema)).
* Reduce changelog verbosity in CI [#40360](https://github.com/ClickHouse/ClickHouse/pull/40360) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Backport the upstream clickhouse_helper.py [#40490](https://github.com/ClickHouse/ClickHouse/pull/40490) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
docs/changelogs/v22.6.7.7-stable.md (new file, 17 lines)
@@ -0,0 +1,17 @@
---
sidebar_position: 1
sidebar_label: 2022
---

# 2022 Changelog

### ClickHouse release v22.6.7.7-stable (8eae2af3b9a) FIXME as compared to v22.6.6.16-stable (d2a33ebc822)

#### Build/Testing/Packaging Improvement
* Backported in [#40692](https://github.com/ClickHouse/ClickHouse/issues/40692): Fix TGZ packages. [#40681](https://github.com/ClickHouse/ClickHouse/pull/40681) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

#### Bug Fix (user-visible misbehavior in official stable or prestable release)

* Backported in [#40531](https://github.com/ClickHouse/ClickHouse/issues/40531): The proxy resolver now stops on the first successful request to the endpoint. [#40353](https://github.com/ClickHouse/ClickHouse/pull/40353) ([Maksim Kita](https://github.com/kitaisreal)).
* Backported in [#40623](https://github.com/ClickHouse/ClickHouse/issues/40623): Fix potential data loss due to a bug in the AWS SDK (https://github.com/aws/aws-sdk-cpp/issues/658). The bug can be triggered only when ClickHouse is used over S3. [#40506](https://github.com/ClickHouse/ClickHouse/pull/40506) ([alesapin](https://github.com/alesapin)).
docs/changelogs/v22.7.5.13-stable.md (new file, 23 lines)
@@ -0,0 +1,23 @@
---
sidebar_position: 1
sidebar_label: 2022
---

# 2022 Changelog

### ClickHouse release v22.7.5.13-stable (6f48d2d1f59) FIXME as compared to v22.7.4.16-stable (0b9272f8fdc)

#### Build/Testing/Packaging Improvement
* Backported in [#40693](https://github.com/ClickHouse/ClickHouse/issues/40693): Fix TGZ packages. [#40681](https://github.com/ClickHouse/ClickHouse/pull/40681) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

#### Bug Fix (user-visible misbehavior in official stable or prestable release)

* Backported in [#40542](https://github.com/ClickHouse/ClickHouse/issues/40542): Fix potential deadlock in WriteBufferFromS3 during task scheduling failure. [#40070](https://github.com/ClickHouse/ClickHouse/pull/40070) ([Maksim Kita](https://github.com/kitaisreal)).
* Backported in [#40450](https://github.com/ClickHouse/ClickHouse/issues/40450): Fix a rare bug with column TTL for the MergeTree engine family: in case of repeated vertical merges, the error `Cannot unlink file ColumnName.bin ... No such file or directory.` could happen. [#40346](https://github.com/ClickHouse/ClickHouse/pull/40346) ([alesapin](https://github.com/alesapin)).
* Backported in [#40532](https://github.com/ClickHouse/ClickHouse/issues/40532): The proxy resolver now stops on the first successful request to the endpoint. [#40353](https://github.com/ClickHouse/ClickHouse/pull/40353) ([Maksim Kita](https://github.com/kitaisreal)).
* Backported in [#40624](https://github.com/ClickHouse/ClickHouse/issues/40624): Fix potential data loss due to a bug in the AWS SDK (https://github.com/aws/aws-sdk-cpp/issues/658). The bug can be triggered only when ClickHouse is used over S3. [#40506](https://github.com/ClickHouse/ClickHouse/pull/40506) ([alesapin](https://github.com/alesapin)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* DNSResolver: remove AI_V4MAPPED, AI_ALL hints [#40502](https://github.com/ClickHouse/ClickHouse/pull/40502) ([Maksim Kita](https://github.com/kitaisreal)).
docs/changelogs/v22.8.3.13-lts.md (new file, 25 lines)
@@ -0,0 +1,25 @@
---
sidebar_position: 1
sidebar_label: 2022
---

# 2022 Changelog

### ClickHouse release v22.8.3.13-lts (6a15b73faea) FIXME as compared to v22.8.2.11-lts (b4ed6d744ff)

#### Improvement
* Backported in [#40550](https://github.com/ClickHouse/ClickHouse/issues/40550): Improve the schema inference cache; respect format settings that can change the schema. [#40414](https://github.com/ClickHouse/ClickHouse/pull/40414) ([Kruglov Pavel](https://github.com/Avogar)).

#### Build/Testing/Packaging Improvement
* Backported in [#40694](https://github.com/ClickHouse/ClickHouse/issues/40694): Fix TGZ packages. [#40681](https://github.com/ClickHouse/ClickHouse/pull/40681) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

#### Bug Fix (user-visible misbehavior in official stable or prestable release)

* Backported in [#40451](https://github.com/ClickHouse/ClickHouse/issues/40451): Fix a rare bug with column TTL for the MergeTree engine family: in case of repeated vertical merges, the error `Cannot unlink file ColumnName.bin ... No such file or directory.` could happen. [#40346](https://github.com/ClickHouse/ClickHouse/pull/40346) ([alesapin](https://github.com/alesapin)).
* Backported in [#40533](https://github.com/ClickHouse/ClickHouse/issues/40533): The proxy resolver now stops on the first successful request to the endpoint. [#40353](https://github.com/ClickHouse/ClickHouse/pull/40353) ([Maksim Kita](https://github.com/kitaisreal)).
* Backported in [#40625](https://github.com/ClickHouse/ClickHouse/issues/40625): Fix potential data loss due to a bug in the AWS SDK (https://github.com/aws/aws-sdk-cpp/issues/658). The bug can be triggered only when ClickHouse is used over S3. [#40506](https://github.com/ClickHouse/ClickHouse/pull/40506) ([alesapin](https://github.com/alesapin)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* DNSResolver: remove AI_V4MAPPED, AI_ALL hints [#40502](https://github.com/ClickHouse/ClickHouse/pull/40502) ([Maksim Kita](https://github.com/kitaisreal)).
@@ -99,7 +99,7 @@ mysql> select * from mysql_table;
 Database in ClickHouse, exchanging data with the MySQL server:

 ``` sql
-CREATE DATABASE mysql_db ENGINE = MySQL('localhost:3306', 'test', 'my_user', 'user_password')
+CREATE DATABASE mysql_db ENGINE = MySQL('localhost:3306', 'test', 'my_user', 'user_password') SETTINGS read_write_timeout=10000, connect_timeout=100;
 ```

 ``` sql
@@ -2,10 +2,9 @@
slug: /en/engines/table-engines/integrations/ExternalDistributed
sidebar_position: 12
sidebar_label: ExternalDistributed
title: ExternalDistributed
---

# ExternalDistributed

The `ExternalDistributed` engine allows you to perform `SELECT` queries on data stored on remote MySQL or PostgreSQL servers. It accepts [MySQL](../../../engines/table-engines/integrations/mysql.md) or [PostgreSQL](../../../engines/table-engines/integrations/postgresql.md) engines as an argument, so sharding is possible.

## Creating a Table {#creating-a-table}
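For orientation, a minimal sketch of the kind of definition this section goes on to describe; the host list, remote database/table names, and credentials below are placeholder assumptions, not values from this diff:

```sql
CREATE TABLE ext_dist_table
(
    id UInt32,
    name String
)
ENGINE = ExternalDistributed('MySQL', 'host1:3306,host2:3306', 'remote_db', 'remote_table', 'user', 'password');
```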
@@ -2,10 +2,9 @@
slug: /en/engines/table-engines/integrations/materialized-postgresql
sidebar_position: 12
sidebar_label: MaterializedPostgreSQL
title: MaterializedPostgreSQL
---

# MaterializedPostgreSQL

Creates a ClickHouse table with an initial data dump of a PostgreSQL table and starts the replication process, i.e. it executes a background job to apply new changes as they happen on the PostgreSQL table in the remote PostgreSQL database.

If more than one table is required, it is highly recommended to use the [MaterializedPostgreSQL](../../../engines/database-engines/materialized-postgresql.md) database engine instead of the table engine, together with the `materialized_postgresql_tables_list` setting, which specifies the tables to be replicated (it will also be possible to add a database `schema`). It is much better in terms of CPU and results in fewer connections and fewer replication slots inside the remote PostgreSQL database.
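A hedged sketch of the recommended database-engine form mentioned above; the connection details and table list are placeholders:

```sql
CREATE DATABASE pg_replica
ENGINE = MaterializedPostgreSQL('postgres-host:5432', 'postgres_db', 'postgres_user', 'password')
SETTINGS materialized_postgresql_tables_list = 'table1,table2';
```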
@@ -2,10 +2,9 @@
slug: /en/getting-started/example-datasets/brown-benchmark
sidebar_label: Brown University Benchmark
description: A new analytical benchmark for machine-generated log data
title: "Brown University Benchmark"
---

# Brown University Benchmark

`MgBench` is a new analytical benchmark for machine-generated log data, by [Andrew Crotty](http://cs.brown.edu/people/acrotty/).

Download the data:

@@ -1,10 +1,9 @@
---
slug: /en/getting-started/example-datasets/cell-towers
sidebar_label: Cell Towers
title: "Cell Towers"
---

# Cell Towers

This dataset is from [OpenCellid](https://www.opencellid.org/), the world's largest open database of cell towers.

As of 2021, it contains more than 40 million records about cell towers (GSM, LTE, UMTS, etc.) around the world, with their geographical coordinates and metadata (country code, network, etc.).

@@ -1,10 +1,9 @@
---
slug: /en/getting-started/example-datasets/menus
sidebar_label: New York Public Library "What's on the Menu?" Dataset
title: "New York Public Library \"What's on the Menu?\" Dataset"
---

# New York Public Library "What's on the Menu?" Dataset

The dataset was created by the New York Public Library. It contains historical data on the menus of hotels, restaurants, and cafes, with the dishes and their prices.

Source: http://menus.nypl.org/data

@@ -2,10 +2,9 @@
slug: /en/getting-started/example-datasets/opensky
sidebar_label: Air Traffic Data
description: The data in this dataset is derived and cleaned from the full OpenSky dataset to illustrate the development of air traffic during the COVID-19 pandemic.
title: "Crowdsourced air traffic data from The OpenSky Network 2020"
---

# Crowdsourced air traffic data from The OpenSky Network 2020

The data in this dataset is derived and cleaned from the full OpenSky dataset to illustrate the development of air traffic during the COVID-19 pandemic. It spans all flights seen by the network's more than 2500 members since 1 January 2019. More data will be periodically included in the dataset until the end of the COVID-19 pandemic.

Source: https://zenodo.org/record/5092942#.YRBCyTpRXYd

@@ -1,10 +1,9 @@
---
slug: /en/getting-started/example-datasets/recipes
sidebar_label: Recipes Dataset
title: "Recipes Dataset"
---

# Recipes Dataset

The RecipeNLG dataset is available for download [here](https://recipenlg.cs.put.poznan.pl/dataset). It contains 2.2 million recipes. The size is slightly less than 1 GB.

## Download and Unpack the Dataset

@@ -2,10 +2,9 @@
slug: /en/getting-started/example-datasets/uk-price-paid
sidebar_label: UK Property Price Paid
sidebar_position: 1
title: "UK Property Price Paid"
---

# UK Property Price Paid

The dataset contains data about prices paid for real-estate property in England and Wales. The data is available since 1995.
The size of the dataset in uncompressed form is about 4 GiB, and it takes about 278 MiB in ClickHouse.
@@ -2,10 +2,9 @@
slug: /en/operations/caches
sidebar_position: 65
sidebar_label: Caches
title: "Cache Types"
---

# Cache Types

When performing queries, ClickHouse uses different caches.

Main cache types:

@@ -2,10 +2,9 @@
slug: /en/operations/external-authenticators/
sidebar_position: 48
sidebar_label: External User Authenticators and Directories
title: "External User Authenticators and Directories"
---

# External User Authenticators and Directories

ClickHouse supports authenticating and managing users using external services.

The following external authenticators and directories are supported:

@@ -1,7 +1,7 @@
---
slug: /en/operations/external-authenticators/ldap
title: "LDAP"
---
# LDAP

An LDAP server can be used to authenticate ClickHouse users. There are two different approaches for doing this:

@@ -1,7 +1,7 @@
---
slug: /en/operations/external-authenticators/ssl-x509
title: "SSL X.509 certificate authentication"
---
# SSL X.509 certificate authentication

The [SSL 'strict' option](../server-configuration-parameters/settings.md#server_configuration_parameters-openssl) enables mandatory certificate validation for incoming connections. In this case, only connections with trusted certificates can be established; connections with untrusted certificates are rejected. Thus, certificate validation allows you to uniquely authenticate an incoming connection. The `Common Name` field of the certificate is used to identify the connected user, which makes it possible to associate multiple certificates with the same user. Additionally, reissuing and revoking certificates does not affect the ClickHouse configuration.

@@ -2,10 +2,9 @@
slug: /en/operations/opentelemetry
sidebar_position: 62
sidebar_label: OpenTelemetry Support
title: "[experimental] OpenTelemetry Support"
---

# [experimental] OpenTelemetry Support

[OpenTelemetry](https://opentelemetry.io/) is an open standard for collecting traces and metrics from a distributed application. ClickHouse has some support for OpenTelemetry.

:::warning

@@ -1,7 +1,7 @@
---
slug: /en/operations/settings/merge-tree-settings
title: "MergeTree tables settings"
---
# MergeTree tables settings

The values of `merge_tree` settings (for all MergeTree tables) can be viewed in the table `system.merge_tree_settings`; they can be overridden in `config.xml` in the `merge_tree` section, or set in the `SETTINGS` section of each table.
@@ -968,6 +968,12 @@ See also:

 - [Apache Kafka](https://kafka.apache.org/)

+## kafka_disable_num_consumers_limit {#kafka-disable-num-consumers-limit}
+
+Disables the limit on `kafka_num_consumers` that depends on the number of available CPU cores.
+
+Default value: false.
+
 ## use_uncompressed_cache {#setting-use_uncompressed_cache}

 Whether to use a cache of uncompressed blocks. Accepts 0 or 1. By default, 0 (disabled).
@@ -1,11 +1,10 @@
 ---
 slug: /en/operations/storing-data
 sidebar_position: 68
-sidebar_label: External Disks for Storing Data
+sidebar_label: "External Disks for Storing Data"
 title: "External Disks for Storing Data"
 ---

 # External Disks for Storing Data

Data processed in ClickHouse is usually stored in the local file system, on the same machine as the ClickHouse server. That requires large-capacity disks, which can be quite expensive. To avoid that, you can store the data remotely: on [Amazon S3](https://aws.amazon.com/s3/) disks or in the Hadoop Distributed File System ([HDFS](https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-hdfs/HdfsDesign.html)).

To work with data stored on `Amazon S3` disks, use the [S3](../engines/table-engines/integrations/s3.md) table engine; to work with data in the Hadoop Distributed File System, use the [HDFS](../engines/table-engines/integrations/hdfs.md) table engine.
@@ -1,9 +1,8 @@
---
slug: /en/operations/utilities/clickhouse-compressor
title: clickhouse-compressor
---

# clickhouse-compressor

Simple program for data compression and decompression.

### Examples

@@ -1,7 +1,7 @@
---
slug: /en/operations/utilities/clickhouse-format
title: clickhouse-format
---
# clickhouse-format

Allows formatting input queries.

@@ -1,7 +1,7 @@
---
slug: /en/operations/utilities/clickhouse-obfuscator
title: clickhouse-obfuscator
---
# clickhouse-obfuscator

A simple tool for table data obfuscation.

@@ -1,7 +1,7 @@
---
slug: /en/operations/utilities/odbc-bridge
title: clickhouse-odbc-bridge
---
# clickhouse-odbc-bridge

Simple HTTP server that works as a proxy for an ODBC driver. The main motivation
was possible segfaults or other faults in ODBC implementations, which can
@@ -1,10 +1,9 @@
---
slug: /en/sql-reference/aggregate-functions/reference/deltasumtimestamp
sidebar_position: 141
title: deltaSumTimestamp
---

# deltaSumTimestamp

Adds the difference between consecutive rows. If the difference is negative, it is ignored.

This function is primarily for [materialized views](../../../sql-reference/statements/create/view.md#materialized) that are ordered by some time bucket-aligned timestamp, for example, a `toStartOfMinute` bucket. Because the rows in such a materialized view all have the same timestamp, it is impossible to merge them in the "right" order. This function keeps track of the `timestamp` of the values it has seen, so it is possible to order the states correctly during merging.
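A minimal usage sketch, assuming a source with a `value` column and a bucket-aligned `ts` timestamp (all names and data below are illustrative):

```sql
SELECT deltaSumTimestamp(value, ts)
FROM
(
    SELECT toStartOfMinute(toDateTime('2022-01-01 00:00:00') + number) AS ts,
           number % 3 AS value
    FROM numbers(100)
);
```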
@@ -2,10 +2,9 @@
slug: /en/sql-reference/aggregate-functions/reference/intervalLengthSum
sidebar_position: 146
sidebar_label: intervalLengthSum
title: intervalLengthSum
---

# intervalLengthSum

Calculates the total length of the union of all ranges (segments on the numeric axis).

**Syntax**
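A minimal sketch of the call shape, using the `values` table function to supply a few overlapping intervals (the numbers are illustrative):

```sql
SELECT intervalLengthSum(start, end)
FROM values('start Float64, end Float64', (1.1, 2.9), (2.5, 3.5), (7.0, 8.0));
```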
@@ -1,10 +1,9 @@
---
slug: /en/sql-reference/aggregate-functions/reference/quantilebfloat16
sidebar_position: 209
title: quantileBFloat16
---

# quantileBFloat16

Computes an approximate [quantile](https://en.wikipedia.org/wiki/Quantile) of a sample consisting of [bfloat16](https://en.wikipedia.org/wiki/Bfloat16_floating-point_format) numbers. `bfloat16` is a floating-point data type with 1 sign bit, 8 exponent bits, and 7 fraction bits.
The function converts input values to 32-bit floats and takes the most significant 16 bits. Then it calculates the `bfloat16` quantile value and converts the result to a 64-bit float by appending zero bits.
The function is a fast quantile estimator with a relative error of no more than 0.390625%.
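A minimal sketch of computing a 0.75-quantile over integer and float samples (the data is illustrative):

```sql
SELECT quantileBFloat16(0.75)(a), quantileBFloat16(0.75)(b)
FROM values('a Int16, b Float32', (1, 1.001), (2, 1.002), (3, 1.003), (4, 1.004));
```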
@@ -1,10 +1,9 @@
---
slug: /en/sql-reference/aggregate-functions/reference/sumcount
sidebar_position: 144
title: sumCount
---

# sumCount

Calculates the sum of the numbers and counts the number of rows at the same time. The function is used by the ClickHouse query optimizer: if there are multiple `sum`, `count` or `avg` functions in a query, they can be replaced by a single `sumCount` function to reuse the calculations. The function is rarely needed explicitly.

**Syntax**
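A minimal usage sketch with illustrative data; the function returns a tuple of the sum and the row count:

```sql
SELECT sumCount(x) FROM values('x Int8', (0), (1), (2), (3), (4), (5));
-- (15, 6)
```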
@@ -1,10 +1,9 @@
---
slug: /en/sql-reference/aggregate-functions/reference/sumkahan
sidebar_position: 145
title: sumKahan
---

# sumKahan

Calculates the sum of the numbers with the [Kahan compensated summation algorithm](https://en.wikipedia.org/wiki/Kahan_summation_algorithm).
Slower than the [sum](./sum.md) function.
The compensation works only for [Float](../../../sql-reference/data-types/float.md) types.
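A minimal sketch contrasting it with plain `sum` on a value that has no exact binary representation:

```sql
SELECT sum(0.1), sumKahan(0.1) FROM numbers(10);
-- sum accumulates floating-point rounding error; sumKahan compensates for it and returns 1.
```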
@@ -46,7 +46,7 @@ Binary operations on Decimal result in wider result type (with any order of arguments).

 Rules for scale:

 - add, subtract: S = max(S1, S2).
-- multuply: S = S1 + S2.
+- multiply: S = S1 + S2.
 - divide: S = S1.

 For similar operations between Decimal and integers, the result is Decimal of the same size as an argument.
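A short sketch of the scale rules above in action (the comments restate each rule; S is the decimal scale):

```sql
SELECT
    toDecimal32(2, 2) + toDecimal32(3, 1) AS added,      -- add: S = max(2, 1) = 2, result 5.00
    toDecimal32(2, 2) * toDecimal32(3, 1) AS multiplied, -- multiply: S = 2 + 1 = 3, result 6.000
    toDecimal32(6, 2) / toDecimal32(3, 1) AS divided;    -- divide: S = S1 = 2, result 2.00
```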
@@ -2,10 +2,9 @@
slug: /en/sql-reference/data-types/geo
sidebar_position: 62
sidebar_label: Geo
title: "Geo Data Types"
---

# Geo Data Types

ClickHouse supports data types for representing geographical objects: locations, lands, etc.

:::warning

@@ -2,10 +2,9 @@
slug: /en/sql-reference/data-types/multiword-types
sidebar_position: 61
sidebar_label: Multiword Type Names
title: "Multiword Types"
---

# Multiword Types

When creating tables, you can use data types with a name consisting of several words. This is implemented for better SQL compatibility.

## Multiword Types Support

@@ -411,7 +411,7 @@ If setting `allow_read_expired_keys` is set to 1, by default 0. Then dictionary

 To improve cache performance, use a subquery with `LIMIT`, and call the function with the dictionary externally.

-Supported [sources](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md): MySQL, ClickHouse, executable, HTTP.
+All types of sources are supported.

 Example of settings:

@@ -2,10 +2,9 @@
slug: /en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-polygon
sidebar_position: 46
sidebar_label: Polygon Dictionaries With Grids
title: "Polygon dictionaries"
---

# Polygon dictionaries

Polygon dictionaries allow you to efficiently search for the polygon containing specified points.
For example: defining a city area by geographical coordinates.
@@ -2,10 +2,9 @@
slug: /en/sql-reference/functions/encryption-functions
sidebar_position: 67
sidebar_label: Encryption
title: "Encryption functions"
---

# Encryption functions

These functions implement encryption and decryption of data with the AES (Advanced Encryption Standard) algorithm.

Key length depends on the encryption mode. It is 16, 24, and 32 bytes long for `-128-`, `-196-`, and `-256-` modes respectively.
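A hedged sketch of one such call; the mode and the 16-byte key are illustrative, and, per the paragraph above, the key length must match the chosen mode:

```sql
SELECT hex(encrypt('aes-128-ecb', 'Secret', '1111111111111111')) AS ciphertext;
```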
@@ -2,10 +2,9 @@
slug: /en/sql-reference/functions/files
sidebar_position: 43
sidebar_label: Files
title: "Functions for Working with Files"
---

# Functions for Working with Files

## file

Reads a file as a String. The file content is not parsed, so any information is read as one string and placed into the specified column.
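A minimal usage sketch; `table_from_file` is a hypothetical target table, and `test.txt` is assumed to exist in the directory configured by `user_files_path`:

```sql
INSERT INTO table_from_file SELECT file('test.txt');
```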
@@ -2,11 +2,9 @@
slug: /en/sql-reference/functions/geo/coordinates
sidebar_label: Geographical Coordinates
sidebar_position: 62
title: "Functions for Working with Geographical Coordinates"
---

# Functions for Working with Geographical Coordinates

## greatCircleDistance

Calculates the distance between two points on the Earth’s surface using [the great-circle formula](https://en.wikipedia.org/wiki/Great-circle_distance).
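A minimal call sketch with illustrative coordinates; the arguments are the longitude and latitude of two points in degrees, and the result is in meters:

```sql
SELECT greatCircleDistance(55.755831, 37.617673, -55.755831, -37.617673) AS distance_m;
```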
@@ -1,10 +1,9 @@
---
slug: /en/sql-reference/functions/geo/geohash
sidebar_label: Geohash
title: "Functions for Working with Geohash"
---

# Functions for Working with Geohash

[Geohash](https://en.wikipedia.org/wiki/Geohash) is a geocode system that subdivides the Earth’s surface into grid-shaped buckets and encodes each cell into a short string of letters and digits. It is a hierarchical data structure, so the longer the geohash string is, the more precise the geographic location.

If you need to manually convert geographic coordinates to geohash strings, you can use [geohash.org](http://geohash.org/).
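A minimal sketch of the round trip; the coordinates and the geohash string are illustrative, and `geohashEncode` takes longitude, latitude, and precision:

```sql
SELECT geohashEncode(-5.60302734375, 42.593994140625, 0) AS encoded,
       geohashDecode('ezs42') AS decoded;
```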
@@ -1,10 +1,9 @@
---
slug: /en/sql-reference/functions/geo/h3
sidebar_label: H3 Indexes
title: "Functions for Working with H3 Indexes"
---

# Functions for Working with H3 Indexes

[H3](https://eng.uber.com/h3/) is a geographical indexing system where the Earth’s surface is divided into a grid of even hexagonal cells. This system is hierarchical, i.e. each hexagon on the top level ("parent") can be split into seven even but smaller ones ("children"), and so on.

The level of the hierarchy is called `resolution` and can receive a value from `0` to `15`, where `0` is the `base` level with the largest and coarsest cells.
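A minimal sketch of indexing one point at the finest resolution; the coordinates are illustrative, and `geoToH3` takes longitude, latitude, and resolution:

```sql
SELECT geoToH3(37.79506683, 55.71290588, 15) AS h3Index;
```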
@@ -2,9 +2,9 @@
slug: /en/sql-reference/functions/geo/
sidebar_label: Geo
sidebar_position: 62
title: "Geo Functions"
---

# Geo Functions

## Geographical Coordinates Functions

@@ -2,10 +2,9 @@
slug: /en/sql-reference/functions/nlp-functions
sidebar_position: 67
sidebar_label: NLP
title: "[experimental] Natural Language Processing functions"
---

# [experimental] Natural Language Processing functions

:::warning
This is an experimental feature that is currently in development and is not ready for general use. It will change in unpredictable backwards-incompatible ways in future releases. Set `allow_experimental_nlp_functions = 1` to enable it.
:::
@@ -495,25 +495,23 @@ If the ‘s’ string is non-empty and does not contain the ‘c’ character at

 Returns the string ‘s’ that was converted from the encoding in ‘from’ to the encoding in ‘to’.

-## Base58Encode(plaintext), Base58Decode(encoded_text)
+## base58Encode(plaintext)

-Accepts a String and encodes/decodes it using [Base58](https://tools.ietf.org/id/draft-msporny-base58-01.html) encoding scheme using "Bitcoin" alphabet.
+Accepts a String and encodes it using [Base58](https://tools.ietf.org/id/draft-msporny-base58-01.html) encoding scheme using "Bitcoin" alphabet.

 **Syntax**

 ```sql
-base58Encode(decoded)
-base58Decode(encoded)
+base58Encode(plaintext)
 ```

 **Arguments**

-- `decoded` — [String](../../sql-reference/data-types/string.md) column or constant.
-- `encoded` — [String](../../sql-reference/data-types/string.md) column or constant. If the string is not a valid base58-encoded value, an exception is thrown.
+- `plaintext` — [String](../../sql-reference/data-types/string.md) column or constant.

 **Returned value**

-- A string containing encoded/decoded value of 1st argument.
+- A string containing encoded value of 1st argument.

 Type: [String](../../sql-reference/data-types/string.md).

@@ -523,17 +521,48 @@ Query:

 ``` sql
 SELECT base58Encode('Encoded');
-SELECT base58Encode('3dc8KtHrwM');
 ```

 Result:
 ```text
-┌─encodeBase58('Encoded')─┐
+┌─base58Encode('Encoded')─┐
 │ 3dc8KtHrwM              │
-└──────────────────────────────────┘
-┌─decodeBase58('3dc8KtHrwM')─┐
+└─────────────────────────┘
 ```

+## base58Decode(encoded_text)
+
+Accepts a String and decodes it using [Base58](https://tools.ietf.org/id/draft-msporny-base58-01.html) encoding scheme using "Bitcoin" alphabet.
+
+**Syntax**
+
+```sql
+base58Decode(encoded_text)
+```
+
+**Arguments**
+
+- `encoded_text` — [String](../../sql-reference/data-types/string.md) column or constant. If the string is not a valid base58-encoded value, an exception is thrown.
+
+**Returned value**
+
+- A string containing decoded value of 1st argument.
+
+Type: [String](../../sql-reference/data-types/string.md).
+
+**Example**
+
+Query:
+
+``` sql
+SELECT base58Decode('3dc8KtHrwM');
+```
+
+Result:
+```text
+┌─base58Decode('3dc8KtHrwM')─┐
+│ Encoded                    │
+└────────────────────────────┘
+```

 ## base64Encode(s)
@@ -2,10 +2,9 @@
slug: /en/sql-reference/functions/tuple-functions
sidebar_position: 66
sidebar_label: Tuples
title: "Functions for Working with Tuples"
---

# Functions for Working with Tuples

## tuple

A function that allows grouping multiple columns.

@@ -2,10 +2,9 @@
slug: /en/sql-reference/functions/tuple-map-functions
sidebar_position: 46
sidebar_label: Working with maps
title: "Functions for maps"
---

# Functions for maps

## map

Arranges `key:value` pairs into the [Map(key, value)](../../sql-reference/data-types/map.md) data type.
@@ -2,10 +2,9 @@
slug: /en/sql-reference/statements/alter/column
sidebar_position: 37
sidebar_label: COLUMN
title: "Column Manipulations"
---

# Column Manipulations

A set of queries that allow changing the table structure.

Syntax:

@@ -2,10 +2,9 @@
slug: /en/sql-reference/statements/alter/partition
sidebar_position: 38
sidebar_label: PARTITION
title: "Manipulating Partitions and Parts"
---

# Manipulating Partitions and Parts

The following operations with [partitions](../../../engines/table-engines/mergetree-family/custom-partitioning-key.md) are available:

- [DETACH PARTITION\|PART](#detach-partitionpart) — Moves a partition or part to the `detached` directory and forgets it.

@@ -2,10 +2,9 @@
slug: /en/sql-reference/statements/alter/projection
sidebar_position: 49
sidebar_label: PROJECTION
title: "Manipulating Projections"
---

# Manipulating Projections

The following operations with [projections](../../../engines/table-engines/mergetree-family/mergetree.md#projections) are available:

- `ALTER TABLE [db].name ADD PROJECTION name ( SELECT <COLUMN LIST EXPR> [GROUP BY] [ORDER BY] )` - Adds a projection description to the table's metadata.

@@ -2,10 +2,9 @@
slug: /en/sql-reference/statements/alter/quota
sidebar_position: 46
sidebar_label: QUOTA
title: "ALTER QUOTA"
---

# ALTER QUOTA

Changes quotas.

Syntax:

@@ -2,10 +2,9 @@
slug: /en/sql-reference/statements/alter/sample-by
sidebar_position: 41
sidebar_label: SAMPLE BY
title: "Manipulating Sampling-Key Expressions"
---

# Manipulating Sampling-Key Expressions

Syntax:

``` sql

@@ -2,10 +2,9 @@
slug: /en/sql-reference/statements/alter/user
sidebar_position: 45
sidebar_label: USER
title: "ALTER USER"
---

# ALTER USER

Changes ClickHouse user accounts.

Syntax:

@@ -2,10 +2,9 @@
slug: /en/sql-reference/statements/attach
sidebar_position: 40
sidebar_label: ATTACH
title: "ATTACH Statement"
---

# ATTACH Statement

Attaches a table or a dictionary, for example, when moving a database to another server.

**Syntax**
@@ -2,10 +2,9 @@
slug: /en/sql-reference/statements/check-table
sidebar_position: 41
sidebar_label: CHECK
title: "CHECK TABLE Statement"
---

# CHECK TABLE Statement

Checks if the data in the table is corrupted.

``` sql

@@ -2,10 +2,9 @@
slug: /en/sql-reference/statements/create/dictionary
sidebar_position: 38
sidebar_label: DICTIONARY
title: "CREATE DICTIONARY"
---

# CREATE DICTIONARY

Creates a new [external dictionary](../../../sql-reference/dictionaries/external-dictionaries/external-dicts.md) with the given [structure](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md), [source](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md), [layout](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md) and [lifetime](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md).

**Syntax**

@@ -2,10 +2,9 @@
slug: /en/sql-reference/statements/create/quota
sidebar_position: 42
sidebar_label: QUOTA
title: "CREATE QUOTA"
---

# CREATE QUOTA

Creates a [quota](../../../operations/access-rights.md#quotas-management) that can be assigned to a user or a role.

Syntax:

@@ -2,10 +2,9 @@
slug: /en/sql-reference/statements/create/role
sidebar_position: 40
sidebar_label: ROLE
title: "CREATE ROLE"
---

# CREATE ROLE

Creates new [roles](../../../operations/access-rights.md#role-management). A role is a set of [privileges](../../../sql-reference/statements/grant.md#grant-privileges). A [user](../../../sql-reference/statements/create/user.md) assigned a role gets all the privileges of this role.

Syntax:

@@ -2,10 +2,9 @@
slug: /en/sql-reference/statements/create/row-policy
sidebar_position: 41
sidebar_label: ROW POLICY
title: "CREATE ROW POLICY"
---

# CREATE ROW POLICY

Creates a [row policy](../../../operations/access-rights.md#row-policy-management), i.e. a filter used to determine which rows a user can read from a table.

:::warning

@@ -2,10 +2,9 @@
slug: /en/sql-reference/statements/create/settings-profile
sidebar_position: 43
sidebar_label: SETTINGS PROFILE
title: "CREATE SETTINGS PROFILE"
---

# CREATE SETTINGS PROFILE

Creates [settings profiles](../../../operations/access-rights.md#settings-profiles-management) that can be assigned to a user or a role.

Syntax:

@@ -2,10 +2,9 @@
slug: /en/sql-reference/statements/create/table
sidebar_position: 36
sidebar_label: TABLE
title: "CREATE TABLE"
---

# CREATE TABLE

Creates a new table. This query can have various syntax forms depending on the use case.

By default, tables are created only on the current server. Distributed DDL queries are implemented as an `ON CLUSTER` clause, which is [described separately](../../../sql-reference/distributed-ddl.md).
@@ -2,13 +2,9 @@
slug: /en/sql-reference/statements/create/user
sidebar_position: 39
sidebar_label: USER
tags:
  - create user
  - add user
title: "CREATE USER"
---

# CREATE USER

Creates [user accounts](../../../operations/access-rights.md#user-account-management).

Syntax:
@@ -2,10 +2,9 @@
slug: /en/sql-reference/statements/describe-table
sidebar_position: 42
sidebar_label: DESCRIBE
title: "DESCRIBE TABLE"
---

# DESCRIBE TABLE

Returns information about table columns.

**Syntax**

@@ -2,10 +2,9 @@
slug: /en/sql-reference/statements/detach
sidebar_position: 43
sidebar_label: DETACH
title: "DETACH Statement"
---

# DETACH Statement

Makes the server "forget" about the existence of a table, a materialized view, or a dictionary.

**Syntax**

@@ -2,10 +2,9 @@
slug: /en/sql-reference/statements/explain
sidebar_position: 39
sidebar_label: EXPLAIN
title: "EXPLAIN Statement"
---

# EXPLAIN Statement

Shows the execution plan of a statement.

Syntax:

@@ -2,10 +2,9 @@
slug: /en/sql-reference/statements/kill
sidebar_position: 46
sidebar_label: KILL
title: "KILL Statements"
---

# KILL Statements

There are two kinds of kill statements: to kill a query and to kill a mutation.

## KILL QUERY

@@ -2,10 +2,9 @@
slug: /en/sql-reference/statements/optimize
sidebar_position: 47
sidebar_label: OPTIMIZE
title: "OPTIMIZE Statement"
---

# OPTIMIZE Statement

This query tries to initialize an unscheduled merge of data parts for tables.

:::warning
@@ -1,10 +1,9 @@
---
slug: /en/sql-reference/statements/select/offset
sidebar_label: OFFSET
title: "OFFSET FETCH Clause"
---

# OFFSET FETCH Clause

`OFFSET` and `FETCH` allow you to retrieve data by portions. They specify a row block which you want to get by a single query.
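A hedged usage sketch; `test_fetch` is a hypothetical table sorted by column `a`:

```sql
SELECT * FROM test_fetch ORDER BY a OFFSET 3 ROWS FETCH FIRST 3 ROWS ONLY;
```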
@@ -2,10 +2,9 @@
slug: /en/sql-reference/statements/set-role
sidebar_position: 51
sidebar_label: SET ROLE
title: "SET ROLE Statement"
---

# SET ROLE Statement

Activates roles for the current user.

``` sql
@@ -2,10 +2,9 @@
slug: /en/sql-reference/table-functions/cluster
sidebar_position: 50
sidebar_label: cluster
title: "cluster, clusterAllReplicas"
---

# cluster, clusterAllReplicas

Allows accessing all shards of an existing cluster configured in the `remote_servers` section without creating a [Distributed](../../engines/table-engines/special/distributed.md) table. One replica of each shard is queried.

The `clusterAllReplicas` function is the same as `cluster`, but all replicas are queried. Each replica in the cluster is used as a separate shard/connection.
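A minimal sketch; `test_cluster` is a placeholder for a cluster name defined in `remote_servers`:

```sql
SELECT * FROM cluster('test_cluster', system.one);
SELECT * FROM clusterAllReplicas('test_cluster', system.one);
```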
@@ -2,10 +2,9 @@
slug: /en/sql-reference/table-functions/dictionary
sidebar_position: 54
sidebar_label: dictionary function
title: dictionary
---

# dictionary

Displays the [dictionary](../../sql-reference/dictionaries/external-dictionaries/external-dicts.md) data as a ClickHouse table. Works the same way as the [Dictionary](../../engines/table-engines/special/dictionary.md) engine.

**Syntax**
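A minimal sketch, assuming a dictionary named `my_dict` already exists:

```sql
SELECT * FROM dictionary('my_dict');
```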
@@ -2,10 +2,9 @@
slug: /en/sql-reference/table-functions/null
sidebar_position: 53
sidebar_label: null function
title: 'null'
---

# null

Creates a temporary table of the specified structure with the [Null](../../engines/table-engines/special/null.md) table engine. According to the `Null`-engine properties, the table data is ignored and the table itself is immediately dropped right after the query execution. The function is used for the convenience of test writing and demonstrations.

**Syntax**
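A minimal sketch of the typical use, discarding inserted rows for benchmarking or tests:

```sql
INSERT INTO FUNCTION null('x UInt64') SELECT * FROM numbers_mt(1000000);
```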
@@ -2,10 +2,9 @@
slug: /en/sql-reference/table-functions/s3Cluster
sidebar_position: 55
sidebar_label: s3Cluster
title: "s3Cluster Table Function"
---

# s3Cluster Table Function

Allows processing files from [Amazon S3](https://aws.amazon.com/s3/) in parallel from many nodes in a specified cluster. On the initiator it creates a connection to all nodes in the cluster, expands asterisks in the S3 file path, and dispatches each file dynamically. On the worker node it asks the initiator about the next task to process and processes it. This is repeated until all tasks are finished.

**Syntax**
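A hedged sketch; the cluster name, bucket URL, and structure below are placeholders:

```sql
SELECT * FROM s3Cluster(
    'my_cluster',
    'https://my-bucket.s3.amazonaws.com/data/*.csv',
    'CSV',
    'name String, value UInt32'
);
```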
@@ -2,10 +2,9 @@
slug: /en/sql-reference/table-functions/sqlite
sidebar_position: 55
sidebar_label: sqlite
title: sqlite
---

## sqlite

Allows performing queries on data stored in an [SQLite](../../engines/database-engines/sqlite.md) database.

**Syntax**
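A minimal sketch; the database path and table name are placeholders:

```sql
SELECT * FROM sqlite('db_path', 'table_name');
```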
@@ -2,10 +2,9 @@
slug: /en/sql-reference/table-functions/view
sidebar_position: 51
sidebar_label: view
title: view
---

## view

Turns a subquery into a table. The function implements views (see [CREATE VIEW](https://clickhouse.com/docs/en/sql-reference/statements/create/view/#create-view)). The resulting table does not store data, but only stores the specified `SELECT` query. When reading from the table, ClickHouse executes the query and deletes all unnecessary columns from the result.

**Syntax**
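A minimal self-contained sketch:

```sql
SELECT * FROM view(SELECT number AS n FROM numbers(5));
```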
@@ -344,7 +344,7 @@ If the total number of active parts in all...

 **Usage**

-The value of the `min_bytes_to_rebalance_partition_over_jbod` setting must be less than the value of the [max_bytes_to_merge_at_max_space_in_pool](../../operations/settings/merge-tree-settings.md#max-bytes-to-merge-at-max-space-in-pool) setting. Otherwise, ClickHouse throws an exception.
+The value of the `min_bytes_to_rebalance_partition_over_jbod` setting must be no less than the value of the [max_bytes_to_merge_at_max_space_in_pool](../../operations/settings/merge-tree-settings.md#max-bytes-to-merge-at-max-space-in-pool) setting divided by 1024. Otherwise, ClickHouse throws an exception.

 ## detach_not_byte_identical_parts {#detach_not_byte_identical_parts}

@@ -407,7 +407,7 @@ RANGE(MIN StartDate MAX EndDate);

 To improve cache performance, use a subquery with `LIMIT`, and call the function with the dictionary externally.

-Supported [sources](external-dicts-dict-sources.md): MySQL, ClickHouse, executable, HTTP.
+All types of sources are supported.

 Example of settings:
@@ -491,21 +491,21 @@ SELECT concat(key1, key2), sum(value) FROM key_val GROUP BY (key1, key2);

 Returns the string s converted from the encoding in from to the encoding in to.

-## Base58Encode(plaintext), Base58Decode(encoded_text) {#base58}
+## base58Encode(plaintext), base58Decode(encoded_text) {#base58}

 Accepts a string or a column of strings and encodes/decodes them using the [Base58](https://tools.ietf.org/id/draft-msporny-base58-01.html) encoding scheme with the standard Bitcoin alphabet.

 **Syntax**

 ```sql
-encodeBase58(decoded)
-decodeBase58(encoded)
+base58Encode(decoded)
+base58Decode(encoded)
 ```

 **Arguments**

 - `decoded` — a [String](../../sql-reference/data-types/string.md) column or constant.
-- `encoded` — a [String](../../sql-reference/data-types/string.md) column or constant. If the input string is not a valid Base58 code for some other string, exception `1001` is thrown.
+- `encoded` — a [String](../../sql-reference/data-types/string.md) column or constant. If the input string is not a valid Base58 code for some other string, an exception is thrown.

 **Returned value**

@@ -518,18 +518,18 @@ decodeBase58(encoded)
 Query:

 ``` sql
-SELECT encodeBase58('encode');
-SELECT decodeBase58('izCFiDUY');
+SELECT base58Encode('Encoded');
+SELECT base58Decode('3dc8KtHrwM');
 ```

 Result:
 ```text
-┌─encodeBase58('encode', 'flickr')─┐
-│ SvyTHb1D                         │
-└──────────────────────────────────┘
-┌─decodeBase58('izCFiDUY', 'ripple')─┐
-│ decode                             │
-└────────────────────────────────────┘
+┌─base58Encode('Encoded')─┐
+│ 3dc8KtHrwM              │
+└─────────────────────────┘
+┌─base58Decode('3dc8KtHrwM')─┐
+│ Encoded                    │
+└────────────────────────────┘
 ```

 ## base64Encode(s) {#base64encode}
@@ -1,5 +1,5 @@
 ---
-slug: /ru/sql-reference/statements/alter/
+slug: /ru/sql-reference/statements/alter/index
 toc_hidden_folder: true
 sidebar_position: 42
 sidebar_label: "Манипуляции с индексами"
@@ -153,8 +153,8 @@ ClickHouse has only one physical sort order, determined by the `order by` clause. To create a new...

 * Change the column type. It must be compatible with the original type, otherwise replication fails. For example, a `UInt32` column can be changed to `UInt64`, but a `String` column cannot be changed to `Array(String)`.
 * Change [column TTL](../table-engines/mergetree-family/mergetree/#mergetree-column-ttl).
-* Change [column compression codec](../../sql-reference/statements/create/table.md/#codecs).
-* Add [ALIAS columns](../../sql-reference/statements/create/table.md/#alias).
+* Change [column compression codec](../../sql-reference/statements/create/table.mdx#codecs).
+* Add [ALIAS columns](../../sql-reference/statements/create/table.mdx#alias).
 * Add [skipping indexes](../table-engines/mergetree-family/mergetree/#table_engine-mergetree-data_skipping-indexes)
 * Add [projections](../table-engines/mergetree-family/mergetree/#projections).
 Note that projection optimization is disabled when using `SELECT ... FINAL` (which MaterializedMySQL does by default), so its usefulness is limited here; `INDEX ... TYPE hypothesis`, [described in the v21.12 blog post](https://clickhouse.com/blog/en/2021/clickhouse-v21.12-released/), may be more useful in this case.

@@ -34,7 +34,7 @@ CREATE DATABASE testdb ENGINE = Replicated('zoo_path', 'shard_name', 'replica_name')

 When a new replica of a database is created, the replica creates the tables by itself. If the replica has been unavailable for a long time and has lagged behind the replication log, it checks its local metadata against the current metadata in ZooKeeper, moves extra tables with data to a separate non-replicated database (so as not to accidentally delete anything superfluous), creates the missing tables, and updates table names if they have been renamed. Data is replicated at the `ReplicatedMergeTree` level, i.e. if a table is not replicated, its data is not replicated (the database is only responsible for metadata).

-[`ALTER TABLE ATTACH|FETCH|DROP|DROP DETACHED|DETACH PARTITION|PART`](../../sql-reference/statements/alter/partition.md) queries are allowed but not replicated. The database engine only adds/fetches/removes the partition/part on the current replica. However, if the table itself uses a Replicated table engine, the data will be replicated after `ATTACH` is used.
+[`ALTER TABLE ATTACH|FETCH|DROP|DROP DETACHED|DETACH PARTITION|PART`](../../sql-reference/statements/alter/partition.mdx) queries are allowed but not replicated. The database engine only adds/fetches/removes the partition/part on the current replica. However, if the table itself uses a Replicated table engine, the data will be replicated after `ATTACH` is used.
 ## Usage Example {#usage-example}

 Create a cluster of three hosts:
@@ -1 +0,0 @@
-../../../../en/engines/table-engines/integrations/ExternalDistributed.md
@@ -0,0 +1,10 @@
+---
+slug: /zh/engines/table-engines/integrations/ExternalDistributed
+sidebar_position: 12
+sidebar_label: ExternalDistributed
+title: ExternalDistributed
+---
+
+import Content from '@site/docs/en/engines/table-engines/integrations/ExternalDistributed.md';
+
+<Content />
@@ -25,7 +25,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
 ) ENGINE = Hive('thrift://host:port', 'database', 'table');
 PARTITION BY expr
 ```
-See the detailed description of the [CREATE TABLE](../../../sql-reference/statements/create/table.md#create-table-query) query.
+See the detailed description of the [CREATE TABLE](../../../sql-reference/statements/create/table.mdx#create-table-query) query.

 The table structure can differ from the original Hive table structure:
 - Column names should be the same as in the original Hive table, but you can use just some of these columns, in any order, and you can also use alias columns computed from other columns.
@@ -1 +0,0 @@
-../../../../en/engines/table-engines/integrations/materialized-postgresql.md
@@ -0,0 +1,10 @@
+---
+slug: /zh/engines/table-engines/integrations/materialized-postgresql
+sidebar_position: 12
+sidebar_label: MaterializedPostgreSQL
+title: MaterializedPostgreSQL
+---
+
+import Content from '@site/docs/en/engines/table-engines/integrations/materialized-postgresql.md';
+
+<Content />
@@ -19,7 +19,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
 ) ENGINE = PostgreSQL('host:port', 'database', 'table', 'user', 'password'[, `schema`]);
 ```

-<!-- See the detailed description of the [CREATE TABLE](../../../sql-reference/statements/create/table.md#create-table-query) query. -->
+<!-- See the detailed description of the [CREATE TABLE](../../../sql-reference/statements/create/table.mdx#create-table-query) query. -->

 The table structure can differ from the source PostgreSQL table structure:

@@ -57,4 +57,4 @@ SELECT * FROM sqlite_db.table2 ORDER BY col1;
 **See Also**

 - The [SQLite](../../../engines/database-engines/sqlite.md) engine
-- The [sqlite](../../../sql-reference/table-functions/sqlite.md) table function
+- The [sqlite](../../../sql-reference/table-functions/sqlite.mdx) table function
@@ -285,7 +285,7 @@ sudo -u clickhouse touch /var/lib/clickhouse/flags/force_restore_data
 - [background_schedule_pool_size](../../../operations/settings/settings.md#background_schedule_pool_size)
 - [background_fetches_pool_size](../../../operations/settings/settings.md#background_fetches_pool_size)
 - [execute_merges_on_single_replica_time_threshold](../../../operations/settings/settings.md#execute-merges-on-single-replica-time-threshold)
-- [max_replicated_fetches_network_bandwidth](../../../operations/settings/merge-tree-settings.md#max_replicated_fetches_network_bandwidth)
-- [max_replicated_sends_network_bandwidth](../../../operations/settings/merge-tree-settings.md#max_replicated_sends_network_bandwidth)
+- [max_replicated_fetches_network_bandwidth](../../../operations/settings/merge-tree-settings.mdx#max_replicated_fetches_network_bandwidth)
+- [max_replicated_sends_network_bandwidth](../../../operations/settings/merge-tree-settings.mdx#max_replicated_sends_network_bandwidth)

 [Original article](https://clickhouse.com/docs/en/operations/table_engines/replication/) <!--hide-->

@@ -226,7 +226,7 @@ The SELECT query is sent to all shards and works regardless of how data is distributed across them...
 - `_shard_num` — the `shard_num` value from the `system.clusters` table. Data type: [UInt32](../../../sql-reference/data-types/int-uint.md).

 !!! note "Note"
-    Since the [remote](../../../sql-reference/table-functions/remote.md) and [cluster](../../../sql-reference/table-functions/cluster.md) table functions create a Distributed table internally, `_shard_num` is available for them as well.
+    Since the [remote](../../../sql-reference/table-functions/remote.md) and [cluster](../../../sql-reference/table-functions/cluster.mdx) table functions create a Distributed table internally, `_shard_num` is available for them as well.

 **See Also**
 - [Virtual columns](../../../engines/table-engines/index.md#table_engines-virtual_columns) description
@@ -18,7 +18,7 @@ Indexes
 : ClickHouse keeps data structures in memory that allow reading only the used columns, and only the necessary row ranges of those columns.

 Data compression
-: Storing different values of the same column together often leads to better compression ratios (compared to row-oriented systems) because in real data a column often has the same or not so many different values for neighboring rows. In addition to general-purpose compression, ClickHouse supports [specialized codecs](../../sql-reference/statements/create/table.md/#create-query-specialized-codecs) that can make data even more compact.
+: Storing different values of the same column together often leads to better compression ratios (compared to row-oriented systems) because in real data a column often has the same or not so many different values for neighboring rows. In addition to general-purpose compression, ClickHouse supports [specialized codecs](../../sql-reference/statements/create/table.mdx/#create-query-specialized-codecs) that can make data even more compact.

 Vectorized query execution
 : ClickHouse not only stores data in columns but also processes data in columns. It leads to better CPU cache utilization and allows for [SIMD](https://en.wikipedia.org/wiki/SIMD) CPU instructions usage.

@@ -35,7 +35,7 @@ More details on [mutations](../../sql-reference/statements/alter.md/#alter-mutations).

 `ALTER TABLE ... DROP PARTITION` provides a cost-efficient way to drop a whole partition. It’s not that flexible and needs a proper partitioning scheme configured on table creation, but it still covers most common cases. Like mutations, it needs to be executed from an external system for regular use.

-More details on [manipulating partitions](../../sql-reference/statements/alter/partition.md/#alter_drop-partition).
+More details on [manipulating partitions](../../sql-reference/statements/alter/partition.mdx/#alter_drop-partition).

 ## TRUNCATE {#truncate}
@@ -11,7 +11,7 @@ ClickHouse is a general-purpose data storage solution for [OLAP](../../faq/general/olap.md) workloads...

-First, there are **[specialized codecs](../../sql-reference/statements/create/table.md#create-query-specialized-codecs)** that are typical for time series: either common algorithms such as `DoubleDelta` and `Gorilla`, or ClickHouse-specific ones such as `T64`.
+First, there are **[specialized codecs](../../sql-reference/statements/create/table.mdx#create-query-specialized-codecs)** that are typical for time series: either common algorithms such as `DoubleDelta` and `Gorilla`, or ClickHouse-specific ones such as `T64`.
@@ -1 +0,0 @@
-../../../en/getting-started/example-datasets/brown-benchmark.md

docs/zh/getting-started/example-datasets/brown-benchmark.mdx (new file, 10 lines)
@@ -0,0 +1,10 @@
+---
+slug: /zh/getting-started/example-datasets/brown-benchmark
+sidebar_label: Brown University Benchmark
+description: A new analytical benchmark for machine-generated log data
+title: "Brown University Benchmark"
+---
+
+import Content from '@site/docs/en/getting-started/example-datasets/brown-benchmark.md';
+
+<Content />
@@ -1 +0,0 @@
-../../../en/getting-started/example-datasets/cell-towers.md

docs/zh/getting-started/example-datasets/cell-towers.mdx (new file, 9 lines)
@@ -0,0 +1,9 @@
+---
+slug: /zh/getting-started/example-datasets/cell-towers
+sidebar_label: Cell Towers
+title: "Cell Towers"
+---
+
+import Content from '@site/docs/en/getting-started/example-datasets/cell-towers.md';
+
+<Content />