Mirror of https://github.com/ClickHouse/ClickHouse.git, synced 2024-11-22 15:42:02 +00:00

Merge branch 'master' of https://github.com/ClickHouse/ClickHouse into taiyang-li-disk-reload

This commit is contained in: commit 5f5bbc1fca

.github/PULL_REQUEST_TEMPLATE.md (vendored): 6 changed lines

@@ -19,9 +19,9 @@ Detailed description / Documentation draft:
 ...

-By adding documentation, you'll allow users to try your new feature immediately, not when someone else will have time to document it later. Documentation is necessary for all features that affect user experience in any way. You can add brief documentation draft above, or add documentation right into your patch as Markdown files in [docs](https://github.com/ClickHouse/ClickHouse/tree/master/docs) folder.
+> By adding documentation, you'll allow users to try your new feature immediately, not when someone else will have time to document it later. Documentation is necessary for all features that affect user experience in any way. You can add brief documentation draft above, or add documentation right into your patch as Markdown files in [docs](https://github.com/ClickHouse/ClickHouse/tree/master/docs) folder.

-If you are doing this for the first time, it's recommended to read the lightweight [Contributing to ClickHouse Documentation](https://github.com/ClickHouse/ClickHouse/tree/master/docs/README.md) guide first.
+> If you are doing this for the first time, it's recommended to read the lightweight [Contributing to ClickHouse Documentation](https://github.com/ClickHouse/ClickHouse/tree/master/docs/README.md) guide first.

-Information about CI checks: https://clickhouse.tech/docs/en/development/continuous-integration/
+> Information about CI checks: https://clickhouse.tech/docs/en/development/continuous-integration/

@@ -259,10 +259,25 @@ private:
     Poco::Logger * log;
     BaseDaemon & daemon;

-    void onTerminate(const std::string & message, UInt32 thread_num) const
+    void onTerminate(std::string_view message, UInt32 thread_num) const
     {
+        size_t pos = message.find('\n');
+
         LOG_FATAL(log, "(version {}{}, {}) (from thread {}) {}",
-            VERSION_STRING, VERSION_OFFICIAL, daemon.build_id_info, thread_num, message);
+            VERSION_STRING, VERSION_OFFICIAL, daemon.build_id_info, thread_num, message.substr(0, pos));
+
+        /// Print trace from std::terminate exception line-by-line to make it easy for grep.
+        while (pos != std::string_view::npos)
+        {
+            ++pos;
+            size_t next_pos = message.find('\n', pos);
+            size_t size = next_pos;
+            if (next_pos != std::string_view::npos)
+                size = next_pos - pos;
+
+            LOG_FATAL(log, "{}", message.substr(pos, size));
+            pos = next_pos;
+        }
     }

     void onFault(

@@ -8,6 +8,7 @@ if [ -x ./clickhouse ]
 then
     CLICKHOUSE_CLIENT="./clickhouse client"
 elif command -v clickhouse-client >/dev/null 2>&1
 then
     CLICKHOUSE_CLIENT="clickhouse-client"
 else
     echo "clickhouse-client is not found"

contrib/NuRaft (vendored): 2 changed lines

@@ -1 +1 @@
-Subproject commit 976874b7aa7f422bf4ea595bb7d1166c617b1c26
+Subproject commit 0ce9490093021c63564cca159571a8b27772ad48

@@ -22,6 +22,7 @@ set(SRCS
     "${LIBRARY_DIR}/src/launcher.cxx"
     "${LIBRARY_DIR}/src/srv_config.cxx"
     "${LIBRARY_DIR}/src/snapshot_sync_req.cxx"
+    "${LIBRARY_DIR}/src/snapshot_sync_ctx.cxx"
     "${LIBRARY_DIR}/src/handle_timeout.cxx"
     "${LIBRARY_DIR}/src/handle_append_entries.cxx"
     "${LIBRARY_DIR}/src/cluster_config.cxx"

contrib/protobuf (vendored): 2 changed lines

@@ -1 +1 @@
-Subproject commit 73b12814204ad9068ba352914d0dc244648b48ee
+Subproject commit 75601841d172c73ae6bf4ce8121f42b875cdbabd

@@ -299,6 +299,7 @@ function run_tests
         01318_decrypt # Depends on OpenSSL
         01663_aes_msan # Depends on OpenSSL
         01667_aes_args_check # Depends on OpenSSL
+        01683_codec_encrypted # Depends on OpenSSL
         01776_decrypt_aead_size_check # Depends on OpenSSL
         01811_filter_by_null # Depends on OpenSSL
         01281_unsucceeded_insert_select_queries_counter

@@ -20,6 +20,7 @@ def get_skip_list_cmd(path):

 def get_options(i):
     options = []
+    client_options = []
     if 0 < i:
         options.append("--order=random")

@@ -27,25 +28,29 @@ def get_options(i):
         options.append("--db-engine=Ordinary")

     if i % 3 == 2:
-        options.append('''--client-option='allow_experimental_database_replicated=1' --db-engine="Replicated('/test/db/test_{}', 's1', 'r1')"'''.format(i))
+        options.append('''--db-engine="Replicated('/test/db/test_{}', 's1', 'r1')"'''.format(i))
+        client_options.append('allow_experimental_database_replicated=1')

     # If database name is not specified, new database is created for each functional test.
     # Run some threads with one database for all tests.
     if i % 2 == 1:
         options.append(" --database=test_{}".format(i))

-    if i % 7 == 0:
-        options.append(" --client-option='join_use_nulls=1'")
+    if i % 5 == 1:
+        client_options.append("join_use_nulls=1")

-    if i % 14 == 0:
-        options.append(' --client-option="join_algorithm=\'partial_merge\'"')
+    if i % 15 == 6:
+        client_options.append("join_algorithm='partial_merge'")

-    if i % 21 == 0:
-        options.append(' --client-option="join_algorithm=\'auto\'"')
-        options.append(' --client-option="max_rows_in_join=1000"')
+    if i % 15 == 11:
+        client_options.append("join_algorithm='auto'")
+        client_options.append('max_rows_in_join=1000')

     if i == 13:
-        options.append(" --client-option='memory_tracker_fault_probability=0.00001'")
+        client_options.append('memory_tracker_fault_probability=0.001')

+    if client_options:
+        options.append(" --client-option " + ' '.join(client_options))
+
     return ' '.join(options)

@@ -35,7 +35,7 @@ RUN apt-get update \
 ENV TZ=Europe/Moscow
 RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone

-RUN pip3 install urllib3 testflows==1.6.90 docker-compose==1.29.1 docker==5.0.0 dicttoxml kazoo tzlocal python-dateutil numpy
+RUN pip3 install urllib3 testflows==1.7.20 docker-compose==1.29.1 docker==5.0.0 dicttoxml kazoo tzlocal python-dateutil numpy

 ENV DOCKER_CHANNEL stable
 ENV DOCKER_VERSION 20.10.6

@@ -9,7 +9,7 @@ Many developers can say that the code is the best docs by itself, and they are r
 If you want to help ClickHouse with documentation you can face, for example, the following questions:

 - "I don't know how to write."

     We have prepared some [recommendations](#what-to-write) for you.

 - "I know what I want to write, but I don't know how to contribute to docs."

@@ -71,17 +71,17 @@ Contribute all new information in English language. Other languages are translat
 ```

 - Bold text: `**asterisks**` or `__underlines__`.
 - Links: `[link text](uri)`. Examples:

     - External link: `[ClickHouse repo](https://github.com/ClickHouse/ClickHouse)`
     - Cross link: `[How to build docs](tools/README.md)`

 - Images: `![Exclamation sign](uri)`. You can refer to local images as well as remote in internet.
 - Lists: Lists can be of two types:

     - `- unordered`: Each item starts from the `-`.
     - `1. ordered`: Each item starts from the number.

     A list must be separated from the text by an empty line. Nested lists must be indented with 4 spaces.

 - Inline code: `` `in backticks` ``.

@@ -107,7 +107,7 @@ Contribute all new information in English language. Other languages are translat
 - Text hidden behind a cut (single string that opens on click):

     ```text
     <details markdown="1"> <summary>Visible text</summary>
         Hidden content.
     </details>`.
     ```

@@ -1,6 +1,6 @@
 ---
 toc_priority:
 toc_title:
 ---

 # data_type_name {#data_type-name}

@@ -58,6 +58,6 @@ Result:

 Follow up with any text to clarify the example.

 **See Also**

 - [link](#)

@@ -14,8 +14,8 @@ More text (Optional).

 **Arguments** (Optional)

 - `x` — Description. Optional (only for optional arguments). Possible values: <values list>. Default value: <value>. [Type name](relative/path/to/type/dscr.md#type).
 - `y` — Description. Optional (only for optional arguments). Possible values: <values list>. Default value: <value>. [Type name](relative/path/to/type/dscr.md#type).

 **Parameters** (Optional, only for parametric aggregate functions)

@@ -23,7 +23,7 @@ More text (Optional).

 **Returned value(s)**

 - Returned values list.

 Type: [Type name](relative/path/to/type/dscr.md#type).

@@ -16,8 +16,8 @@ Better:
 option(ENABLE_TESTS "Provide unit_test_dbms target with Google.test unit tests" OFF)
 ```

 If the option's purpose can't be guessed by its name, or the purpose guess may be misleading, or option has some
 pre-conditions, leave a comment above the `option()` line and explain what it does.
 The best way would be linking the docs page (if it exists).
 The comment is parsed into a separate column (see below).

@@ -33,7 +33,7 @@ option(ENABLE_TESTS "Provide unit_test_dbms target with Google.test unit tests"

 Suppose you have an option that may strip debug symbols from the ClickHouse's part.
 This can speed up the linking process, but produces a binary that cannot be debugged.
 In that case, prefer explicitly raising a warning telling the developer that he may be doing something wrong.
 Also, such options should be disabled if applicable.

 Bad:

@@ -7,7 +7,7 @@ toc_title: Support

 !!! info "Info"
     If you have launched a ClickHouse commercial support service, feel free to [open a pull-request](https://github.com/ClickHouse/ClickHouse/edit/master/docs/en/commercial/support.md) adding it to the following list.

 ## Yandex.Cloud

 ClickHouse worldwide support from the authors of ClickHouse. Supports on-premise and cloud deployments. Ask details on clickhouse-support@yandex-team.com

@@ -4,11 +4,11 @@ ClickHouse has hundreds (or even thousands) of features. Every commit gets check

 The core functionality is very well tested, but some corner-cases and different combinations of features can be uncovered with ClickHouse CI.

 Most of the bugs/regressions we see happen in that 'grey area' where test coverage is poor.

 And we are very interested in covering most of the possible scenarios and feature combinations used in real life by tests.

 ## Why adding tests

 Why/when you should add a test case into ClickHouse code:
 1) you use some complicated scenarios / feature combinations / you have some corner case which is probably not widely used

@@ -17,18 +17,18 @@ Why/when you should add a test case into ClickHouse code:
 4) once the test is added/accepted, you can be sure the corner case you check will never be accidentally broken.
 5) you will be a part of great open-source community
 6) your name will be visible in the `system.contributors` table!
 7) you will make the world a bit better :)

 ### Steps to do

 #### Prerequisite

 I assume you run some Linux machine (you can use docker / virtual machines on other OS) and any modern browser / internet connection, and you have some basic Linux & SQL skills.

 Any highly specialized knowledge is not needed (so you don't need to know C++ or know something about how ClickHouse CI works).

 #### Preparation

 1) [create GitHub account](https://github.com/join) (if you haven't one yet)
 2) [setup git](https://docs.github.com/en/free-pro-team@latest/github/getting-started-with-github/set-up-git)

@@ -54,17 +54,17 @@ git remote add upstream https://github.com/ClickHouse/ClickHouse

 #### New branch for the test

 1) create a new branch from the latest clickhouse master
 ```
 cd ~/workspace/ClickHouse
 git fetch upstream
 git checkout -b name_for_a_branch_with_my_test upstream/master
 ```

 #### Install & run clickhouse

 1) install `clickhouse-server` (follow [official docs](https://clickhouse.tech/docs/en/getting-started/install/))
 2) install test configurations (it will use Zookeeper mock implementation and adjust some settings)
 ```
 cd ~/workspace/ClickHouse/tests/config
 sudo ./install.sh

@@ -74,7 +74,7 @@ sudo ./install.sh
 sudo systemctl restart clickhouse-server
 ```

 #### Creating the test file

 1) find the number for your test - find the file with the biggest number in `tests/queries/0_stateless/`

@@ -86,7 +86,7 @@ tests/queries/0_stateless/01520_client_print_query_id.reference
 ```
 Currently, the last number for the test is `01520`, so my test will have the number `01521`

 2) create an SQL file with the next number and name of the feature you test

 ```sh
 touch tests/queries/0_stateless/01521_dummy_test.sql

@@ -112,16 +112,16 @@ clickhouse-client -nmT < tests/queries/0_stateless/01521_dummy_test.sql | tee te
 - fast - should not take longer than a few seconds (better subseconds)
 - correct - fails when feature is not working
 - deterministic
 - isolated / stateless
     - don't rely on some environment things
     - don't rely on timing when possible
 - try to cover corner cases (zeros / Nulls / empty sets / throwing exceptions)
 - to test that a query returns errors, you can put a special comment after the query: `-- { serverError 60 }` or `-- { clientError 20 }`
 - don't switch databases (unless necessary)
 - you can create several table replicas on the same node if needed
 - you can use one of the test cluster definitions when needed (see system.clusters)
 - use `number` / `numbers_mt` / `zeros` / `zeros_mt` and similar for queries / to initialize data when applicable
 - clean up the created objects after test and before the test (DROP IF EXISTS) - in case of some dirty state
 - prefer sync mode of operations (mutations, merges, etc.)
 - use other SQL files in the `0_stateless` folder as an example
 - ensure the feature / feature combination you want to test is not yet covered with existing tests

@@ -138,7 +138,7 @@ It's important to name tests correctly, so one could turn some tests subset off

 #### Commit / push / create PR.

 1) commit & push your changes
 ```sh
 cd ~/workspace/ClickHouse
 git add tests/queries/0_stateless/01521_dummy_test.sql

@@ -147,5 +147,5 @@ git commit # use some nice commit message when possible
 git push origin HEAD
 ```
 2) use a link which was shown during the push, to create a PR into the main repo
 3) adjust the PR title and contents, in `Changelog category (leave one)` keep
    `Build/Testing/Packaging Improvement`, fill the rest of the fields if you want.

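For illustration, a minimal sketch of what `tests/queries/0_stateless/01521_dummy_test.sql` could contain, following the guidelines above (the table and queries are hypothetical; `60` is the error code from the `-- { serverError 60 }` example above):

```sql
-- Clean up before and after the test, in case of a dirty state.
DROP TABLE IF EXISTS test;
CREATE TABLE test (x UInt64) ENGINE = MergeTree ORDER BY x;
INSERT INTO test SELECT number FROM numbers(3);
SELECT * FROM test;           -- expected rows go into the .reference file
SELECT * FROM no_such_table;  -- { serverError 60 }
DROP TABLE test;
```
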
@@ -17,7 +17,7 @@ It supports non-blocking [DROP TABLE](#drop-detach-table) and [RENAME TABLE](#re

 ### Table UUID {#table-uuid}

 All tables in database `Atomic` have persistent [UUID](../../sql-reference/data-types/uuid.md) and store data in directory `/clickhouse_path/store/xxx/xxxyyyyy-yyyy-yyyy-yyyy-yyyyyyyyyyyy/`, where `xxxyyyyy-yyyy-yyyy-yyyy-yyyyyyyyyyyy` is UUID of the table.
 Usually, the UUID is generated automatically, but the user can also explicitly specify the UUID in the same way when creating the table (this is not recommended). To display the `SHOW CREATE` query with the UUID you can use setting [show_table_uuid_in_table_create_query_if_not_nil](../../operations/settings/settings.md#show_table_uuid_in_table_create_query_if_not_nil). For example:

 ```sql

@@ -36,8 +36,8 @@ ENGINE = MaterializedMySQL('host:port', ['database' | database], 'user', 'passwo
 - `max_wait_time_when_mysql_unavailable` — Retry interval when MySQL is not available (milliseconds). Negative value disables retry. Default: `1000`.
 - `allows_query_when_mysql_lost` — Allow to query a materialized table when MySQL is lost. Default: `0` (`false`).
 ```
 CREATE DATABASE mysql ENGINE = MaterializedMySQL('localhost:3306', 'db', 'user', '***')
 SETTINGS
     allows_query_when_mysql_lost=true,
     max_wait_time_when_mysql_unavailable=10000;
 ```

@@ -52,10 +52,10 @@ For the correct work of `MaterializeMySQL`, there are few mandatory `MySQL`-side
 ## Virtual columns {#virtual-columns}

 When working with the `MaterializedMySQL` database engine, [ReplacingMergeTree](../../engines/table-engines/mergetree-family/replacingmergetree.md) tables are used with virtual `_sign` and `_version` columns.

 - `_version` — Transaction counter. Type [UInt64](../../sql-reference/data-types/int-uint.md).
 - `_sign` — Deletion mark. Type [Int8](../../sql-reference/data-types/int-uint.md). Possible values:
     - `1` — Row is not deleted,
     - `-1` — Row is deleted.

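As an illustration (not from the patch), the virtual columns can be read by naming them explicitly; `mysql.test` is the table used in the examples further below:

```sql
-- Virtual columns are not returned by SELECT *, so list them explicitly.
SELECT *, _sign, _version FROM mysql.test;
```
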
 ## Data Types Support {#data_types-support}

@@ -103,7 +103,7 @@ MySQL DDL queries are converted into the corresponding ClickHouse DDL queries ([

 - MySQL `INSERT` query is converted into `INSERT` with `_sign=1`.

 - MySQL `DELETE` query is converted into `INSERT` with `_sign=-1`.

 - MySQL `UPDATE` query is converted into `INSERT` with `_sign=-1` and `INSERT` with `_sign=1`.

@@ -146,9 +146,9 @@ mysql> SELECT * FROM test;
 ```

 ```text
 +---+------+------+
 | a | b    | c    |
 +---+------+------+
 | 2 | 222  | Wow! |
 +---+------+------+
 ```

@@ -175,9 +175,9 @@ SELECT * FROM mysql.test;
 ```

 ``` text
 ┌─a─┬──b─┐
 │ 1 │ 11 │
 │ 2 │ 22 │
 └───┴────┘
 ```

@@ -188,8 +188,8 @@ SELECT * FROM mysql.test;
 ```

 ``` text
 ┌─a─┬───b─┬─c────┐
 │ 2 │ 222 │ Wow! │
 └───┴─────┴──────┘
 ```

@@ -53,7 +53,7 @@ All other MySQL data types are converted into [String](../../sql-reference/data-

 ## Global Variables Support {#global-variables-support}

 For better compatibility you may address global variables in MySQL style, as `@@identifier`.

 These variables are supported:
 - `version`

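A minimal sketch of the MySQL-style syntax described above:

```sql
SELECT @@version;
```
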
@@ -14,7 +14,7 @@ Supports table structure modifications (`ALTER TABLE ... ADD|DROP COLUMN`). If `
 ## Creating a Database {#creating-a-database}

 ``` sql
 CREATE DATABASE test_database
 ENGINE = PostgreSQL('host:port', 'database', 'user', 'password'[, `use_table_cache`]);
 ```

@@ -43,14 +43,14 @@ ENGINE = PostgreSQL('host:port', 'database', 'user', 'password'[, `use_table_cac
 | TEXT, CHAR | [String](../../sql-reference/data-types/string.md) |
 | INTEGER | Nullable([Int32](../../sql-reference/data-types/int-uint.md)) |
 | ARRAY | [Array](../../sql-reference/data-types/array.md) |

 ## Examples of Use {#examples-of-use}

 Database in ClickHouse, exchanging data with the PostgreSQL server:

 ``` sql
 CREATE DATABASE test_database
 ENGINE = PostgreSQL('postgres1:5432', 'test_database', 'postgres', 'mysecretpassword', 1);
 ```

@@ -102,7 +102,7 @@ SELECT * FROM test_database.test_table;
 └────────┴───────┘
 ```

 Consider the table structure was modified in PostgreSQL:

 ``` sql
 postgre> ALTER TABLE test_table ADD COLUMN data Text

@@ -1,6 +1,6 @@
 # [experimental] Replicated {#replicated}

 The engine is based on the [Atomic](../../engines/database-engines/atomic.md) engine. It supports replication of metadata via DDL log being written to ZooKeeper and executed on all of the replicas for a given database.

 One ClickHouse server can have multiple replicated databases running and updating at the same time. But there can't be multiple replicas of the same replicated database.

@@ -20,9 +20,9 @@ One ClickHouse server can have multiple replicated databases running and updatin

 ## Specifics and Recommendations {#specifics-and-recommendations}

 DDL queries with `Replicated` database work in a similar way to [ON CLUSTER](../../sql-reference/distributed-ddl.md) queries, but with minor differences.

 First, the DDL request tries to execute on the initiator (the host that originally received the request from the user). If the request is not fulfilled, then the user immediately receives an error, other hosts do not try to fulfill it. If the request has been successfully completed on the initiator, then all other hosts will automatically retry until they complete it. The initiator will try to wait for the query to be completed on other hosts (no longer than [distributed_ddl_task_timeout](../../operations/settings/settings.md#distributed_ddl_task_timeout)) and will return a table with the query execution statuses on each host.

 The behavior in case of errors is regulated by the [distributed_ddl_output_mode](../../operations/settings/settings.md#distributed_ddl_output_mode) setting, for a `Replicated` database it is better to set it to `null_status_on_timeout` — i.e. if some hosts did not have time to execute the request for [distributed_ddl_task_timeout](../../operations/settings/settings.md#distributed_ddl_task_timeout), then do not throw an exception, but show the `NULL` status for them in the table.

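For illustration, the recommended setting can be applied per session like this:

```sql
-- Report NULL for hosts that miss distributed_ddl_task_timeout instead of throwing.
SET distributed_ddl_output_mode = 'null_status_on_timeout';
```
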
@@ -47,8 +47,8 @@ CREATE TABLE r.rmt (n UInt64) ENGINE=ReplicatedMergeTree ORDER BY n;
 ```

 ``` text
 ┌─────hosts────────────┬──status─┬─error─┬─num_hosts_remaining─┬─num_hosts_active─┐
 │ shard1|replica1      │ 0       │       │ 2                   │ 0                │
 │ shard1|other_replica │ 0       │       │ 1                   │ 0                │
 │ other_shard|r1       │ 0       │       │ 0                   │ 0                │
 └──────────────────────┴─────────┴───────┴─────────────────────┴──────────────────┘

@@ -57,13 +57,13 @@ CREATE TABLE r.rmt (n UInt64) ENGINE=ReplicatedMergeTree ORDER BY n;
 Showing the system table:

 ``` sql
 SELECT cluster, shard_num, replica_num, host_name, host_address, port, is_local
 FROM system.clusters WHERE cluster='r';
 ```

 ``` text
 ┌─cluster─┬─shard_num─┬─replica_num─┬─host_name─┬─host_address─┬─port─┬─is_local─┐
 │ r       │ 1         │ 1           │ node3     │ 127.0.0.1    │ 9002 │ 0        │
 │ r       │ 2         │ 1           │ node2     │ 127.0.0.1    │ 9001 │ 0        │
 │ r       │ 2         │ 2           │ node1     │ 127.0.0.1    │ 9000 │ 1        │
 └─────────┴───────────┴─────────────┴───────────┴──────────────┴──────┴──────────┘

@@ -78,9 +78,9 @@ node1 :) SELECT materialize(hostName()) AS host, groupArray(n) FROM r.d GROUP BY
 ```

 ``` text
 ┌─hosts─┬─groupArray(n)─┐
 │ node1 │ [1,3,5,7,9]   │
 │ node2 │ [0,2,4,6,8]   │
 └───────┴───────────────┘
 ```

@@ -93,8 +93,8 @@ node4 :) CREATE DATABASE r ENGINE=Replicated('some/path/r','other_shard','r2');
 The cluster configuration will look like this:

 ``` text
 ┌─cluster─┬─shard_num─┬─replica_num─┬─host_name─┬─host_address─┬─port─┬─is_local─┐
 │ r       │ 1         │ 1           │ node3     │ 127.0.0.1    │ 9002 │ 0        │
 │ r       │ 1         │ 2           │ node4     │ 127.0.0.1    │ 9003 │ 0        │
 │ r       │ 2         │ 1           │ node2     │ 127.0.0.1    │ 9001 │ 0        │
 │ r       │ 2         │ 2           │ node1     │ 127.0.0.1    │ 9000 │ 1        │

@@ -108,8 +108,8 @@ node2 :) SELECT materialize(hostName()) AS host, groupArray(n) FROM r.d GROUP BY
 ```

 ```text
 ┌─hosts─┬─groupArray(n)─┐
 │ node2 │ [1,3,5,7,9]   │
 │ node4 │ [0,2,4,6,8]   │
 └───────┴───────────────┘
 ```

@@ -35,7 +35,7 @@ The table structure can differ from the original table structure:
 - `password` — User password.

 ## Implementation Details {#implementation-details}

 Supports multiple replicas that must be listed by `|` and shards must be listed by `,`. For example:

 ```sql

@@ -37,7 +37,7 @@ Table in ClickHouse which allows to read data from MongoDB collection:
 ``` text
 CREATE TABLE mongo_table
 (
     key UInt64,
     data String
 ) ENGINE = MongoDB('mongo1:27017', 'test', 'simple_table', 'testuser', 'clickhouse');
 ```

@@ -49,14 +49,14 @@ PostgreSQL `Array` types are converted into ClickHouse arrays.

 !!! info "Note"
     Be careful - in PostgreSQL, array data created like `type_name[]` may contain multi-dimensional arrays of different dimensions in different table rows in the same column. But in ClickHouse it is only allowed to have multidimensional arrays of the same count of dimensions in all table rows in the same column.

 Supports multiple replicas that must be listed by `|`. For example:

 ```sql
 CREATE TABLE test_replicas (id UInt32, name String) ENGINE = PostgreSQL(`postgres{2|3|4}:5432`, 'clickhouse', 'test_replicas', 'postgres', 'mysecretpassword');
 ```

 Replicas priority for PostgreSQL dictionary source is supported. The bigger the number in the map, the lower the priority. The highest priority is `0`.

 In the example below replica `example01-1` has the highest priority:

@@ -101,7 +101,7 @@ For very large clusters, you can use different ZooKeeper clusters for different

 Replication is asynchronous and multi-master. `INSERT` queries (as well as `ALTER`) can be sent to any available server. Data is inserted on the server where the query is run, and then it is copied to the other servers. Because it is asynchronous, recently inserted data appears on the other replicas with some latency. If part of the replicas are not available, the data is written when they become available. If a replica is available, the latency is the amount of time it takes to transfer the block of compressed data over the network. The number of threads performing background tasks for replicated tables can be set by [background_schedule_pool_size](../../../operations/settings/settings.md#background_schedule_pool_size) setting.

 `ReplicatedMergeTree` engine uses a separate thread pool for replicated fetches. Size of the pool is limited by the [background_fetches_pool_size](../../../operations/settings/settings.md#background_fetches_pool_size) setting which can be tuned with a server restart.

 By default, an INSERT query waits for confirmation of writing the data from only one replica. If the data was successfully written to only one replica and the server with this replica ceases to exist, the stored data will be lost. To enable getting confirmation of data writes from multiple replicas, use the `insert_quorum` option.

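For illustration, a hedged sketch of enabling quorum writes (`table_name` is the example table used later on this page):

```sql
-- Acknowledge the INSERT only after two replicas confirm the write.
SET insert_quorum = 2;
INSERT INTO table_name (x) VALUES (1);
```
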
@@ -155,7 +155,7 @@ CREATE TABLE table_name

 </details>

 As the example shows, these parameters can contain substitutions in curly brackets. The substituted values are taken from the [macros](../../../operations/server-configuration-parameters/settings/#macros) section of the configuration file.

 Example:

@@ -198,7 +198,7 @@ In this case, you can omit arguments when creating tables:

 ``` sql
 CREATE TABLE table_name (
     x UInt32
 ) ENGINE = ReplicatedMergeTree
 ORDER BY x;
 ```

@@ -207,7 +207,7 @@ It is equivalent to:

 ``` sql
 CREATE TABLE table_name (
     x UInt32
 ) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/{database}/table_name', '{replica}')
 ORDER BY x;
 ```

@@ -16,7 +16,7 @@ $ curl 'http://localhost:8123/'
 Ok.
 ```

 Web UI can be accessed here: `http://localhost:8123/play`.

 ![Web UI](../images/play.png)

@@ -43,7 +43,7 @@ toc_title: Integrations
 - Monitoring
     - [Graphite](https://graphiteapp.org)
         - [graphouse](https://github.com/yandex/graphouse)
-        - [carbon-clickhouse](https://github.com/lomik/carbon-clickhouse) +
+        - [carbon-clickhouse](https://github.com/lomik/carbon-clickhouse)
         - [graphite-clickhouse](https://github.com/lomik/graphite-clickhouse)
         - [graphite-ch-optimizer](https://github.com/innogames/graphite-ch-optimizer) - optimizes staled partitions in [\*GraphiteMergeTree](../../engines/table-engines/mergetree-family/graphitemergetree.md#graphitemergetree) if rules from [rollup configuration](../../engines/table-engines/mergetree-family/graphitemergetree.md#rollup-configuration) could be applied
 - [Grafana](https://grafana.com/)

@@ -34,7 +34,7 @@ Configuration template:
         <min_part_size>...</min_part_size>
         <min_part_size_ratio>...</min_part_size_ratio>
         <method>...</method>
         <level>...</level>
     </case>
     ...
 </compression>

@@ -64,11 +64,33 @@ If no conditions met for a data part, ClickHouse uses the `lz4` compression.
         <min_part_size>10000000000</min_part_size>
         <min_part_size_ratio>0.01</min_part_size_ratio>
         <method>zstd</method>
         <level>1</level>
     </case>
 </compression>
 ```

+## encryption {#server-settings-encryption}
+
+Configures a command to obtain a key to be used by [encryption codecs](../../sql-reference/statements/create/table.md#create-query-encryption-codecs). The command, or a shell script, is expected to write a Base64-encoded key of any length to the stdout.
+
+**Example**
+
+For Linux with systemd:
+
+```xml
+<encryption>
+    <key_command>/usr/bin/systemd-ask-password --id="clickhouse-server" --timeout=0 "Enter the ClickHouse encryption passphrase:" | base64</key_command>
+</encryption>
+```
+
+For other systems:
+
+```xml
+<encryption>
+    <key_command><![CDATA[IFS=; echo -n >/dev/tty "Enter the ClickHouse encryption passphrase: "; stty=`stty -F /dev/tty -g`; stty -F /dev/tty -echo; read k </dev/tty; stty -F /dev/tty "$stty"; echo -n $k | base64]]></key_command>
+</encryption>
+```
+
 ## custom_settings_prefixes {#custom_settings_prefixes}

 List of prefixes for [custom settings](../../operations/settings/index.md#custom_settings). The prefixes must be separated with commas.

@@ -101,7 +123,7 @@ Default value: `1073741824` (1 GB).

 ```xml
 <core_dump>
     <size_limit>1073741824</size_limit>
 </core_dump>
 ```

 ## database_atomic_delay_before_drop_table_sec {#database_atomic_delay_before_drop_table_sec}

@@ -442,8 +464,8 @@ The server will need access to the public Internet via IPv4 (at the time of writ

 Keys:

 - `enabled` – Boolean flag to enable the feature, `false` by default. Set to `true` to allow sending crash reports.
 - `endpoint` – You can override the Sentry endpoint URL for sending crash reports. It can be either a separate Sentry account or your self-hosted Sentry instance. Use the [Sentry DSN](https://docs.sentry.io/error-reporting/quickstart/?platform=native#configure-the-sdk) syntax.
 - `anonymize` - Avoid attaching the server hostname to the crash report.
 - `http_proxy` - Configure HTTP proxy for sending crash reports.
 - `debug` - Sets the Sentry client into debug mode.

@@ -505,7 +527,7 @@ The default `max_server_memory_usage` value is calculated as `memory_amount * ma

 ## max_server_memory_usage_to_ram_ratio {#max_server_memory_usage_to_ram_ratio}

 Defines the fraction of total physical RAM amount, available to the ClickHouse server. If the server tries to utilize more, the memory is cut down to the appropriate amount.

 Possible values:

@@ -883,7 +905,7 @@ Parameters:
 - `flush_interval_milliseconds` — Interval for flushing data from the buffer in memory to the table.

 **Example**

 ```xml
 <yandex>
     <text_log>
         <level>notice</level>

@@ -31,7 +31,7 @@ Settings that can only be made in the server config file are not covered in this

 ## Custom Settings {#custom_settings}

 In addition to the common [settings](../../operations/settings/settings.md), users can define custom settings.

 A custom setting name must begin with one of predefined prefixes. The list of these prefixes must be declared in the [custom_settings_prefixes](../../operations/server-configuration-parameters/settings.md#custom_settings_prefixes) parameter in the server configuration file.

@@ -48,7 +48,7 @@ SET custom_a = 123;
 To get the current value of a custom setting use `getSetting()` function:

 ```sql
 SELECT getSetting('custom_a');
 ```

 **See Also**

@@ -65,20 +65,20 @@ What to do when the volume of data read exceeds one of the limits: ‘throw’ o
 The following restrictions can be checked on each block (instead of on each row). That is, the restrictions can be broken a little.

 A maximum number of rows that can be read from a local table on a leaf node when running a distributed query. While
 distributed queries can issue multiple sub-queries to each shard (leaf) - this limit will be checked only on the read
 stage on the leaf nodes and ignored on the results merging stage on the root node. For example, a cluster consists of 2 shards
 and each shard contains a table with 100 rows. Then a distributed query which is supposed to read all the data from both
 tables with the setting `max_rows_to_read=150` will fail, as in total there will be 200 rows. A query
 with `max_rows_to_read_leaf=150` will succeed, since leaf nodes will read 100 rows at max.

 ## max_bytes_to_read_leaf {#max-bytes-to-read-leaf}

 A maximum number of bytes (uncompressed data) that can be read from a local table on a leaf node when running
 a distributed query. While distributed queries can issue multiple sub-queries to each shard (leaf) - this limit will
 be checked only on the read stage on the leaf nodes and ignored on the results merging stage on the root node.
 For example, a cluster consists of 2 shards and each shard contains a table with 100 bytes of data.
 Then a distributed query which is supposed to read all the data from both tables with the setting `max_bytes_to_read=150` will fail,
 as in total there will be 200 bytes. A query with `max_bytes_to_read_leaf=150` will succeed, since leaf nodes will read
 100 bytes at max.

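To make the arithmetic above concrete, a sketch assuming a hypothetical distributed table `dist_table` over the 2-shard cluster from the example (100 rows per shard):

```sql
SET max_rows_to_read = 150;       -- fails: the root node sees 200 rows in total
SELECT sum(n) FROM dist_table;

SET max_rows_to_read = 0;         -- 0 lifts the root-level limit
SET max_rows_to_read_leaf = 150;  -- succeeds: each leaf reads only its 100 local rows
SELECT sum(n) FROM dist_table;
```
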
 ## read_overflow_mode_leaf {#read-overflow-mode-leaf}

@@ -20,6 +20,29 @@ Possible values:
 - `global` — Replaces the `IN`/`JOIN` query with `GLOBAL IN`/`GLOBAL JOIN.`
 - `allow` — Allows the use of these types of subqueries.

+## prefer_global_in_and_join {#prefer-global-in-and-join}
+
+Enables the replacement of `IN`/`JOIN` operators with `GLOBAL IN`/`GLOBAL JOIN`.
+
+Possible values:
+
+- 0 — Disabled. `IN`/`JOIN` operators are not replaced with `GLOBAL IN`/`GLOBAL JOIN`.
+- 1 — Enabled. `IN`/`JOIN` operators are replaced with `GLOBAL IN`/`GLOBAL JOIN`.
+
+Default value: `0`.
+
+**Usage**
+
+Although `SET distributed_product_mode=global` can change the queries behavior for the distributed tables, it's not suitable for local tables or tables from external resources. Here is when the `prefer_global_in_and_join` setting comes into play.
+
+For example, we have query serving nodes that contain local tables, which are not suitable for distribution. We need to scatter their data on the fly during distributed processing with the `GLOBAL` keyword — `GLOBAL IN`/`GLOBAL JOIN`.
+
+Another use case of `prefer_global_in_and_join` is accessing tables created by external engines. This setting helps to reduce the number of calls to external sources while joining such tables: only one call per query.
+
+**See also:**
+
+- [Distributed subqueries](../../sql-reference/operators/in.md#select-distributed-subqueries) for more information on how to use `GLOBAL IN`/`GLOBAL JOIN`

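An illustrative session with the newly documented setting (the table names are hypothetical):

```sql
SET prefer_global_in_and_join = 1;

-- Executed as GLOBAL JOIN: the right-hand table is collected once and
-- distributed to the remote nodes instead of being queried per shard.
SELECT d.id, l.name
FROM distributed_table AS d
JOIN local_table AS l ON d.id = l.id;
```
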
 ## enable_optimize_predicate_expression {#enable-optimize-predicate-expression}

 Turns on predicate pushdown in `SELECT` queries.

@@ -542,7 +565,7 @@ Possible values:

 Default value: `hash`.

 When using `hash` algorithm the right part of `JOIN` is uploaded into RAM.

 When using `partial_merge` algorithm ClickHouse sorts the data and dumps it to the disk. The `merge` algorithm in ClickHouse differs a bit from the classic realization. First ClickHouse sorts the right table by [join key](../../sql-reference/statements/select/join.md#select-join) in blocks and creates min-max index for sorted blocks. Then it sorts parts of left table by `join key` and joins them over right table. The min-max index is also used to skip unneeded right table blocks.

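For illustration, switching a session to the disk-friendly algorithm described above:

```sql
SET join_algorithm = 'partial_merge';  -- sort-and-spill instead of an in-RAM hash table
```
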
@@ -1251,7 +1274,7 @@ Default value: `3`.
 ## output_format_json_quote_64bit_integers {#session_settings-output_format_json_quote_64bit_integers}

 Controls quoting of 64-bit or bigger [integers](../../sql-reference/data-types/int-uint.md) (like `UInt64` or `Int128`) when they are output in a [JSON](../../interfaces/formats.md#json) format.
 Such integers are enclosed in quotes by default. This behavior is compatible with most JavaScript implementations.

 Possible values:

@@ -3209,7 +3232,7 @@ Default value: `300`.

 ## distributed_ddl_task_timeout {#distributed_ddl_task_timeout}

 Sets timeout for DDL query responses from all hosts in cluster. If a DDL request has not been performed on all hosts, a response will contain a timeout error and a request will be executed in an async mode. Negative value means infinite.

 Possible values:

@@ -33,7 +33,7 @@ SELECT * FROM system.asynchronous_metric_log LIMIT 10

 **See Also**

 - [system.asynchronous_metrics](../system-tables/asynchronous_metrics.md) — Contains metrics, calculated periodically in the background.
 - [system.metric_log](../system-tables/metric_log.md) — Contains history of metrics values from tables `system.metrics` and `system.events`, periodically flushed to disk.

 [Original article](https://clickhouse.tech/docs/en/operations/system-tables/asynchronous_metric_log) <!--hide-->

@@ -4,7 +4,7 @@ Contains information about columns in all the tables.

 You can use this table to get information similar to the [DESCRIBE TABLE](../../sql-reference/statements/misc.md#misc-describe-table) query, but for multiple tables at once.

 Columns from [temporary tables](../../sql-reference/statements/create/table.md#temporary-tables) are visible in the `system.columns` only in those sessions where they have been created. They are shown with the empty `database` field.

 Columns:

@@ -38,17 +38,17 @@ database: system
 table: aggregate_function_combinators
 name: name
 type: String
 default_kind:
 default_expression:
 data_compressed_bytes: 0
 data_uncompressed_bytes: 0
 marks_bytes: 0
 comment:
 is_in_partition_key: 0
 is_in_sorting_key: 0
 is_in_primary_key: 0
 is_in_sampling_key: 0
 compression_codec:

 Row 2:
 ──────
@@ -56,17 +56,17 @@ database: system
 table: aggregate_function_combinators
 name: is_internal
 type: UInt8
 default_kind:
 default_expression:
 data_compressed_bytes: 0
 data_uncompressed_bytes: 0
 marks_bytes: 0
 comment:
 is_in_partition_key: 0
 is_in_sorting_key: 0
 is_in_primary_key: 0
 is_in_sampling_key: 0
 compression_codec:
 ```

 The `system.columns` table contains the following columns (the column type is shown in brackets):

@@ -21,7 +21,7 @@ Columns:
 │ default │ /var/lib/clickhouse/ │ 276392587264 │ 490652508160 │ 0               │
 └─────────┴──────────────────────┴──────────────┴──────────────┴─────────────────┘

 1 rows in set. Elapsed: 0.001 sec.
 ```

 [Original article](https://clickhouse.tech/docs/en/operations/system-tables/disks) <!--hide-->

@@ -62,4 +62,3 @@ exception_code: ZOK
 ```
-
 [Original article](https://clickhouse.tech/docs/en/operations/system_tables/distributed_ddl_queue) <!--hide-->

@@ -40,7 +40,7 @@ is_blocked: 1
 error_count: 0
 data_files: 1
 data_compressed_bytes: 499
 last_exception:
 ```

 **See Also**

@@ -1,6 +1,6 @@
 # system.enabled_roles {#system_tables-enabled_roles}

 Contains all active roles at the moment, including the current role of the current user and granted roles for the current role.

 Columns:

@@ -27,7 +27,7 @@ Columns:
 │ JSONExtractInt │ 0 │ 0 │ │
 └──────────────────────────┴──────────────┴──────────────────┴──────────┘

 10 rows in set. Elapsed: 0.002 sec.
 ```

 [Original article](https://clickhouse.tech/docs/en/operations/system-tables/functions) <!--hide-->

@@ -1,11 +1,11 @@
 # system.licenses {#system-tables_system.licenses}

 Contains licenses of third-party libraries that are located in the [contrib](https://github.com/ClickHouse/ClickHouse/tree/master/contrib) directory of ClickHouse sources.

 Columns:

 - `library_name` ([String](../../sql-reference/data-types/string.md)) — Name of the library, which the license is connected with.
 - `license_type` ([String](../../sql-reference/data-types/string.md)) — License type — e.g. Apache, MIT.
 - `license_path` ([String](../../sql-reference/data-types/string.md)) — Path to the file with the license text.
 - `license_text` ([String](../../sql-reference/data-types/string.md)) — License text.

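A small illustrative query over the columns listed above:

```sql
SELECT library_name, license_type, license_path
FROM system.licenses
ORDER BY library_name
LIMIT 5;
```
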
@@ -48,7 +48,7 @@ changed: 0
 description: How many rows in blocks should be formed for merge operations.
 type: SettingUInt64

 4 rows in set. Elapsed: 0.001 sec.
 ```

 [Original article](https://clickhouse.tech/docs/en/operations/system-tables/merge_tree_settings) <!--hide-->

@@ -1,6 +1,6 @@
 # system.mutations {#system_tables-mutations}

 The table contains information about [mutations](../../sql-reference/statements/alter/index.md#mutations) of [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) tables and their progress. Each mutation command is represented by a single row.

 Columns:

@@ -16,17 +16,17 @@ Columns:

 - `block_numbers.partition_id` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — For mutations of replicated tables, the array contains the partitions' IDs (one record for each partition). For mutations of non-replicated tables the array is empty.

 - `block_numbers.number` ([Array](../../sql-reference/data-types/array.md)([Int64](../../sql-reference/data-types/int-uint.md))) — For mutations of replicated tables, the array contains one record for each partition, with the block number that was acquired by the mutation. Only parts that contain blocks with numbers less than this number will be mutated in the partition.

     In non-replicated tables, block numbers in all partitions form a single sequence. This means that for mutations of non-replicated tables, the column will contain one record with a single block number acquired by the mutation.

 - `parts_to_do_names` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — An array of names of data parts that need to be mutated for the mutation to complete.

 - `parts_to_do` ([Int64](../../sql-reference/data-types/int-uint.md)) — The number of data parts that need to be mutated for the mutation to complete.

 - `is_done` ([UInt8](../../sql-reference/data-types/int-uint.md)) — The flag whether the mutation is done or not. Possible values:
     - `1` if the mutation is completed,
     - `0` if the mutation is still in process.

 !!! info "Note"
     Even if `parts_to_do = 0` it is possible that a mutation of a replicated table is not completed yet because of a long-running `INSERT` query that will create a new data part that needs to be mutated.

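For illustration, unfinished mutations can be monitored with the columns described above (`database`, `table` and `mutation_id` are standard columns of this table not shown in the excerpt):

```sql
SELECT database, table, mutation_id, parts_to_do, is_done
FROM system.mutations
WHERE is_done = 0;
```
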
@@ -26,7 +26,7 @@ Reads from this table are not parallelized.
 │ 9 │
 └────────┘

 10 rows in set. Elapsed: 0.001 sec.
 ```

 [Original article](https://clickhouse.tech/docs/en/operations/system-tables/numbers) <!--hide-->

@@ -24,7 +24,7 @@ Used for tests.
 │ 9 │
 └────────┘

 10 rows in set. Elapsed: 0.001 sec.
 ```

 [Original article](https://clickhouse.tech/docs/en/operations/system-tables/numbers_mt) <!--hide-->

@@ -17,7 +17,7 @@ This is similar to the `DUAL` table found in other DBMSs.
 │ 0 │
 └───────┘

 1 rows in set. Elapsed: 0.001 sec.
 ```

 [Original article](https://clickhouse.tech/docs/en/operations/system-tables/one) <!--hide-->

@ -63,7 +63,7 @@ read_rows: 0
read_bytes: 0
peak_memory_usage: 0
error: 0
exception:
```

[Original article](https://clickhouse.tech/docs/en/operations/system-tables/part_log) <!--hide-->
@ -19,10 +19,10 @@ Columns:
Possible Values:

- `Wide` — Each column is stored in a separate file in a filesystem.
- `Compact` — All columns are stored in one file in a filesystem.

Data storing format is controlled by the `min_bytes_for_wide_part` and `min_rows_for_wide_part` settings of the [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) table.

- `active` ([UInt8](../../sql-reference/data-types/int-uint.md)) – Flag that indicates whether the data part is active. If a data part is active, it’s used in a table. Otherwise, it’s deleted. Inactive data parts remain after merging.
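The format of each part can be checked directly; a sketch (`my_table` is a placeholder table name):

``` sql
SELECT name, part_type
FROM system.parts
WHERE table = 'my_table' AND active;
```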
@ -88,7 +88,7 @@ Columns:
- `delete_ttl_info_max` ([DateTime](../../sql-reference/data-types/datetime.md)) — The maximum value of the date and time key for [TTL DELETE rule](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-ttl).

- `move_ttl_info.expression` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — Array of expressions. Each expression defines a [TTL MOVE rule](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-ttl).

!!! note "Warning"
    The `move_ttl_info.expression` array is kept mostly for backward compatibility; now the simplest way to check a `TTL MOVE` rule is to use the `move_ttl_info.min` and `move_ttl_info.max` fields.
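For instance, the effective `TTL MOVE` boundaries of active parts can be read from those fields (a sketch; `my_table` is a placeholder):

``` sql
SELECT name, move_ttl_info.expression, move_ttl_info.min, move_ttl_info.max
FROM system.parts
WHERE table = 'my_table' AND active;
```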
@ -19,10 +19,10 @@ Columns:
Possible values:

- `Wide` — Each column is stored in a separate file in a filesystem.
- `Compact` — All columns are stored in one file in a filesystem.

Data storing format is controlled by the `min_bytes_for_wide_part` and `min_rows_for_wide_part` settings of the [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) table.

- `active` ([UInt8](../../sql-reference/data-types/int-uint.md)) — Flag that indicates whether the data part is active. If a data part is active, it’s used in a table. Otherwise, it’s deleted. Inactive data parts remain after merging.
@ -51,6 +51,7 @@ Columns:
- `databases` ([Array](../../sql-reference/data-types/array.md)([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md))) — Names of the databases present in the query.
- `tables` ([Array](../../sql-reference/data-types/array.md)([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md))) — Names of the tables present in the query.
- `columns` ([Array](../../sql-reference/data-types/array.md)([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md))) — Names of the columns present in the query.
- `projections` ([String](../../sql-reference/data-types/string.md)) — Names of the projections used during the query execution.
- `exception_code` ([Int32](../../sql-reference/data-types/int-uint.md)) — Code of an exception.
- `exception` ([String](../../sql-reference/data-types/string.md)) — Exception message.
- `stack_trace` ([String](../../sql-reference/data-types/string.md)) — [Stack trace](https://en.wikipedia.org/wiki/Stack_trace). An empty string, if the query was completed successfully.
@ -65,6 +66,8 @@ Columns:
- `initial_query_id` ([String](../../sql-reference/data-types/string.md)) — ID of the initial query (for distributed query execution).
- `initial_address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — IP address that the parent query was launched from.
- `initial_port` ([UInt16](../../sql-reference/data-types/int-uint.md)) — The client port that was used to make the parent query.
- `initial_query_start_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Initial query starting time (for distributed query execution).
- `initial_query_start_time_microseconds` ([DateTime64](../../sql-reference/data-types/datetime64.md)) — Initial query starting time with microseconds precision (for distributed query execution).
- `interface` ([UInt8](../../sql-reference/data-types/int-uint.md)) — Interface that the query was initiated from. Possible values:
    - 1 — TCP.
    - 2 — HTTP.
@ -101,55 +104,77 @@ Columns:
**Example**

``` sql
SELECT * FROM system.query_log WHERE type = 'QueryFinish' ORDER BY query_start_time DESC LIMIT 1 FORMAT Vertical;
```

``` text
Row 1:
──────
type:                                  QueryFinish
event_date:                            2021-07-28
event_time:                            2021-07-28 13:46:56
event_time_microseconds:               2021-07-28 13:46:56.719791
query_start_time:                      2021-07-28 13:46:56
query_start_time_microseconds:         2021-07-28 13:46:56.704542
query_duration_ms:                     14
read_rows:                             8393
read_bytes:                            374325
written_rows:                          0
written_bytes:                         0
result_rows:                           4201
result_bytes:                          153024
memory_usage:                          4714038
current_database:                      default
query:                                 SELECT DISTINCT arrayJoin(extractAll(name, '[\\w_]{2,}')) AS res FROM (SELECT name FROM system.functions UNION ALL SELECT name FROM system.table_engines UNION ALL SELECT name FROM system.formats UNION ALL SELECT name FROM system.table_functions UNION ALL SELECT name FROM system.data_type_families UNION ALL SELECT name FROM system.merge_tree_settings UNION ALL SELECT name FROM system.settings UNION ALL SELECT cluster FROM system.clusters UNION ALL SELECT macro FROM system.macros UNION ALL SELECT policy_name FROM system.storage_policies UNION ALL SELECT concat(func.name, comb.name) FROM system.functions AS func CROSS JOIN system.aggregate_function_combinators AS comb WHERE is_aggregate UNION ALL SELECT name FROM system.databases LIMIT 10000 UNION ALL SELECT DISTINCT name FROM system.tables LIMIT 10000 UNION ALL SELECT DISTINCT name FROM system.dictionaries LIMIT 10000 UNION ALL SELECT DISTINCT name FROM system.columns LIMIT 10000) WHERE notEmpty(res)
normalized_query_hash:                 6666026786019643712
query_kind:                            Select
databases:                             ['system']
tables:                                ['system.aggregate_function_combinators','system.clusters','system.columns','system.data_type_families','system.databases','system.dictionaries','system.formats','system.functions','system.macros','system.merge_tree_settings','system.settings','system.storage_policies','system.table_engines','system.table_functions','system.tables']
columns:                               ['system.aggregate_function_combinators.name','system.clusters.cluster','system.columns.name','system.data_type_families.name','system.databases.name','system.dictionaries.name','system.formats.name','system.functions.is_aggregate','system.functions.name','system.macros.macro','system.merge_tree_settings.name','system.settings.name','system.storage_policies.policy_name','system.table_engines.name','system.table_functions.name','system.tables.name']
projections:                           []
exception_code:                        0
exception:
stack_trace:
is_initial_query:                      1
user:                                  default
query_id:                              a3361f6e-a1fd-4d54-9f6f-f93a08bab0bf
address:                               ::ffff:127.0.0.1
port:                                  51006
initial_user:                          default
initial_query_id:                      a3361f6e-a1fd-4d54-9f6f-f93a08bab0bf
initial_address:                       ::ffff:127.0.0.1
initial_port:                          51006
initial_query_start_time:              2021-07-28 13:46:56
initial_query_start_time_microseconds: 2021-07-28 13:46:56.704542
interface:                             1
os_user:
client_hostname:
client_name:                           ClickHouse client
client_revision:                       54449
client_version_major:                  21
client_version_minor:                  8
client_version_patch:                  0
http_method:                           0
http_user_agent:
http_referer:
forwarded_for:
quota_key:
revision:                              54453
log_comment:
thread_ids:                            [5058,22097,22110,22094]
ProfileEvents.Names:                   ['Query','SelectQuery','ArenaAllocChunks','ArenaAllocBytes','FunctionExecute','NetworkSendElapsedMicroseconds','SelectedRows','SelectedBytes','ContextLock','RWLockAcquiredReadLocks','RealTimeMicroseconds','UserTimeMicroseconds','SystemTimeMicroseconds','SoftPageFaults','OSCPUWaitMicroseconds','OSCPUVirtualTimeMicroseconds','OSWriteBytes','OSWriteChars']
ProfileEvents.Values:                  [1,1,39,352256,64,360,8393,374325,412,440,34480,13108,4723,671,19,17828,8192,10240]
Settings.Names:                        ['load_balancing','max_memory_usage']
Settings.Values:                       ['random','10000000000']
used_aggregate_functions:              []
used_aggregate_function_combinators:   []
used_database_engines:                 []
used_data_type_families:               ['UInt64','UInt8','Nullable','String','date']
used_dictionaries:                     []
used_formats:                          []
used_functions:                        ['concat','notEmpty','extractAll']
used_storages:                         []
used_table_functions:                  []
```

**See Also**
@ -13,9 +13,9 @@ Columns:
- `granted_role_is_default` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Flag that shows whether `granted_role` is a default role. Possible values:
    - 1 — `granted_role` is a default role.
    - 0 — `granted_role` is not a default role.

- `with_admin_option` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Flag that shows whether `granted_role` is a role with [ADMIN OPTION](../../sql-reference/statements/grant.md#admin-option-privilege) privilege. Possible values:
    - 1 — The role has `ADMIN OPTION` privilege.
    - 0 — The role does not have `ADMIN OPTION` privilege.

[Original article](https://clickhouse.tech/docs/en/operations/system-tables/role-grants) <!--hide-->
@ -13,7 +13,7 @@ Columns:
- `id` ([UUID](../../sql-reference/data-types/uuid.md)) — Row policy ID.

- `storage` ([String](../../sql-reference/data-types/string.md)) — Name of the directory where the row policy is stored.

- `select_filter` ([Nullable](../../sql-reference/data-types/nullable.md)([String](../../sql-reference/data-types/string.md))) — Condition which is used to filter rows.
@ -7,7 +7,7 @@ Columns:
- `id` ([UUID](../../sql-reference/data-types/uuid.md)) — Setting profile ID.

- `storage` ([String](../../sql-reference/data-types/string.md)) — Path to the storage of setting profiles. Configured in the `access_control_path` parameter.

- `num_elements` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Number of elements for this profile in the `system.settings_profile_elements` table.
@ -1,24 +1,24 @@
# system.tables {#system-tables}

Contains metadata of each table that the server knows about.

[Detached](../../sql-reference/statements/detach.md) tables are not shown in `system.tables`.

[Temporary tables](../../sql-reference/statements/create/table.md#temporary-tables) are visible in `system.tables` only in the sessions where they have been created. They are shown with an empty `database` field and with the `is_temporary` flag switched on.

Columns:

- `database` ([String](../../sql-reference/data-types/string.md)) — The name of the database the table is in.

- `name` ([String](../../sql-reference/data-types/string.md)) — Table name.

- `engine` ([String](../../sql-reference/data-types/string.md)) — Table engine name (without parameters).

- `is_temporary` ([UInt8](../../sql-reference/data-types/int-uint.md)) - Flag that indicates whether the table is temporary.

- `data_path` ([String](../../sql-reference/data-types/string.md)) - Path to the table data in the file system.

- `metadata_path` ([String](../../sql-reference/data-types/string.md)) - Path to the table metadata in the file system.

- `metadata_modification_time` ([DateTime](../../sql-reference/data-types/datetime.md)) - Time of latest modification of the table metadata.
@ -28,33 +28,33 @@ Columns:
- `create_table_query` ([String](../../sql-reference/data-types/string.md)) - The query that was used to create the table.

- `engine_full` ([String](../../sql-reference/data-types/string.md)) - Parameters of the table engine.

- `partition_key` ([String](../../sql-reference/data-types/string.md)) - The partition key expression specified in the table.

- `sorting_key` ([String](../../sql-reference/data-types/string.md)) - The sorting key expression specified in the table.

- `primary_key` ([String](../../sql-reference/data-types/string.md)) - The primary key expression specified in the table.

- `sampling_key` ([String](../../sql-reference/data-types/string.md)) - The sampling key expression specified in the table.

- `storage_policy` ([String](../../sql-reference/data-types/string.md)) - The storage policy:

    - [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-multiple-volumes)
    - [Distributed](../../engines/table-engines/special/distributed.md#distributed)

- `total_rows` ([Nullable](../../sql-reference/data-types/nullable.md)([UInt64](../../sql-reference/data-types/int-uint.md))) - Total number of rows, if it is possible to quickly determine the exact number of rows in the table, otherwise `NULL` (including the underlying `Buffer` table).

- `total_bytes` ([Nullable](../../sql-reference/data-types/nullable.md)([UInt64](../../sql-reference/data-types/int-uint.md))) - Total number of bytes, if it is possible to quickly determine the exact number of bytes for the table on storage, otherwise `NULL` (does not include any underlying storage).

    - If the table stores data on disk, returns used space on disk (i.e. compressed).
    - If the table stores data in memory, returns the approximate number of used bytes in memory.

- `lifetime_rows` ([Nullable](../../sql-reference/data-types/nullable.md)([UInt64](../../sql-reference/data-types/int-uint.md))) - Total number of rows INSERTed since server start (only for `Buffer` tables).

- `lifetime_bytes` ([Nullable](../../sql-reference/data-types/nullable.md)([UInt64](../../sql-reference/data-types/int-uint.md))) - Total number of bytes INSERTed since server start (only for `Buffer` tables).

- `comment` ([String](../../sql-reference/data-types/string.md)) - The comment for the table.

The `system.tables` table is used in the `SHOW TABLES` query implementation.
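For example, the largest tables in a database can be found with a query like the following (a sketch; `total_bytes` is `NULL` for engines that cannot report it quickly):

``` sql
SELECT database, name, engine, total_rows, total_bytes
FROM system.tables
WHERE database = 'default'
ORDER BY total_bytes DESC
LIMIT 5;
```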
@ -42,12 +42,12 @@ microseconds: 871397
thread_name: clickhouse-serv
thread_id: 564917
level: Information
query_id:
logger_name: DNSCacheUpdater
message: Update period 15 seconds
revision: 54440
source_file: /ClickHouse/src/Interpreters/DNSCacheUpdater.cpp; void DB::DNSCacheUpdater::start()
source_line: 45
```

[Original article](https://clickhouse.tech/docs/en/operations/system-tables/text_log) <!--hide-->
@ -49,7 +49,7 @@ timestamp_ns: 1599762189872924510
revision: 54440
trace_type: Memory
thread_id: 564963
query_id:
trace: [371912858,371912789,371798468,371799717,371801313,371790250,624462773,566365041,566440261,566445834,566460071,566459914,566459842,566459580,566459469,566459389,566459341,566455774,371993941,371988245,372158848,372187428,372187309,372187093,372185478,140222123165193,140222122205443]
size: 5244400
```
@ -7,7 +7,7 @@ Columns:
- `id` ([UUID](../../sql-reference/data-types/uuid.md)) — User ID.

- `storage` ([String](../../sql-reference/data-types/string.md)) — Path to the storage of users. Configured in the `access_control_path` parameter.

- `auth_type` ([Enum8](../../sql-reference/data-types/enum.md)('no_password' = 0, 'plaintext_password' = 1, 'sha256_password' = 2, 'double_sha1_password' = 3)) — Shows the authentication type. There are multiple ways of user identification: with no password, with plain text password, with [SHA256](https://ru.wikipedia.org/wiki/SHA-2)-encoded password or with [double SHA-1](https://ru.wikipedia.org/wiki/SHA-1)-encoded password.
@ -16,12 +16,12 @@ $ sudo service clickhouse-server restart
If you installed ClickHouse using something other than the recommended `deb` packages, use the appropriate update method.

!!! note "Note"
    You can update multiple servers at once as long as there is no moment when all replicas of one shard are offline.

The upgrade of an older version of ClickHouse to a specific version:

As an example:

`xx.yy.a.b` is the current stable version. The latest stable version can be found [here](https://github.com/ClickHouse/ClickHouse/releases).

```bash
@ -40,7 +40,7 @@ clickhouse-benchmark [keys] < queries_file;
## Keys {#clickhouse-benchmark-keys}

- `--query=QUERY` — Query to execute. If this parameter is not passed, `clickhouse-benchmark` will read queries from standard input.
- `-c N`, `--concurrency=N` — Number of queries that `clickhouse-benchmark` sends simultaneously. Default value: 1.
- `-d N`, `--delay=N` — Interval in seconds between intermediate reports (to disable reports set 0). Default value: 1.
- `-h HOST`, `--host=HOST` — Server host. Default value: `localhost`. For the [comparison mode](#clickhouse-benchmark-comparison-mode) you can use multiple `-h` keys.
@ -74,7 +74,7 @@ Parameters:
        source cluster & destination clusters accept exactly the same
        parameters as parameters for the usual Distributed table
        see https://clickhouse.tech/docs/en/engines/table-engines/special/distributed/
    -->
    <shard>
        <internal_replication>false</internal_replication>
        <replica>
@ -18,7 +18,7 @@ Keys:
- `--seed <string>` — Seed arbitrary string that determines the result of obfuscation.
- `--backslash` — Add a backslash at the end of each line of the formatted query. Can be useful when you copy a query from the web or somewhere else with multiple lines and want to execute it in the command line.

## Examples {#examples}

1. Highlighting and single line:
@ -32,12 +32,12 @@ Result:
SELECT sum(number) FROM numbers(5)
```

2. Multiqueries:

```bash
$ clickhouse-format -n <<< "SELECT * FROM (SELECT 1 AS x UNION ALL SELECT 1 UNION DISTINCT SELECT 3);"
```

Result:

```text
@ -58,13 +58,13 @@ FROM
```bash
$ clickhouse-format --seed Hello --obfuscate <<< "SELECT cost_first_screen BETWEEN a AND b, CASE WHEN x >= 123 THEN y ELSE NULL END;"
```

Result:

```text
SELECT treasury_mammoth_hazelnut BETWEEN nutmeg AND span, CASE WHEN chive >= 116 THEN switching ELSE ANYTHING END;
```

Same query and another seed string:

```bash
@ -95,4 +95,4 @@ FROM \
UNION DISTINCT \
SELECT 3 \
)
```
@ -38,7 +38,7 @@ Arguments:
- `-of`, `--format`, `--output-format` — output format, `TSV` by default.
- `-d`, `--database` — default database, `_local` by default.
- `--stacktrace` — whether to dump debug output in case of exception.
- `--echo` — print query before execution.
- `--verbose` — more details on query execution.
- `--logger.console` — Log to console.
- `--logger.log` — Log file name.
@ -255,7 +255,7 @@ windowFunnel(window, [mode, [mode, ... ]])(timestamp, cond1, cond2, ..., condN)
- `window` — Length of the sliding window; it is the time interval between the first and the last condition. The unit of `window` depends on the `timestamp` itself and varies. Determined using the expression `timestamp of cond1 <= timestamp of cond2 <= ... <= timestamp of condN <= timestamp of cond1 + window`.
- `mode` — An optional argument. One or more modes can be set (see the sketch after this list):
    - `'strict'` — If the same condition holds for a sequence of events, such non-unique events are skipped.
    - `'strict_order'` — Don't allow interventions of other events. E.g. in the case of `A->B->D->C`, it stops finding `A->B->C` at the `D` and the max event level is 2.
    - `'strict_increase'` — Apply conditions only to events with strictly increasing timestamps.
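For illustration, a sketch of a three-step funnel over a hypothetical `events` table (`user_id`, `event_time`, and `event` are assumed column names):

``` sql
SELECT level, count() AS users
FROM
(
    SELECT user_id,
           windowFunnel(3600)(event_time, event = 'view', event = 'cart', event = 'purchase') AS level
    FROM events
    GROUP BY user_id
)
GROUP BY level
ORDER BY level;
```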
@ -530,7 +530,7 @@ sequenceNextNode(direction, base)(timestamp, event_column, base_condition, event
- tail — Set the base point to the last event.
- first_match — Set the base point to the first matched `event1`.
- last_match — Set the base point to the last matched `event1`.

**Arguments**

- `timestamp` — Name of the column containing the timestamp. Data types supported: [Date](../../sql-reference/data-types/date.md), [DateTime](../../sql-reference/data-types/datetime.md#data_type-datetime) and other unsigned integer types.
@ -553,11 +553,11 @@ The query statement searching the event following A->B:
``` sql
CREATE TABLE test_flow (
    dt DateTime,
    id int,
    page String)
ENGINE = MergeTree()
PARTITION BY toYYYYMMDD(dt)
ORDER BY id;

INSERT INTO test_flow VALUES (1, 1, 'A') (2, 1, 'B') (3, 1, 'C') (4, 1, 'D') (5, 1, 'E');
@ -585,21 +585,21 @@ INSERT INTO test_flow VALUES (1, 3, 'Gift') (2, 3, 'Home') (3, 3, 'Gift') (4, 3,
``` sql
SELECT id, sequenceNextNode('forward', 'head')(dt, page, page = 'Home', page = 'Home', page = 'Gift') FROM test_flow GROUP BY id;

dt id page
1970-01-01 09:00:01 1 Home // Base point, Matched with Home
1970-01-01 09:00:02 1 Gift // Matched with Gift
1970-01-01 09:00:03 1 Exit // The result

1970-01-01 09:00:01 2 Home // Base point, Matched with Home
1970-01-01 09:00:02 2 Home // Unmatched with Gift
1970-01-01 09:00:03 2 Gift
1970-01-01 09:00:04 2 Basket

1970-01-01 09:00:01 3 Gift // Base point, Unmatched with Home
1970-01-01 09:00:02 3 Home
1970-01-01 09:00:03 3 Gift
1970-01-01 09:00:04 3 Basket
```

**Behavior for `backward` and `tail`**
@ -611,14 +611,14 @@ SELECT id, sequenceNextNode('backward', 'tail')(dt, page, page = 'Basket', page
1970-01-01 09:00:01 1 Home
1970-01-01 09:00:02 1 Gift
1970-01-01 09:00:03 1 Exit // Base point, Unmatched with Basket

1970-01-01 09:00:01 2 Home
1970-01-01 09:00:02 2 Home // The result
1970-01-01 09:00:03 2 Gift // Matched with Gift
1970-01-01 09:00:04 2 Basket // Base point, Matched with Basket

1970-01-01 09:00:01 3 Gift
1970-01-01 09:00:02 3 Home // The result
1970-01-01 09:00:03 3 Gift // Matched with Gift
1970-01-01 09:00:04 3 Basket // Base point, Matched with Basket
```
@ -633,16 +633,16 @@ SELECT id, sequenceNextNode('forward', 'first_match')(dt, page, page = 'Gift', p
1970-01-01 09:00:01 1 Home
1970-01-01 09:00:02 1 Gift // Base point
1970-01-01 09:00:03 1 Exit // The result

1970-01-01 09:00:01 2 Home
1970-01-01 09:00:02 2 Home
1970-01-01 09:00:03 2 Gift // Base point
1970-01-01 09:00:04 2 Basket // The result

1970-01-01 09:00:01 3 Gift // Base point
1970-01-01 09:00:02 3 Home // The result
1970-01-01 09:00:03 3 Gift
1970-01-01 09:00:04 3 Basket
```

``` sql
@ -652,16 +652,16 @@ SELECT id, sequenceNextNode('forward', 'first_match')(dt, page, page = 'Gift', p
1970-01-01 09:00:01 1 Home
1970-01-01 09:00:02 1 Gift // Base point
1970-01-01 09:00:03 1 Exit // Unmatched with Home

1970-01-01 09:00:01 2 Home
1970-01-01 09:00:02 2 Home
1970-01-01 09:00:03 2 Gift // Base point
1970-01-01 09:00:04 2 Basket // Unmatched with Home

1970-01-01 09:00:01 3 Gift // Base point
1970-01-01 09:00:02 3 Home // Matched with Home
1970-01-01 09:00:03 3 Gift // The result
1970-01-01 09:00:04 3 Basket
```
@ -673,17 +673,17 @@ SELECT id, sequenceNextNode('backward', 'last_match')(dt, page, page = 'Gift', p
dt id page
1970-01-01 09:00:01 1 Home // The result
1970-01-01 09:00:02 1 Gift // Base point
1970-01-01 09:00:03 1 Exit

1970-01-01 09:00:01 2 Home
1970-01-01 09:00:02 2 Home // The result
1970-01-01 09:00:03 2 Gift // Base point
1970-01-01 09:00:04 2 Basket

1970-01-01 09:00:01 3 Gift
1970-01-01 09:00:02 3 Home // The result
1970-01-01 09:00:03 3 Gift // Base point
1970-01-01 09:00:04 3 Basket
```

``` sql
@ -692,17 +692,17 @@ SELECT id, sequenceNextNode('backward', 'last_match')(dt, page, page = 'Gift', p
dt id page
1970-01-01 09:00:01 1 Home // Matched with Home, the result is null
1970-01-01 09:00:02 1 Gift // Base point
1970-01-01 09:00:03 1 Exit

1970-01-01 09:00:01 2 Home // The result
1970-01-01 09:00:02 2 Home // Matched with Home
1970-01-01 09:00:03 2 Gift // Base point
1970-01-01 09:00:04 2 Basket

1970-01-01 09:00:01 3 Gift // The result
1970-01-01 09:00:02 3 Home // Matched with Home
1970-01-01 09:00:03 3 Gift // Base point
1970-01-01 09:00:04 3 Basket
```
@ -726,39 +726,39 @@ INSERT INTO test_flow_basecond VALUES (1, 1, 'A', 'ref4') (2, 1, 'A', 'ref3') (3
``` sql
SELECT id, sequenceNextNode('forward', 'head')(dt, page, ref = 'ref1', page = 'A') FROM test_flow_basecond GROUP BY id;

dt id page ref
1970-01-01 09:00:01 1 A ref4 // The head cannot be the base point because its ref column does not match 'ref1'.
1970-01-01 09:00:02 1 A ref3
1970-01-01 09:00:03 1 B ref2
1970-01-01 09:00:04 1 B ref1
```

``` sql
SELECT id, sequenceNextNode('backward', 'tail')(dt, page, ref = 'ref4', page = 'B') FROM test_flow_basecond GROUP BY id;

dt id page ref
1970-01-01 09:00:01 1 A ref4
1970-01-01 09:00:02 1 A ref3
1970-01-01 09:00:03 1 B ref2
1970-01-01 09:00:04 1 B ref1 // The tail cannot be the base point because its ref column does not match 'ref4'.
```

``` sql
SELECT id, sequenceNextNode('forward', 'first_match')(dt, page, ref = 'ref3', page = 'A') FROM test_flow_basecond GROUP BY id;

dt id page ref
1970-01-01 09:00:01 1 A ref4 // This row cannot be the base point because its ref column does not match 'ref3'.
1970-01-01 09:00:02 1 A ref3 // Base point
1970-01-01 09:00:03 1 B ref2 // The result
1970-01-01 09:00:04 1 B ref1
```

``` sql
SELECT id, sequenceNextNode('backward', 'last_match')(dt, page, ref = 'ref2', page = 'B') FROM test_flow_basecond GROUP BY id;

dt id page ref
1970-01-01 09:00:01 1 A ref4
1970-01-01 09:00:02 1 A ref3 // The result
1970-01-01 09:00:03 1 B ref2 // Base point
1970-01-01 09:00:04 1 B ref1 // This row cannot be the base point because its ref column does not match 'ref2'.
```
@ -47,7 +47,7 @@ Query:
CREATE table test (t UInt8) ENGINE = Memory;
```

Get the arithmetic mean:

Query:
@ -19,7 +19,7 @@ avgWeighted(x, weight)
`x` and `weight` must both be
[Integer](../../../sql-reference/data-types/int-uint.md),
[floating-point](../../../sql-reference/data-types/float.md), or
[Decimal](../../../sql-reference/data-types/decimal.md),
but may have different types.
@ -32,7 +32,7 @@ Type: [Integer](../../data-types/int-uint.md) or [Float](../../data-types/float.
Query:

```sql
SELECT deltaSumTimestamp(value, timestamp)
FROM (SELECT number AS timestamp, [0, 4, 8, 3, 0, 0, 0, 1, 3, 5][number] AS value FROM numbers(1, 10));
```
@ -4,7 +4,7 @@ toc_priority: 114
# groupArraySample {#grouparraysample}

Creates an array of sample argument values. The size of the resulting array is limited to `max_size` elements. Argument values are selected and added to the array randomly.

**Syntax**
|
@ -13,7 +13,7 @@ Applies the Mann-Whitney rank test to samples from two populations.
|
||||
mannWhitneyUTest[(alternative[, continuity_correction])](sample_data, sample_index)
|
||||
```
|
||||
|
||||
Values of both samples are in the `sample_data` column. If `sample_index` equals to 0 then the value in that row belongs to the sample from the first population. Otherwise it belongs to the sample from the second population.
|
||||
Values of both samples are in the `sample_data` column. If `sample_index` equals to 0 then the value in that row belongs to the sample from the first population. Otherwise it belongs to the sample from the second population.
|
||||
The null hypothesis is that two populations are stochastically equal. Also one-sided hypothesises can be tested. This test does not assume that data have normal distribution.
|
||||
|
||||
**Arguments**
|
||||
|
@ -4,7 +4,7 @@ toc_priority: 209
# quantileBFloat16 {#quantilebfloat16}

Computes an approximate [quantile](https://en.wikipedia.org/wiki/Quantile) of a sample consisting of [bfloat16](https://en.wikipedia.org/wiki/Bfloat16_floating-point_format) numbers. `bfloat16` is a floating-point data type with 1 sign bit, 8 exponent bits and 7 fraction bits.
The function converts input values to 32-bit floats and takes the most significant 16 bits. Then it calculates the `bfloat16` quantile value and converts the result to a 64-bit float by appending zero bits.
The function is a fast quantile estimator with a relative error no more than 0.390625%.
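For illustration, a sketch of computing a median through the `bfloat16` representation (the literal values are arbitrary):

``` sql
SELECT quantileBFloat16(0.5)(value)
FROM (SELECT arrayJoin([1, 2.3, 4.5, 6.7]) AS value);
```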
@ -18,7 +18,7 @@ Alias: `medianBFloat16`
**Arguments**

- `expr` — Column with numeric data. [Integer](../../../sql-reference/data-types/int-uint.md), [Float](../../../sql-reference/data-types/float.md).

**Parameters**
@ -115,7 +115,7 @@ Similar to `quantileExact`, this computes the exact [quantile](https://en.wikipe
All the passed values are combined into an array, which is then fully sorted, to get the exact value. The sorting [algorithm's](https://en.cppreference.com/w/cpp/algorithm/sort) complexity is `O(N·log(N))`, where `N = std::distance(first, last)` comparisons.

The return value depends on the quantile level and the number of elements in the selection, i.e. if the level is 0.5, then the function returns the higher median value for an even number of elements and the middle median value for an odd number of elements. The median is calculated similarly to the [median_high](https://docs.python.org/3/library/statistics.html#statistics.median_high) implementation used in Python. For all other levels, the element at the index corresponding to the value of `level * size_of_array` is returned.

This implementation behaves exactly like the current `quantileExact` implementation.
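A quick check of the higher-median behavior on an even-sized set (the six values 1..6, so the higher median is 4):

``` sql
SELECT quantileExactHigh(0.5)(number) FROM numbers(1, 6);
```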
@ -214,7 +214,7 @@ Result:
## quantileExactInclusive {#quantileexactinclusive}

Exactly computes the [quantile](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence.

To get the exact value, all the passed values are combined into an array, which is then partially sorted. Therefore, the function consumes `O(n)` memory, where `n` is the number of values that were passed. However, for a small number of values, the function is very effective.
@ -16,7 +16,7 @@ Exactly computes the [quantiles](https://en.wikipedia.org/wiki/Quantile) of a nu
To get the exact value, all the passed values are combined into an array, which is then partially sorted. Therefore, the function consumes `O(n)` memory, where `n` is the number of values that were passed. However, for a small number of values, the function is very effective.

This function is equivalent to the [PERCENTILE.EXC](https://support.microsoft.com/en-us/office/percentile-exc-function-bbaa7204-e9e1-4010-85bf-c31dc5dce4ba) Excel function ([type R6](https://en.wikipedia.org/wiki/Quantile#Estimating_quantiles_from_a_sample)).

Works more efficiently with sets of levels than [quantileExactExclusive](../../../sql-reference/aggregate-functions/reference/quantileexact.md#quantileexactexclusive).
@ -5,7 +5,7 @@ toc_title: studentTTest
# studentTTest {#studentttest}

Applies Student's t-test to samples from two populations.

**Syntax**
@ -12,7 +12,7 @@ Calculates the sum of the numbers and counts the number of rows at the same time
sumCount(x)
```

**Arguments**

- `x` — Input value, must be [Integer](../../../sql-reference/data-types/int-uint.md), [Float](../../../sql-reference/data-types/float.md), or [Decimal](../../../sql-reference/data-types/decimal.md).
|
@ -15,13 +15,13 @@ The compensation works only for [Float](../../../sql-reference/data-types/float.
|
||||
sumKahan(x)
|
||||
```
|
||||
|
||||
**Arguments**
|
||||
**Arguments**
|
||||
|
||||
- `x` — Input value, must be [Integer](../../../sql-reference/data-types/int-uint.md), [Float](../../../sql-reference/data-types/float.md), or [Decimal](../../../sql-reference/data-types/decimal.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
- the sum of numbers, with type [Integer](../../../sql-reference/data-types/int-uint.md), [Float](../../../sql-reference/data-types/float.md), or [Decimal](../../../sql-reference/data-types/decimal.md) depends on type of input arguments
|
||||
- the sum of numbers, with type [Integer](../../../sql-reference/data-types/int-uint.md), [Float](../../../sql-reference/data-types/float.md), or [Decimal](../../../sql-reference/data-types/decimal.md) depends on type of input arguments
|
||||
|
||||
**Example**
|
||||
|
||||
|
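A minimal sketch that exposes the difference from plain `sum` (the large magnitudes make the `1.0` vanish under naive float summation, while the compensated sum keeps it):

``` sql
SELECT sum(x), sumKahan(x)
FROM (SELECT arrayJoin([1e16, 1.0, -1e16]) AS x);
```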
@ -5,7 +5,7 @@ toc_title: welchTTest
# welchTTest {#welchttest}

Applies Welch's t-test to samples from two populations.

**Syntax**
@ -45,7 +45,7 @@ SELECT [1, 2] AS x, toTypeName(x)
## Working with Data Types {#working-with-data-types}

The maximum size of an array is limited to one million elements.

When creating an array on the fly, ClickHouse automatically defines the argument type as the narrowest data type that can store all the listed arguments. If there are any [Nullable](../../sql-reference/data-types/nullable.md#data_type-nullable) or literal [NULL](../../sql-reference/syntax.md#null-literal) values, the type of an array element also becomes [Nullable](../../sql-reference/data-types/nullable.md).
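For example, mixing integers with a `NULL` literal yields a `Nullable` element type:

``` sql
SELECT array(1, 2, NULL) AS x, toTypeName(x);
-- x is [1,2,NULL] with type Array(Nullable(UInt8))
```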
@ -5,7 +5,7 @@ toc_title: Geo
# Geo Data Types {#geo-data-types}

ClickHouse supports data types for representing geographical objects — locations, lands, etc.

!!! warning "Warning"
    Currently geo data types are an experimental feature. To work with them you must set `allow_experimental_geo_types = 1`.
@ -28,7 +28,7 @@ CREATE TABLE geo_point (p Point) ENGINE = Memory();
INSERT INTO geo_point VALUES((10, 10));
SELECT p, toTypeName(p) FROM geo_point;
```
Result:

``` text
┌─p─────┬─toTypeName(p)─┐
@ -50,7 +50,7 @@ CREATE TABLE geo_ring (r Ring) ENGINE = Memory();
INSERT INTO geo_ring VALUES([(0, 0), (10, 0), (10, 10), (0, 10)]);
SELECT r, toTypeName(r) FROM geo_ring;
```
Result:

``` text
┌─r─────────────────────────────┬─toTypeName(r)─┐
@ -73,7 +73,7 @@ INSERT INTO geo_polygon VALUES([[(20, 20), (50, 20), (50, 50), (20, 50)], [(30,
SELECT pg, toTypeName(pg) FROM geo_polygon;
```

Result:

``` text
┌─pg────────────────────────────────────────────────────────────┬─toTypeName(pg)─┐
@ -83,7 +83,7 @@ Result:
## MultiPolygon {#multipolygon-data-type}

`MultiPolygon` consists of multiple polygons and is stored as an array of polygons: [Array](array.md)([Polygon](#polygon-data-type)).

**Example**
@ -95,7 +95,7 @@ CREATE TABLE geo_multipolygon (mpg MultiPolygon) ENGINE = Memory();
INSERT INTO geo_multipolygon VALUES([[[(0, 0), (10, 0), (10, 10), (0, 10)]], [[(20, 20), (50, 20), (50, 50), (20, 50)],[(30, 30), (50, 50), (50, 30)]]]);
SELECT mpg, toTypeName(mpg) FROM geo_multipolygon;
```
Result:

``` text
┌─mpg─────────────────────────────────────────────────────────────────────────────────────────────┬─toTypeName(mpg)─┐
@ -7,7 +7,7 @@ toc_title: UInt8, UInt16, UInt32, UInt64, UInt256, Int8, Int16, Int32, Int64, In
Fixed-length integers, with or without a sign.

When creating tables, numeric parameters for integer numbers can be set (e.g. `TINYINT(8)`, `SMALLINT(16)`, `INT(32)`, `BIGINT(64)`), but ClickHouse ignores them.

## Int Ranges {#int-ranges}
@ -5,9 +5,9 @@ toc_title: Map(key, value)
# Map(key, value) {#data_type-map}

`Map(key, value)` data type stores `key:value` pairs.

**Parameters**

- `key` — The key part of the pair. [String](../../sql-reference/data-types/string.md), [Integer](../../sql-reference/data-types/int-uint.md), [LowCardinality](../../sql-reference/data-types/lowcardinality.md), or [FixedString](../../sql-reference/data-types/fixedstring.md).
- `value` — The value part of the pair. [String](../../sql-reference/data-types/string.md), [Integer](../../sql-reference/data-types/int-uint.md), [Array](../../sql-reference/data-types/array.md), [LowCardinality](../../sql-reference/data-types/lowcardinality.md), or [FixedString](../../sql-reference/data-types/fixedstring.md).
@ -23,7 +23,7 @@ CREATE TABLE table_map (a Map(String, UInt64)) ENGINE=Memory;
INSERT INTO table_map VALUES ({'key1':1, 'key2':10}), ({'key1':2,'key2':20}), ({'key1':3,'key2':30});
```

Select all `key2` values:

```sql
SELECT a['key2'] FROM table_map;
@ -38,7 +38,7 @@ Result:
└─────────────────────────┘
```

If there's no such `key` in the `Map()` column, the query returns zeros for numerical values, empty strings or empty arrays.

```sql
INSERT INTO table_map VALUES ({'key3':100}), ({});
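-- A hypothetical continuation: reading a key that is missing from a row
-- returns the default value of the value type (0 here), while the row
-- that contains 'key3' returns its stored value.
SELECT a['key3'] FROM table_map;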
@ -24,7 +24,7 @@ The following aggregate functions are supported:
!!! note "Note"
    Values of the `SimpleAggregateFunction(func, Type)` look and are stored the same way as `Type`, so you do not need to apply functions with `-Merge`/`-State` suffixes.

`SimpleAggregateFunction` has better performance than `AggregateFunction` with the same aggregation function.

**Parameters**
@ -8,7 +8,7 @@ toc_title: String
Strings of an arbitrary length. The length is not limited. The value can contain an arbitrary set of bytes, including null bytes.
The String type replaces the types VARCHAR, BLOB, CLOB, and others from other DBMSs.

When creating tables, numeric parameters for string fields can be set (e.g. `VARCHAR(255)`), but ClickHouse ignores them.

## Encodings {#encodings}
@ -1,6 +1,6 @@
---
toc_priority: 46
toc_title: Polygon Dictionaries With Grids
---
@ -267,7 +267,7 @@ bitHammingDistance(int1, int2)
**Returned value**

- The Hamming distance.

Type: [UInt8](../../sql-reference/data-types/int-uint.md).
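For instance, `111` (binary `1101111`) and `121` (binary `1111001`) differ in three bit positions:

``` sql
SELECT bitHammingDistance(111, 121);
-- returns 3
```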
@ -85,7 +85,7 @@ hex(arg)
The function is using uppercase letters `A-F` and not using any prefixes (like `0x`) or suffixes (like `h`).

For integer arguments, it prints hex digits (“nibbles”) from the most significant to least significant (big-endian or “human-readable” order). It starts with the most significant non-zero byte (leading zero bytes are omitted) but always prints both digits of every byte even if the leading digit is zero.

**Example**
@ -105,7 +105,7 @@ Values of type `Date` and `DateTime` are formatted as corresponding integers (th
For `String` and `FixedString`, all bytes are simply encoded as two hexadecimal numbers. Zero bytes are not omitted.

Values of floating point and Decimal types are encoded as their representation in memory. As we support little-endian architecture, they are encoded in little-endian. Zero leading/trailing bytes are not omitted.

**Arguments**
|
└──────┘
```

## bin {#bin}

Returns a string containing the argument’s binary representation.

Alias: `BIN`.

**Syntax**

``` sql
bin(arg)
```

For integer arguments, it prints binary digits from the most significant to least significant (big-endian or “human-readable” order). It starts with the most significant non-zero byte (leading zero bytes are omitted) but always prints all eight digits of every byte even if the leading digits are zero.
**Example**

Query:

``` sql
SELECT bin(1);
```

Result:

``` text
00000001
```

Values of type `Date` and `DateTime` are formatted as corresponding integers (the number of days since Epoch for `Date` and the value of the Unix timestamp for `DateTime`).

For `String` and `FixedString`, all bytes are simply encoded as eight binary numbers. Zero bytes are not omitted.

Values of floating-point and Decimal types are encoded as their representation in memory. As we support little-endian architecture, they are encoded in little-endian. Zero leading/trailing bytes are not omitted.

**Arguments**

- `arg` — A value to convert to binary. Types: [String](../../sql-reference/data-types/string.md), [UInt](../../sql-reference/data-types/int-uint.md), [Float](../../sql-reference/data-types/float.md), [Decimal](../../sql-reference/data-types/decimal.md), [Date](../../sql-reference/data-types/date.md) or [DateTime](../../sql-reference/data-types/datetime.md).

**Returned value**

- A string with the binary representation of the argument.

Type: `String`.

**Example**

Query:

``` sql
SELECT bin(toFloat32(number)) as bin_presentation FROM numbers(15, 2);
```

Result:

``` text
┌─bin_presentation─────────────────┐
│ 00000000000000000111000001000001 │
│ 00000000000000001000000001000001 │
└──────────────────────────────────┘
```

Query:

``` sql
SELECT bin(toFloat64(number)) as bin_presentation FROM numbers(15, 2);
```

Result:

``` text
┌─bin_presentation─────────────────────────────────────────────────┐
│ 0000000000000000000000000000000000000000000000000010111001000000 │
│ 0000000000000000000000000000000000000000000000000011000001000000 │
└──────────────────────────────────────────────────────────────────┘
```
## unbin {#unbinstr}

Performs the opposite operation of [bin](#bin). It interprets each pair of binary digits (in the argument) as a number and converts it to the byte represented by the number. The return value is a binary string (BLOB).

If you want to convert the result to a number, you can use the [reverse](../../sql-reference/functions/string-functions.md#reverse) and [reinterpretAs<Type>](../../sql-reference/functions/type-conversion-functions.md#type-conversion-functions) functions.

!!! note "Note"
    If `unbin` is invoked from within the `clickhouse-client`, binary strings are displayed using UTF-8.

Alias: `UNBIN`.

**Syntax**

``` sql
unbin(arg)
```

**Arguments**

- `arg` — A string containing any number of binary digits. Type: [String](../../sql-reference/data-types/string.md).

Supports binary digits `0-1`. The number of binary digits does not have to be a multiple of eight. If the argument string contains anything other than binary digits, some implementation-defined result is returned (an exception isn’t thrown). For a numeric argument the inverse of `bin(N)` is not performed by `unbin()`.

**Returned value**

- A binary string (BLOB).

Type: [String](../../sql-reference/data-types/string.md).
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT UNBIN('001100000011000100110010'), UNBIN('0100110101111001010100110101000101001100');
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌─unbin('001100000011000100110010')─┬─unbin('0100110101111001010100110101000101001100')─┐
|
||||
│ 012 │ MySQL │
|
||||
└───────────────────────────────────┴───────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT reinterpretAsUInt64(reverse(unbin('1010'))) AS num;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌─num─┐
|
||||
│ 10 │
|
||||
└─────┘
|
||||
```

## UUIDStringToNum(str) {#uuidstringtonumstr}

Accepts a string containing 36 characters in the format `123e4567-e89b-12d3-a456-426655440000`, and returns it as a set of bytes in a FixedString(16).

@ -9,7 +9,7 @@ These functions implement encryption and decryption of data with AES (Advanced

Key length depends on encryption mode. It is 16, 24, and 32 bytes long for `-128-`, `-192-`, and `-256-` modes respectively.

Initialization vector length is always 16 bytes (bytes in excess of 16 are ignored).

Note that these functions work slowly in versions before ClickHouse 21.1.
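
As an illustration of matching the key and IV lengths from the rules above (a sketch: the key and IV strings are placeholders, and the ciphertext output is omitted):

``` sql
-- 32-byte key and 16-byte IV, as required by the -256- modes
SELECT hex(encrypt('aes-256-cfb128', 'Secret', '12345678910121314151617181920212', 'iviviviviviviviv'));
```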
@ -168,7 +168,7 @@ Result:

``` text
Received exception from server (version 21.1.2):
Code: 36. DB::Exception: Received from localhost:9000. DB::Exception: Invalid key size: 33 expected 32: While processing encrypt('aes-256-cfb128', 'Secret', '123456789101213141516171819202122', 'iviviviviviviviv123').
```

While `aes_encrypt_mysql` produces MySQL-compatible output:

@ -4,7 +4,7 @@ toc_title: Geohash

# Functions for Working with Geohash {#geohash}

[Geohash](https://en.wikipedia.org/wiki/Geohash) is a geocoding system which subdivides the Earth’s surface into grid-shaped buckets and encodes each cell into a short string of letters and digits. It is a hierarchical data structure, so the longer the geohash string, the more precise the geographic location.

If you need to manually convert geographic coordinates to geohash strings, you can use [geohash.org](http://geohash.org/).
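
For example, a minimal sketch with the `geohashEncode` function (assuming the argument order is longitude, latitude and an optional precision, where `0` falls back to the default of 12 characters), returning a string like `ezs42d000000`:

``` sql
SELECT geohashEncode(-5.60302734375, 42.593994140625, 0) AS encoded;
```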
@ -4,15 +4,15 @@ toc_title: H3 Indexes

# Functions for Working with H3 Indexes {#h3index}

[H3](https://eng.uber.com/h3/) is a geographical indexing system where the Earth’s surface is divided into a grid of even hexagonal cells. This system is hierarchical, i.e. each hexagon on the top level (“parent”) can be split into seven even but smaller ones (“children”), and so on.

The level of the hierarchy is called `resolution` and can receive a value from `0` to `15`, where `0` is the `base` level with the largest and coarsest cells.

A latitude and longitude pair can be transformed to a 64-bit H3 index, identifying a grid cell.

The H3 index is used primarily for bucketing locations and other geospatial manipulations.

The full description of the H3 system is available at [the Uber Engineering site](https://eng.uber.com/h3/).
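
As a quick illustration, a sketch with the `geoToH3` function (assuming the argument order longitude, latitude, resolution):

``` sql
-- resolution 15 is the finest level of the hierarchy
SELECT geoToH3(37.79506683, 55.71290588, 15) AS h3Index;
```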

## h3IsValid {#h3isvalid}

@ -142,7 +142,7 @@ h3EdgeLengthM(resolution)

**Example**

Query:

``` sql
SELECT h3EdgeLengthM(15) as edgeLengthM;

@ -48,7 +48,7 @@ Functions can’t change the values of their arguments – any changes are retur

Higher-order functions can only accept lambda functions as their functional argument. To pass a lambda function to a higher-order function, use the `->` operator. The left side of the arrow has a formal parameter, which is any ID, or multiple formal parameters – any IDs in a tuple. The right side of the arrow has an expression that can use these formal parameters, as well as any table columns.

Examples:

```
x -> 2 * x

@ -53,7 +53,7 @@ Since using ‘xxx’ is highly unusual, this may be changed in the future. We r

### IPv6NumToString(x) {#ipv6numtostringx}

Accepts a FixedString(16) value containing the IPv6 address in binary format. Returns a string containing this address in text format.
IPv6-mapped IPv4 addresses are output in the format ::ffff:111.222.33.44.

Alias: `INET6_NTOA`.
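
For example (a sketch; the binary address below is built with `unhex` purely for illustration):

``` sql
SELECT IPv6NumToString(toFixedString(unhex('2A0206B8000000000000000000000011'), 16)) AS addr;
```

Result:

``` text
┌─addr─────────┐
│ 2a02:6b8::11 │
└──────────────┘
```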
@ -123,7 +123,7 @@ LIMIT 10

## IPv6StringToNum {#ipv6stringtonums}

The reverse function of [IPv6NumToString](#ipv6numtostringx). If the IPv6 address has an invalid format, it returns a string of null bytes.

If the input string contains a valid IPv4 address, returns its IPv6 equivalent.
HEX can be uppercase or lowercase.
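
For example, a sketch of the IPv4-mapped case (the 16 returned bytes are shown via `hex`):

``` sql
SELECT hex(IPv6StringToNum('::ffff:127.0.0.1')) AS bytes;
```

Result:

``` text
┌─bytes────────────────────────────┐
│ 00000000000000000000FFFF7F000001 │
└──────────────────────────────────┘
```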
@ -136,13 +136,13 @@ Alias: `INET6_ATON`.

IPv6StringToNum(string)
```

**Argument**

- `string` — IP address. [String](../../sql-reference/data-types/string.md).

**Returned value**

- IPv6 address in binary format.

Type: [FixedString(16)](../../sql-reference/data-types/fixedstring.md).

@ -280,7 +280,7 @@ toIPv6(string)

**Returned value**

- IP address.

Type: [IPv6](../../sql-reference/data-types/domains/ipv6.md).

@ -325,7 +325,7 @@ toJSONString(value)

**Returned value**

- JSON representation of the value.

Type: [String](../../sql-reference/data-types/string.md).

@ -21,7 +21,7 @@ and(val1, val2...)

**Arguments**

- `val1, val2, ...` — List of at least two values. [Int](../../sql-reference/data-types/int-uint.md), [UInt](../../sql-reference/data-types/int-uint.md), [Float](../../sql-reference/data-types/float.md) or [Nullable](../../sql-reference/data-types/nullable.md).

**Returned value**

@ -73,7 +73,7 @@ and(val1, val2...)

**Arguments**

- `val1, val2, ...` — List of at least two values. [Int](../../sql-reference/data-types/int-uint.md), [UInt](../../sql-reference/data-types/int-uint.md), [Float](../../sql-reference/data-types/float.md) or [Nullable](../../sql-reference/data-types/nullable.md).

**Returned value**

@ -125,7 +125,7 @@ not(val);

**Arguments**

- `val` — The value. [Int](../../sql-reference/data-types/int-uint.md), [UInt](../../sql-reference/data-types/int-uint.md), [Float](../../sql-reference/data-types/float.md) or [Nullable](../../sql-reference/data-types/nullable.md).

**Returned value**

@ -163,11 +163,11 @@ xor(val1, val2...)

**Arguments**

- `val1, val2, ...` — List of at least two values. [Int](../../sql-reference/data-types/int-uint.md), [UInt](../../sql-reference/data-types/int-uint.md), [Float](../../sql-reference/data-types/float.md) or [Nullable](../../sql-reference/data-types/nullable.md).

**Returned value**

- `1`, for two values: if one of the values is zero and the other is not.
- `0`, for two values: if both values are zero or both are non-zero.
- `NULL`, if there is at least one `NULL` value.
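
For example, a minimal sketch of the two-value case:

``` sql
SELECT xor(0, 1) AS res;
```

Result:

``` text
┌─res─┐
│   1 │
└─────┘
```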
@ -21,13 +21,13 @@ The [stochasticLogisticRegression](../../sql-reference/aggregate-functions/refer

Compares test groups (variants) and calculates for each group the probability to be the best one. The first group is used as a control group.

**Syntax**

``` sql
bayesAB(distribution_name, higher_is_better, variant_names, x, y)
```

**Arguments**

- `distribution_name` — Name of the probability distribution. [String](../../sql-reference/data-types/string.md). Possible values:

@ -170,13 +170,13 @@ SELECT alphaTokens('abca1abc');

Extracts all groups from non-overlapping substrings matched by a regular expression.

**Syntax**

``` sql
extractAllGroups(text, regexp)
```

**Arguments**

- `text` — [String](../data-types/string.md) or [FixedString](../data-types/fixedstring.md).
- `regexp` — Regular expression. Constant. [String](../data-types/string.md) or [FixedString](../data-types/fixedstring.md).

@ -504,7 +504,7 @@ Replaces literals, sequences of literals and complex aliases with placeholders.

normalizeQuery(x)
```

**Arguments**

- `x` — Sequence of characters. [String](../../sql-reference/data-types/string.md).

@ -534,13 +534,13 @@ Result:

Returns identical 64-bit hash values without the values of literals for similar queries. This helps to analyze the query log.

**Syntax**

``` sql
normalizedQueryHash(x)
```

**Arguments**

- `x` — Sequence of characters. [String](../../sql-reference/data-types/string.md).

@ -572,13 +572,13 @@ Escapes characters to place string into XML text node or attribute.

The following five XML predefined entities will be replaced: `<`, `&`, `>`, `"`, `'`.

**Syntax**

``` sql
encodeXMLComponent(x)
```

**Arguments**

- `x` — The sequence of characters. [String](../../sql-reference/data-types/string.md).
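
For example, a minimal sketch showing the entity replacement:

``` sql
SELECT encodeXMLComponent('a < b & "c"');
```

Result:

``` text
a &lt; b &amp; &quot;c&quot;
```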
@ -641,7 +641,7 @@ SELECT decodeXMLComponent('< Σ >');

Result:

``` text
'foo'
< Σ >
```

@ -683,7 +683,7 @@ extractTextFromHTML(x)

**Arguments**

- `x` — Input text. [String](../../sql-reference/data-types/string.md).

**Returned value**

@ -14,7 +14,7 @@ The search is case-sensitive by default in all these functions. There are separa

Searches for the substring `needle` in the string `haystack`.

Returns the position (in bytes) of the found substring in the string, starting from 1.

For a case-insensitive search, use the function [positionCaseInsensitive](#positioncaseinsensitive).

@ -22,11 +22,11 @@ For a case-insensitive search, use the function [positionCaseInsensitive](#posit

``` sql
position(haystack, needle[, start_pos])
```

``` sql
position(needle IN haystack)
```

Alias: `locate(haystack, needle[, start_pos])`.
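
For example (a minimal sketch; positions are 1-based and counted in bytes):

``` sql
SELECT position('Hello, world!', '!') AS pos;
```

Result:

``` text
┌─pos─┐
│  13 │
└─────┘
```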
@ -399,27 +399,27 @@ Extracts all the fragments of a string using a regular expression. If ‘haystac

## extractAllGroupsHorizontal {#extractallgroups-horizontal}

Matches all groups of the `haystack` string using the `pattern` regular expression. Returns an array of arrays, where the first array includes all fragments matching the first group, the second array - matching the second group, etc.

!!! note "Note"
    The `extractAllGroupsHorizontal` function is slower than [extractAllGroupsVertical](#extractallgroups-vertical).

**Syntax**

``` sql
extractAllGroupsHorizontal(haystack, pattern)
```

**Arguments**

- `haystack` — Input string. Type: [String](../../sql-reference/data-types/string.md).
- `pattern` — Regular expression with [re2 syntax](https://github.com/google/re2/wiki/Syntax). Must contain groups, each group enclosed in parentheses. If `pattern` contains no groups, an exception is thrown. Type: [String](../../sql-reference/data-types/string.md).

**Returned value**

- Type: [Array](../../sql-reference/data-types/array.md).

If `haystack` does not match the `pattern` regex, an array of empty arrays is returned.

**Example**

@ -445,13 +445,13 @@ Result:

Matches all groups of the `haystack` string using the `pattern` regular expression. Returns an array of arrays, where each array includes matching fragments from every group. Fragments are grouped in order of appearance in the `haystack`.

**Syntax**

``` sql
extractAllGroupsVertical(haystack, pattern)
```

**Arguments**

- `haystack` — Input string. Type: [String](../../sql-reference/data-types/string.md).
- `pattern` — Regular expression with [re2 syntax](https://github.com/google/re2/wiki/Syntax). Must contain groups, each group enclosed in parentheses. If `pattern` contains no groups, an exception is thrown. Type: [String](../../sql-reference/data-types/string.md).

@ -460,7 +460,7 @@ extractAllGroupsVertical(haystack, pattern)

- Type: [Array](../../sql-reference/data-types/array.md).

If `haystack` does not match the `pattern` regex, an empty array is returned.

**Example**

@ -731,7 +731,7 @@ SELECT countSubstringsCaseInsensitiveUTF8(haystack, needle[, start_pos])

Type: [UInt64](../../sql-reference/data-types/int-uint.md).

**Examples**

Query:

@ -9,13 +9,13 @@ toc_title: Working with maps

Arranges `key:value` pairs into [Map(key, value)](../../sql-reference/data-types/map.md) data type.

**Syntax**

``` sql
map(key1, value1[, key2, value2, ...])
```

**Arguments**

- `key` — The key part of the pair. [String](../../sql-reference/data-types/string.md) or [Integer](../../sql-reference/data-types/int-uint.md).
- `value` — The value part of the pair. [String](../../sql-reference/data-types/string.md), [Integer](../../sql-reference/data-types/int-uint.md) or [Array](../../sql-reference/data-types/array.md).

@ -62,7 +62,7 @@ Result:

└─────────────────────────┘
```

**See Also**

- [Map(key, value)](../../sql-reference/data-types/map.md) data type

@ -109,13 +109,13 @@ Query with `Map` type:

Collect all the keys and subtract corresponding values.

**Syntax**

``` sql
mapSubtract(Tuple(Array, Array), Tuple(Array, Array) [, ...])
```

**Arguments**

Arguments are [tuples](../../sql-reference/data-types/tuple.md#tuplet1-t2) of two [arrays](../../sql-reference/data-types/array.md#data-type-array), where items in the first array represent keys, and the second array contains values for each key. All key arrays should have the same type, and all value arrays should contain items which can be promoted to one type ([Int64](../../sql-reference/data-types/int-uint.md#int-ranges), [UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges) or [Float64](../../sql-reference/data-types/float.md#float32-float64)). The common promoted type is used as a type for the result array.
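
For example (a sketch; with these plain integer literals the values should be promoted to a common integer type, yielding something like `([1,2],[8,7])`):

``` sql
SELECT mapSubtract(([1, 2], [10, 10]), ([1, 2], [2, 3])) AS res;
```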
@ -143,7 +143,7 @@ Result:

Fills missing keys in the maps (key and value array pair), where keys are integers. Also, it supports specifying the max key, which is used to extend the keys array.

**Syntax**

``` sql
mapPopulateSeries(keys, values[, max])
```
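
For example (a sketch; it assumes missing keys between the smallest key and `max` are filled in with default values):

``` sql
SELECT mapPopulateSeries([1, 2, 4], [11, 22, 44], 5) AS res;
```

Result:

``` text
┌─res──────────────────────────┐
│ ([1,2,3,4,5],[11,22,0,44,0]) │
└──────────────────────────────┘
```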
@ -188,7 +188,7 @@ Determines whether the `map` contains the `key` parameter.

mapContains(map, key)
```

**Parameters**

- `map` — Map. [Map](../../sql-reference/data-types/map.md).
- `key` — Key. Type matches the type of keys of `map` parameter.
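
For example (a minimal sketch; returns `1` when the key is present, `0` otherwise):

``` sql
SELECT mapContains(map('name', 'clickhouse', 'type', 'db'), 'name') AS res;
```

Result:

``` text
┌─res─┐
│   1 │
└─────┘
```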
@ -373,7 +373,7 @@ This function accepts a number or date or date with time, and returns a FixedStr

## reinterpretAsUUID {#reinterpretasuuid}

Accepts a 16-byte string and returns a UUID containing bytes representing the corresponding value in network byte order (big-endian). If the string isn’t long enough, the function works as if the string is padded with the necessary number of null bytes to the end. If the string is longer than 16 bytes, the extra bytes at the end are ignored.

**Syntax**

@ -439,8 +439,8 @@ reinterpret(x, type)

**Arguments**

- `x` — Any type.
- `type` — Destination type. [String](../../sql-reference/data-types/string.md).

**Returned value**

@ -478,8 +478,8 @@ x::t

**Arguments**

- `x` — A value to convert. May be of any type.
- `T` — The name of the target data type. [String](../../sql-reference/data-types/string.md).
- `t` — The target data type.

**Returned value**

@ -529,7 +529,7 @@ Result:

Conversion to FixedString(N) only works for arguments of type [String](../../sql-reference/data-types/string.md) or [FixedString](../../sql-reference/data-types/fixedstring.md).

Type conversion to [Nullable](../../sql-reference/data-types/nullable.md) and back is supported.

**Example**

@ -569,7 +569,7 @@ Result:

## accurateCast(x, T) {#type_conversion_function-accurate-cast}

Converts `x` to the `T` data type.

The difference from [cast(x, T)](#type_conversion_function-cast) is that `accurateCast` does not allow overflow of numeric types during cast if the value `x` does not fit the bounds of type `T`. For example, `accurateCast(-1, 'UInt8')` throws an exception.
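
For example, a minimal sketch contrasting the two (the first query succeeds, the second throws):

``` sql
SELECT cast(-1, 'UInt8') AS uint8;          -- wraps around to 255
SELECT accurateCast(-1, 'UInt8') AS uint8;  -- throws an exception
```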
@ -1170,7 +1170,7 @@ Result:

## toUnixTimestamp64Nano {#tounixtimestamp64nano}

Converts a `DateTime64` to an `Int64` value with fixed sub-second precision. The input value is scaled up or down appropriately depending on its precision.

!!! info "Note"
    The output value is a timestamp in UTC, not in the timezone of `DateTime64`.

@ -1206,7 +1206,7 @@ Result:

└──────────────────────────────┘
```

Query:

``` sql
WITH toDateTime64('2019-09-16 19:20:12.345678910', 6) AS dt64

@ -283,7 +283,7 @@ SELECT firstSignificantSubdomainCustom('bar.foo.there-is-no-such-domain', 'publi

Result:

```text
┌─firstSignificantSubdomainCustom('bar.foo.there-is-no-such-domain', 'public_suffix_list')─┐
│ foo │
└──────────────────────────────────────────────────────────────────────────────────────────┘

@ -20,12 +20,11 @@ The following actions are supported:

- [ADD COLUMN](#alter_add-column) — Adds a new column to the table.
- [DROP COLUMN](#alter_drop-column) — Deletes the column.
- [RENAME COLUMN](#alter_rename-column) — Renames an existing column.
- [CLEAR COLUMN](#alter_clear-column) — Resets column values.
- [COMMENT COLUMN](#alter_comment-column) — Adds a text comment to the column.
- [MODIFY COLUMN](#alter_modify-column) — Changes column’s type, default expression and TTL.
- [MODIFY COLUMN REMOVE](#modify-remove) — Removes one of the column properties.

These actions are described in detail below.

@ -35,7 +34,7 @@ These actions are described in detail below.

ADD COLUMN [IF NOT EXISTS] name [type] [default_expr] [codec] [AFTER name_after | FIRST]
```

Adds a new column to the table with the specified `name`, `type`, [`codec`](../create/table.md#codecs) and `default_expr` (see the section [Default expressions](../../../sql-reference/statements/create/table.md#create-default-values)).

If the `IF NOT EXISTS` clause is included, the query won’t return an error if the column already exists. If you specify `AFTER name_after` (the name of another column), the column is added after the specified one in the list of table columns. If you want to add a column to the beginning of the table use the `FIRST` clause. Otherwise, the column is added to the end of the table. For a chain of actions, `name_after` can be the name of a column that is added in one of the previous actions.

@ -64,6 +63,7 @@ Added2 UInt32

ToDrop UInt32
Added3 UInt32
```

## DROP COLUMN {#alter_drop-column}

``` sql

@ -91,7 +91,7 @@ RENAME COLUMN [IF EXISTS] name to new_name

Renames the column `name` to `new_name`. If the `IF EXISTS` clause is specified, the query won’t return an error if the column does not exist. Since renaming does not involve the underlying data, the query is completed almost instantly.

**NOTE**: Columns specified in the key expression of the table (either with `ORDER BY` or `PRIMARY KEY`) cannot be renamed. Trying to change these columns will produce `SQL Error [524]`.

Example:

@ -118,7 +118,7 @@ ALTER TABLE visits CLEAR COLUMN browser IN PARTITION tuple()

## COMMENT COLUMN {#alter_comment-column}

``` sql
COMMENT COLUMN [IF EXISTS] name 'Text comment'
```

Adds a comment to the column. If the `IF EXISTS` clause is specified, the query won’t return an error if the column does not exist.

@ -136,7 +136,7 @@ ALTER TABLE visits COMMENT COLUMN browser 'The table shows the browser used for

## MODIFY COLUMN {#alter_modify-column}

``` sql
MODIFY COLUMN [IF EXISTS] name [type] [default_expr] [codec] [TTL] [AFTER name_after | FIRST]
```

This query changes the `name` column properties:

@ -145,8 +145,12 @@ This query changes the `name` column properties:

- Default expression

- Compression Codec

- TTL

For examples of modifying column compression codecs, see [Column Compression Codecs](../create/table.md#codecs).

For examples of modifying column TTL, see [Column TTL](../../../engines/table-engines/mergetree-family/mergetree.md#mergetree-column-ttl).

If the `IF EXISTS` clause is specified, the query won’t return an error if the column does not exist.

@ -179,6 +183,8 @@ ALTER TABLE table_name MODIFY column_name REMOVE property;

**Example**

Remove TTL:

```sql
ALTER TABLE table_with_ttl MODIFY COLUMN column_ttl REMOVE TTL;
```

@ -187,22 +193,6 @@ ALTER TABLE table_with_ttl MODIFY COLUMN column_ttl REMOVE TTL;

- [REMOVE TTL](ttl.md).

## RENAME COLUMN {#alter_rename-column}

Renames an existing column.

Syntax:

```sql
ALTER TABLE table_name RENAME COLUMN column_name TO new_column_name
```

**Example**

```sql
ALTER TABLE table_with_ttl RENAME COLUMN column_ttl TO column_ttl_new;
```

## Limitations {#alter-query-limitations}

The `ALTER` query lets you create and delete separate elements (columns) in nested data structures, but not whole nested data structures. To add a nested data structure, you can add columns with a name like `name.nested_name` and the type `Array(T)`. A nested data structure is equivalent to multiple array columns with a name that has the same prefix before the dot.

@ -213,4 +203,4 @@ If the `ALTER` query is not sufficient to make the table changes you need, you c

The `ALTER` query blocks all reads and writes for the table. In other words, if a long `SELECT` is running at the time of the `ALTER` query, the `ALTER` query will wait for it to complete. At the same time, all new queries to the same table will wait while this `ALTER` is running.

For tables that do not store data themselves (such as [Merge](../../../sql-reference/statements/alter/index.md) and [Distributed](../../../sql-reference/statements/alter/index.md)), `ALTER` just changes the table structure, and does not change the structure of subordinate tables. For example, when running ALTER for a `Distributed` table, you will also need to run `ALTER` for the tables on all remote servers.

@ -88,7 +88,7 @@ ALTER TABLE visits ATTACH PART 201901_2_2_0;

Read more about setting the partition expression in a section [How to specify the partition expression](#alter-how-to-specify-part-expr).

This query is replicated. The replica-initiator checks whether there is data in the `detached` directory.
If data exists, the query checks its integrity. If everything is correct, the query adds the data to the table.

If the non-initiator replica, receiving the attach command, finds the part with the correct checksums in its own `detached` folder, it attaches the data without fetching it from other replicas.

@ -10,7 +10,7 @@ Changes roles.

Syntax:

``` sql
ALTER ROLE [IF EXISTS] name1 [ON CLUSTER cluster_name1] [RENAME TO new_name1]
    [, name2 [ON CLUSTER cluster_name2] [RENAME TO new_name2] ...]
    [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...]
```
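
For example, a sketch following the grammar above (`accountant` is a hypothetical role name):

``` sql
ALTER ROLE accountant RENAME TO accountant_new SETTINGS max_memory_usage = 10000000;
```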
@ -10,7 +10,7 @@ Changes row policy.

Syntax:

``` sql
ALTER [ROW] POLICY [IF EXISTS] name1 [ON CLUSTER cluster_name1] ON [database1.]table1 [RENAME TO new_name1]
    [, name2 [ON CLUSTER cluster_name2] ON [database2.]table2 [RENAME TO new_name2] ...]
    [AS {PERMISSIVE | RESTRICTIVE}]
    [FOR SELECT]