Mirror of https://github.com/ClickHouse/ClickHouse.git, synced 2024-11-24 00:22:29 +00:00

Commit c84a50e77c: Merge branch 'master' into 40907_Parameterized_views_as_table_functions
@@ -16,7 +16,9 @@ endmacro()

 if (SANITIZE)
     if (SANITIZE STREQUAL "address")
-        set (ASAN_FLAGS "-fsanitize=address -fsanitize-address-use-after-scope")
+        # LLVM-15 has a bug in Address Sanitizer, preventing the usage of 'sanitize-address-use-after-scope',
+        # see https://github.com/llvm/llvm-project/issues/58633
+        set (ASAN_FLAGS "-fsanitize=address -fno-sanitize-address-use-after-scope")
         set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SAN_FLAGS} ${ASAN_FLAGS}")
         set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SAN_FLAGS} ${ASAN_FLAGS}")
@@ -25,6 +25,7 @@ done

 sed -i '/onBrokenMarkdownLinks:/ s/ignore/error/g' docusaurus.config.js

 if [[ $# -lt 1 ]] || [[ "$1" == "--"* ]]; then
+    export CI=true
     exec yarn build "$@"
 fi
@@ -80,6 +80,16 @@ RUN arch=${TARGETARCH:-amd64} \
     && mkdir -p /var/lib/clickhouse /var/log/clickhouse-server /etc/clickhouse-server /etc/clickhouse-client \
     && chmod ugo+Xrw -R /var/lib/clickhouse /var/log/clickhouse-server /etc/clickhouse-server /etc/clickhouse-client

+# Remove as much of Ubuntu as possible.
+# ClickHouse does not need Ubuntu. It can run on top of Linux kernel without any OS distribution.
+# ClickHouse does not need Docker at all. ClickHouse is above all that.
+# It does not care about Ubuntu, Docker, or other cruft, and neither should you.
+# The fact that this Docker image is based on Ubuntu is just a misconception.
+# Some vulnerability scanners are arguing about Ubuntu, which is not relevant to ClickHouse at all.
+# ClickHouse does not care when you report false vulnerabilities by running some Docker scanners.
+
+RUN apt-get remove --purge -y libksba8 && apt-get autoremove -y
+
 # we need to allow "others" access to clickhouse folder, because docker container
 # can be started with arbitrary uid (openshift usecase)
@@ -6,7 +6,7 @@ sidebar_label: Integrations

 # Table Engines for Integrations

-ClickHouse provides various means for integrating with external systems, including table engines. Like with all other table engines, the configuration is done using `CREATE TABLE` or `ALTER TABLE` queries. Then from a user perspective, the configured integration looks like a normal table, but queries to it are proxied to the external system. This transparent querying is one of the key advantages of this approach over alternative integration methods, like external dictionaries or table functions, which require to use custom query methods on each use.
+ClickHouse provides various means for integrating with external systems, including table engines. Like with all other table engines, the configuration is done using `CREATE TABLE` or `ALTER TABLE` queries. Then from a user perspective, the configured integration looks like a normal table, but queries to it are proxied to the external system. This transparent querying is one of the key advantages of this approach over alternative integration methods, like dictionaries or table functions, which require using custom query methods on each use.

 List of supported integrations:
@@ -180,6 +180,6 @@ Default value: `300`.

 ## See Also {#see-also}

 - [The mysql table function](../../../sql-reference/table-functions/mysql.md)
-- [Using MySQL as a source of external dictionary](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md#dicts-external_dicts_dict_sources-mysql)
+- [Using MySQL as a dictionary source](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md#dicts-external_dicts_dict_sources-mysql)

 [Original article](https://clickhouse.com/docs/en/engines/table-engines/integrations/mysql/) <!--hide-->

@@ -126,7 +126,7 @@ SELECT * FROM odbc_t

 ## See Also {#see-also}

-- [ODBC external dictionaries](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md#dicts-external_dicts_dict_sources-odbc)
+- [ODBC dictionaries](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md#dicts-external_dicts_dict_sources-odbc)
 - [ODBC table function](../../../sql-reference/table-functions/odbc.md)

 [Original article](https://clickhouse.com/docs/en/engines/table-engines/integrations/odbc/) <!--hide-->

@@ -174,6 +174,6 @@ CREATE TABLE pg_table_schema_with_dots (a UInt32)

 **See Also**

 - [The `postgresql` table function](../../../sql-reference/table-functions/postgresql.md)
-- [Using PostgreSQL as a source of external dictionary](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md#dicts-external_dicts_dict_sources-postgresql)
+- [Using PostgreSQL as a dictionary source](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md#dicts-external_dicts_dict_sources-postgresql)

 [Original article](https://clickhouse.com/docs/en/engines/table-engines/integrations/postgresql/) <!--hide-->
@@ -163,7 +163,7 @@ SELECT mcc, count() FROM cell_towers GROUP BY mcc ORDER BY count() DESC LIMIT 10

 Based on the above query and the [MCC list](https://en.wikipedia.org/wiki/Mobile_country_code), the countries with the most cell towers are: the USA, Germany, and Russia.

-You may want to create an [External Dictionary](../../sql-reference/dictionaries/external-dictionaries/external-dicts.md) in ClickHouse to decode these values.
+You may want to create a [Dictionary](../../sql-reference/dictionaries/external-dictionaries/external-dicts.md) in ClickHouse to decode these values.

 ## Use case: Incorporate geo data {#use-case}
(File diff suppressed because it is too large)
(Binary file not shown; added image, 277 KiB)
(Binary file not shown; added image, 315 KiB)
@@ -5,7 +5,7 @@ sidebar_label: Input and Output Formats
 title: Formats for Input and Output Data
 ---

-ClickHouse can accept and return data in various formats. A format supported for input can be used to parse the data provided to `INSERT`s, to perform `SELECT`s from a file-backed table such as File, URL or HDFS, or to read an external dictionary. A format supported for output can be used to arrange the
+ClickHouse can accept and return data in various formats. A format supported for input can be used to parse the data provided to `INSERT`s, to perform `SELECT`s from a file-backed table such as File, URL or HDFS, or to read a dictionary. A format supported for output can be used to arrange the
 results of a `SELECT`, and to perform `INSERT`s into a file-backed table.

 The supported formats are:
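
For a quick, hedged illustration of the input/output split described in this hunk (the table name `t` is hypothetical):

```sql
-- Output format: arrange SELECT results as JSON, one object per row.
SELECT 1 AS x, 'a' AS s
FORMAT JSONEachRow;

-- Input format: parse the data provided to an INSERT as comma-separated values.
INSERT INTO t FORMAT CSV
1,"hello"
2,"world"
```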
@@ -130,7 +130,7 @@ SHOW TABLES FROM mydatabase;
 └────────┘
 ```

-### Example of using named collections with an external dictionary with source MySQL
+### Example of using named collections with a dictionary with source MySQL

 ```sql
 CREATE DICTIONARY dict (A Int64, B String)

@@ -213,7 +213,7 @@ SHOW TABLES FROM mydatabase
 └──────┘
 ```

-### Example of using named collections with an external dictionary with source POSTGRESQL
+### Example of using named collections with a dictionary with source POSTGRESQL

 ```sql
 CREATE DICTIONARY dict (a Int64, b String)

@@ -270,7 +270,7 @@ SELECT * FROM remote(remote1, database = default, table = test);
 └───┴───┘
 ```

-### Example of using named collections with an external dictionary with source ClickHouse
+### Example of using named collections with a dictionary with source ClickHouse

 ```sql
 CREATE DICTIONARY dict(a Int64, b String)
|
@ -268,14 +268,14 @@ The path to the table in ZooKeeper.
|
|||||||
|
|
||||||
## dictionaries_config {#server_configuration_parameters-dictionaries_config}
|
## dictionaries_config {#server_configuration_parameters-dictionaries_config}
|
||||||
|
|
||||||
The path to the config file for external dictionaries.
|
The path to the config file for dictionaries.
|
||||||
|
|
||||||
Path:
|
Path:
|
||||||
|
|
||||||
- Specify the absolute path or the path relative to the server config file.
|
- Specify the absolute path or the path relative to the server config file.
|
||||||
- The path can contain wildcards \* and ?.
|
- The path can contain wildcards \* and ?.
|
||||||
|
|
||||||
See also “[External dictionaries](../../sql-reference/dictionaries/external-dictionaries/external-dicts.md)”.
|
See also “[Dictionaries](../../sql-reference/dictionaries/external-dictionaries/external-dicts.md)”.
|
||||||
|
|
||||||
**Example**
|
**Example**
|
||||||
|
|
||||||
|
@@ -16,44 +16,54 @@ Queries in ClickHouse can be divided into several types:

 The following settings regulate user permissions by the type of query:

-- [readonly](#settings_readonly) — Restricts permissions for all types of queries except DDL queries.
-- [allow_ddl](#settings_allow_ddl) — Restricts permissions for DDL queries.
-
-`KILL QUERY` can be performed with any settings.
-
-## readonly {#settings_readonly}
-
-Restricts permissions for reading data, write data and change settings queries.
-
-See how the queries are divided into types [above](#permissions_for_queries).
+## readonly
+Restricts permissions for read data, write data, and change settings queries.
+
+When set to 1, allows:
+
+- All types of read queries (like SELECT and equivalent queries).
+- Queries that modify only session context (like USE).
+
+When set to 2, allows the above plus:
+- SET and CREATE TEMPORARY TABLE
+
+:::tip
+Queries like EXISTS, DESCRIBE, EXPLAIN, SHOW PROCESSLIST, etc are equivalent to SELECT, because they just do select from system tables.
+:::

 Possible values:

-- 0 — All queries are allowed.
-- 1 — Only read data queries are allowed.
-- 2 — Read data and change settings queries are allowed.
+- 0 — Read, Write, and Change settings queries are allowed.
+- 1 — Only Read data queries are allowed.
+- 2 — Read data and Change settings queries are allowed.
+
+Default value: 0

+:::note
 After setting `readonly = 1`, the user can’t change `readonly` and `allow_ddl` settings in the current session.

 When using the `GET` method in the [HTTP interface](../../interfaces/http.md), `readonly = 1` is set automatically. To modify data, use the `POST` method.

-Setting `readonly = 1` prohibit the user from changing all the settings. There is a way to prohibit the user from changing only specific settings. Also there is a way to allow changing only specific settings under `readonly = 1` restrictions. For details see [constraints on settings](../../operations/settings/constraints-on-settings.md).
-
-Default value: 0
+Setting `readonly = 1` prohibits the user from changing settings. There is a way to prohibit the user from changing only specific settings. Also there is a way to allow changing only specific settings under `readonly = 1` restrictions. For details see [constraints on settings](../../operations/settings/constraints-on-settings.md).
+:::

 ## allow_ddl {#settings_allow_ddl}

 Allows or denies [DDL](https://en.wikipedia.org/wiki/Data_definition_language) queries.

-See how the queries are divided into types [above](#permissions_for_queries).
-
 Possible values:

 - 0 — DDL queries are not allowed.
 - 1 — DDL queries are allowed.

-You can’t execute `SET allow_ddl = 1` if `allow_ddl = 0` for the current session.
-
 Default value: 1

-[Original article](https://clickhouse.com/docs/en/operations/settings/permissions_for_queries/) <!--hide-->
+:::note
+You cannot run `SET allow_ddl = 1` if `allow_ddl = 0` for the current session.
+:::
+
+:::note KILL QUERY
+`KILL QUERY` can be performed with any combination of readonly and allow_ddl settings.
+:::
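
To make the readonly semantics above concrete, a hedged sketch of a session (assuming a profile that still permits changing these settings):

```sql
-- readonly = 2: reads and settings changes are allowed, writes are not.
SET readonly = 2;
SELECT count() FROM system.tables;  -- OK: a read query
SET max_threads = 4;                -- OK: level 2 permits changing settings

-- readonly = 1 cannot be relaxed within the same session:
SET readonly = 1;
-- SET readonly = 0;                -- would now fail with a READONLY error
```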
@@ -3399,6 +3399,17 @@ Use schema from cache for URL with last modification time validation (for urls with Last-Modified header)

 Default value: `true`.

+## use_structure_from_insertion_table_in_table_functions {#use_structure_from_insertion_table_in_table_functions}
+
+Use structure from insertion table instead of schema inference from data.
+
+Possible values:
+- 0 - disabled
+- 1 - enabled
+- 2 - auto
+
+Default value: 2.
+
 ## compatibility {#compatibility}

 This setting changes other settings according to provided ClickHouse version.
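
A hedged sketch of what the new setting does (the table and file names are hypothetical):

```sql
CREATE TABLE events (id UInt64, name String) ENGINE = MergeTree ORDER BY id;

-- With the setting enabled, the file() table function takes its structure from
-- the insertion table `events` instead of inferring a schema from the data:
INSERT INTO events
SELECT * FROM file('events.tsv', 'TSV')
SETTINGS use_structure_from_insertion_table_in_table_functions = 1;
```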
@@ -7,8 +7,8 @@ Contains information about stack traces for fatal errors. The table does not exist by default.

 Columns:

-- `event_date` ([Datetime](../../sql-reference/data-types/datetime.md)) — Date of the event.
-- `event_time` ([Datetime](../../sql-reference/data-types/datetime.md)) — Time of the event.
+- `event_date` ([DateTime](../../sql-reference/data-types/datetime.md)) — Date of the event.
+- `event_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Time of the event.
 - `timestamp_ns` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Timestamp of the event with nanoseconds.
 - `signal` ([Int32](../../sql-reference/data-types/int-uint.md)) — Signal number.
 - `thread_id` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Thread ID.
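
For reference, a hedged example of inspecting this system table (output will vary, and the table only exists after a fatal error has occurred):

```sql
SELECT event_time, signal, thread_id
FROM system.crash_log
ORDER BY event_time DESC
LIMIT 5;
```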
@@ -3,7 +3,7 @@ slug: /en/operations/system-tables/dictionaries
 ---
 # dictionaries

-Contains information about [external dictionaries](../../sql-reference/dictionaries/external-dictionaries/external-dicts.md).
+Contains information about [dictionaries](../../sql-reference/dictionaries/external-dictionaries/external-dicts.md).

 Columns:

@@ -33,7 +33,7 @@ Columns:
 - `lifetime_min` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Minimum [lifetime](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md) of the dictionary in memory, after which ClickHouse tries to reload the dictionary (if `invalidate_query` is set, then only if it has changed). Set in seconds.
 - `lifetime_max` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Maximum [lifetime](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md) of the dictionary in memory, after which ClickHouse tries to reload the dictionary (if `invalidate_query` is set, then only if it has changed). Set in seconds.
 - `loading_start_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Start time for loading the dictionary.
-- `last_successful_update_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — End time for loading or updating the dictionary. Helps to monitor some troubles with external sources and investigate causes.
+- `last_successful_update_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — End time for loading or updating the dictionary. Helps to monitor some troubles with dictionary sources and investigate the causes.
 - `loading_duration` ([Float32](../../sql-reference/data-types/float.md)) — Duration of a dictionary loading.
 - `last_exception` ([String](../../sql-reference/data-types/string.md)) — Text of the error that occurs when creating or reloading the dictionary if the dictionary couldn’t be created.
 - `comment` ([String](../../sql-reference/data-types/string.md)) — Text of the comment to dictionary.
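
A hedged example of using this table to spot dictionaries that failed to load:

```sql
SELECT name, status, last_exception
FROM system.dictionaries
WHERE status != 'LOADED';
```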
@@ -15,7 +15,7 @@ Columns:

 - `command` ([String](/docs/en/sql-reference/data-types/string.md)) — The mutation command string (the part of the query after `ALTER TABLE [db.]table`).

-- `create_time` ([Datetime](/docs/en/sql-reference/data-types/datetime.md)) — Date and time when the mutation command was submitted for execution.
+- `create_time` ([DateTime](/docs/en/sql-reference/data-types/datetime.md)) — Date and time when the mutation command was submitted for execution.

 - `block_numbers.partition_id` ([Array](/docs/en/sql-reference/data-types/array.md)([String](/docs/en/sql-reference/data-types/string.md))) — For mutations of replicated tables, the array contains the partitions' IDs (one record for each partition). For mutations of non-replicated tables the array is empty.

@@ -39,7 +39,7 @@ If there were problems with mutating some data parts, the following columns contain additional information:

 - `latest_failed_part` ([String](/docs/en/sql-reference/data-types/string.md)) — The name of the most recent part that could not be mutated.

-- `latest_fail_time` ([Datetime](/docs/en/sql-reference/data-types/datetime.md)) — The date and time of the most recent part mutation failure.
+- `latest_fail_time` ([DateTime](/docs/en/sql-reference/data-types/datetime.md)) — The date and time of the most recent part mutation failure.

 - `latest_fail_reason` ([String](/docs/en/sql-reference/data-types/string.md)) — The exception message that caused the most recent part mutation failure.

@@ -29,7 +29,7 @@ Columns:
 - `MUTATE_PART` — Apply one or several mutations to the part.
 - `ALTER_METADATA` — Apply alter modification according to global /metadata and /columns paths.

-- `create_time` ([Datetime](../../sql-reference/data-types/datetime.md)) — Date and time when the task was submitted for execution.
+- `create_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Date and time when the task was submitted for execution.

 - `required_quorum` ([UInt32](../../sql-reference/data-types/int-uint.md)) — The number of replicas waiting for the task to complete with confirmation of completion. This column is only relevant for the `GET_PARTS` task.

@@ -47,13 +47,13 @@ Columns:

 - `last_exception` ([String](../../sql-reference/data-types/string.md)) — Text message about the last error that occurred (if any).

-- `last_attempt_time` ([Datetime](../../sql-reference/data-types/datetime.md)) — Date and time when the task was last attempted.
+- `last_attempt_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Date and time when the task was last attempted.

 - `num_postponed` ([UInt32](../../sql-reference/data-types/int-uint.md)) — The number of postponed tasks.

 - `postpone_reason` ([String](../../sql-reference/data-types/string.md)) — The reason why the task was postponed.

-- `last_postpone_time` ([Datetime](../../sql-reference/data-types/datetime.md)) — Date and time when the task was last postponed.
+- `last_postpone_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Date and time when the task was last postponed.

 - `merge_type` ([String](../../sql-reference/data-types/string.md)) — Type of the current merge. Empty if it's a mutation.
@@ -6,7 +6,7 @@ sidebar_label: Date32

 # Date32

-A date. Supports the date range same with [Datetime64](../../sql-reference/data-types/datetime64.md). Stored in four bytes as the number of days since 1900-01-01. Allows storing values till 2299-12-31.
+A date. Supports the same date range as [DateTime64](../../sql-reference/data-types/datetime64.md). Stored in four bytes as the number of days since 1900-01-01. Allows storing values till 2299-12-31.

 **Examples**
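
A hedged illustration of the extended range (a plain `Date` cannot represent these values, `Date32` can):

```sql
SELECT toDate32('1900-01-01') AS first_day,
       toDate32('2299-12-31') AS last_day;
```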
@@ -4,7 +4,7 @@ sidebar_position: 48
 sidebar_label: DateTime
 ---

-# Datetime
+# DateTime

 Allows to store an instant in time, that can be expressed as a calendar date and a time of a day.

@@ -4,7 +4,7 @@ sidebar_position: 49
 sidebar_label: DateTime64
 ---

-# Datetime64
+# DateTime64

 Allows to store an instant in time, that can be expressed as a calendar date and a time of a day, with defined sub-second precision
@@ -1,8 +1,8 @@
 position: 37
-label: 'External Dictionaries'
+label: 'Dictionaries'
 collapsible: true
 collapsed: true
 link:
   type: generated-index
-  title: External Dictionaries
+  title: Dictionaries
   slug: /en/sql-reference/dictionaries/external-dictionaries

@@ -0,0 +1,4 @@
+:::tip
+If you are using a dictionary with ClickHouse Cloud please use the DDL query option to create your dictionaries, and create your dictionary as user `default`.
+Also, verify the list of supported dictionary sources in the [Cloud Compatibility guide](/docs/en/whats-new/cloud-capabilities.md).
+:::
@@ -3,6 +3,7 @@ slug: /en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout
 sidebar_position: 41
 sidebar_label: Storing Dictionaries in Memory
 ---
+import CloudDetails from '@site/docs/en/sql-reference/dictionaries/external-dictionaries/_snippet_dictionary_in_cloud.md';

 # Storing Dictionaries in Memory

@@ -22,7 +23,9 @@ ClickHouse generates an exception for errors with dictionaries. Examples of errors:
 - The dictionary being accessed could not be loaded.
 - Error querying a `cached` dictionary.

-You can view the list of external dictionaries and their statuses in the [system.dictionaries](../../../operations/system-tables/dictionaries.md) table.
+You can view the list of dictionaries and their statuses in the [system.dictionaries](../../../operations/system-tables/dictionaries.md) table.
+
+<CloudDetails />

 The configuration looks like this:
|
@ -3,6 +3,7 @@ slug: /en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-l
|
|||||||
sidebar_position: 42
|
sidebar_position: 42
|
||||||
sidebar_label: Dictionary Updates
|
sidebar_label: Dictionary Updates
|
||||||
---
|
---
|
||||||
|
import CloudDetails from '@site/docs/en/sql-reference/dictionaries/external-dictionaries/_snippet_dictionary_in_cloud.md';
|
||||||
|
|
||||||
# Dictionary Updates
|
# Dictionary Updates
|
||||||
|
|
||||||
@ -12,6 +13,8 @@ Dictionary updates (other than loading for first use) do not block queries. Duri
|
|||||||
|
|
||||||
Example of settings:
|
Example of settings:
|
||||||
|
|
||||||
|
<CloudDetails />
|
||||||
|
|
||||||
``` xml
|
``` xml
|
||||||
<dictionary>
|
<dictionary>
|
||||||
...
|
...
|
||||||
|
@@ -4,12 +4,15 @@ sidebar_position: 46
 sidebar_label: Polygon Dictionaries With Grids
 title: "Polygon dictionaries"
 ---
+import CloudDetails from '@site/docs/en/sql-reference/dictionaries/external-dictionaries/_snippet_dictionary_in_cloud.md';

 Polygon dictionaries allow you to efficiently search for the polygon containing specified points.
 For example: defining a city area by geographical coordinates.

 Example of a polygon dictionary configuration:

+<CloudDetails />
+
 ``` xml
 <dictionary>
     <structure>

@@ -78,7 +81,7 @@ To respond to the query, there is a corresponding cell, and the index for the polygons falling into it is accessed.

 - `POLYGON`. Synonym to `POLYGON_INDEX_CELL`.

-Dictionary queries are carried out using standard [functions](../../../sql-reference/functions/ext-dict-functions.md) for working with external dictionaries.
+Dictionary queries are carried out using standard [functions](../../../sql-reference/functions/ext-dict-functions.md) for working with dictionaries.
 An important difference is that here the keys will be the points for which you want to find the polygon containing them.

 **Example**
@@ -1,12 +1,15 @@
 ---
 slug: /en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources
 sidebar_position: 43
-sidebar_label: Sources of External Dictionaries
+sidebar_label: Dictionary Sources
 ---
+import CloudDetails from '@site/docs/en/sql-reference/dictionaries/external-dictionaries/_snippet_dictionary_in_cloud.md';

-# Sources of External Dictionaries
+# Dictionary Sources

-An external dictionary can be connected to ClickHouse from many different sources.
+<CloudDetails />
+
+A dictionary can be connected to ClickHouse from many different sources.

 If the dictionary is configured using an xml-file, the configuration looks like this:

@@ -65,13 +68,13 @@ Types of sources (`source_type`):
 - [Executable Pool](#dicts-external_dicts_dict_sources-executable_pool)
 - [HTTP(s)](#dicts-external_dicts_dict_sources-http)
 - DBMS
-    - [ODBC](#dicts-external_dicts_dict_sources-odbc)
-    - [MySQL](#dicts-external_dicts_dict_sources-mysql)
-    - [ClickHouse](#dicts-external_dicts_dict_sources-clickhouse)
-    - [MongoDB](#dicts-external_dicts_dict_sources-mongodb)
-    - [Redis](#dicts-external_dicts_dict_sources-redis)
-    - [Cassandra](#dicts-external_dicts_dict_sources-cassandra)
-    - [PostgreSQL](#dicts-external_dicts_dict_sources-postgresql)
+    - [ODBC](#odbc)
+    - [MySQL](#mysql)
+    - [ClickHouse](#clickhouse)
+    - [MongoDB](#mongodb)
+    - [Redis](#redis)
+    - [Cassandra](#cassandra)
+    - [PostgreSQL](#postgresql)

 ## Local File
@@ -3,9 +3,12 @@ slug: /en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure
 sidebar_position: 44
 sidebar_label: Dictionary Key and Fields
 ---
+import CloudDetails from '@site/docs/en/sql-reference/dictionaries/external-dictionaries/_snippet_dictionary_in_cloud.md';

 # Dictionary Key and Fields

+<CloudDetails />
+
 The `structure` clause describes the dictionary key and fields available for queries.

 XML description:

@@ -171,5 +174,5 @@ Configuration fields:

 **See Also**

-- [Functions for working with external dictionaries](../../../sql-reference/functions/ext-dict-functions.md).
+- [Functions for working with dictionaries](../../../sql-reference/functions/ext-dict-functions.md).
@@ -1,10 +1,13 @@
 ---
 slug: /en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict
 sidebar_position: 40
-sidebar_label: Configuring an External Dictionary
+sidebar_label: Configuring a Dictionary
 ---
+import CloudDetails from '@site/docs/en/sql-reference/dictionaries/external-dictionaries/_snippet_dictionary_in_cloud.md';

-# Configuring an External Dictionary
+# Configuring a Dictionary

+<CloudDetails />
+
 If a dictionary is configured using an xml file, then the dictionary configuration has the following structure:
@@ -3,18 +3,21 @@ slug: /en/sql-reference/dictionaries/external-dictionaries/external-dicts
 sidebar_position: 39
 sidebar_label: General Description
 ---
+import CloudDetails from '@site/docs/en/sql-reference/dictionaries/external-dictionaries/_snippet_dictionary_in_cloud.md';

-# External Dictionaries
+# Dictionaries

-You can add your own dictionaries from various data sources. The data source for a dictionary can be a local text or executable file, an HTTP(s) resource, or another DBMS. For more information, see “[Sources for external dictionaries](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md)”.
+<CloudDetails />
+
+You can add your own dictionaries from various data sources. The source for a dictionary can be a ClickHouse table, a local text or executable file, an HTTP(s) resource, or another DBMS. For more information, see “[Dictionary Sources](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md)”.

 ClickHouse:

 - Fully or partially stores dictionaries in RAM.
 - Periodically updates dictionaries and dynamically loads missing values. In other words, dictionaries can be loaded dynamically.
-- Allows to create external dictionaries with xml files or [DDL queries](../../../sql-reference/statements/create/dictionary.md).
+- Allows creating dictionaries with xml files or [DDL queries](../../../sql-reference/statements/create/dictionary.md).

-The configuration of external dictionaries can be located in one or more xml-files. The path to the configuration is specified in the [dictionaries_config](../../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-dictionaries_config) parameter.
+The configuration of dictionaries can be located in one or more xml-files. The path to the configuration is specified in the [dictionaries_config](../../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-dictionaries_config) parameter.

 Dictionaries can be loaded at server startup or at first use, depending on the [dictionaries_lazy_load](../../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-dictionaries_lazy_load) setting.
@@ -24,6 +27,20 @@ The [dictionaries](../../../operations/system-tables/dictionaries.md#system_tables-dictionaries) system table contains:
 - Configuration parameters.
 - Metrics like amount of RAM allocated for the dictionary or a number of queries since the dictionary was successfully loaded.

+## Creating a dictionary with a DDL query
+
+Dictionaries can be created with [DDL queries](../../../sql-reference/statements/create/dictionary.md), and this is the recommended method because with DDL created dictionaries:
+- No additional records are added to server configuration files
+- The dictionaries can be worked with as first-class entities, like tables or views
+- Data can be read directly, using familiar SELECT rather than dictionary table functions
+- The dictionaries can be easily renamed
+
+## Creating a dictionary with a configuration file
+
+:::note
+Creating a dictionary with a configuration file is not applicable to ClickHouse Cloud. Please use DDL (see above), and create your dictionary as user `default`.
+:::
+
 The dictionary configuration file has the following format:

 ``` xml
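
To illustrate the DDL approach recommended above, a hedged sketch (the dictionary, source table, and columns are hypothetical):

```sql
CREATE DICTIONARY country_names
(
    code UInt64,
    name String
)
PRIMARY KEY code
SOURCE(CLICKHOUSE(TABLE 'countries'))
LAYOUT(FLAT())
LIFETIME(MIN 300 MAX 600);

-- A DDL-created dictionary is a first-class entity and can be read with a familiar SELECT:
SELECT name FROM country_names WHERE code = 42;
```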
@@ -44,18 +61,17 @@ The dictionary configuration file has the following format:

 You can [configure](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict.md) any number of dictionaries in the same file.

-[DDL queries for dictionaries](../../../sql-reference/statements/create/dictionary.md) does not require any additional records in server configuration. They allow to work with dictionaries as first-class entities, like tables or views.
-
 :::note
-You can convert values for a small dictionary by describing it in a `SELECT` query (see the [transform](../../../sql-reference/functions/other-functions.md) function). This functionality is not related to external dictionaries.
+You can convert values for a small dictionary by describing it in a `SELECT` query (see the [transform](../../../sql-reference/functions/other-functions.md) function). This functionality is not related to dictionaries.
 :::

 ## See Also

-- [Configuring an External Dictionary](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict.md)
+- [Configuring a Dictionary](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict.md)
 - [Storing Dictionaries in Memory](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md)
 - [Dictionary Updates](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md)
-- [Sources of External Dictionaries](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md)
+- [Dictionary Sources](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md)
 - [Dictionary Key and Fields](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md)
-- [Functions for Working with External Dictionaries](../../../sql-reference/functions/ext-dict-functions.md)
+- [Functions for Working with Dictionaries](../../../sql-reference/functions/ext-dict-functions.md)
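
The `transform` approach mentioned in the note above, as a hedged sketch:

```sql
-- Map a handful of values inline, without defining a dictionary:
SELECT transform(number, [1, 2, 3], ['one', 'two', 'three'], 'other') AS word
FROM system.numbers
LIMIT 5;
```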
@@ -12,6 +12,6 @@ ClickHouse supports special functions for working with dictionaries that can be used in queries.

 ClickHouse supports:

-- [Built-in dictionaries](../../sql-reference/dictionaries/internal-dicts.md#internal_dicts) with a specific [set of functions](../../sql-reference/functions/ym-dict-functions.md).
-- [Plug-in (external) dictionaries](../../sql-reference/dictionaries/external-dictionaries/external-dicts.md#dicts-external-dicts) with a [set of functions](../../sql-reference/functions/ext-dict-functions.md).
+- [Dictionaries](../../sql-reference/dictionaries/external-dictionaries/external-dicts.md#dicts-external-dicts) with a [set of functions](../../sql-reference/functions/ext-dict-functions.md).
+- [Embedded dictionaries](../../sql-reference/dictionaries/internal-dicts.md#internal_dicts) with a specific [set of functions](../../sql-reference/functions/ym-dict-functions.md).
@@ -1,10 +1,13 @@
 ---
 slug: /en/sql-reference/dictionaries/internal-dicts
 sidebar_position: 39
-sidebar_label: Internal Dictionaries
+sidebar_label: Embedded Dictionaries
 ---
+import SelfManaged from '@site/docs/en/_snippets/_self_managed_only_no_roadmap.md';

-# Internal Dictionaries
+# Embedded Dictionaries

+<SelfManaged />
+
 ClickHouse contains a built-in feature for working with a geobase.
@@ -65,6 +65,11 @@ An exception is thrown when dividing by zero or when dividing a minimal negative number by minus one.

 Differs from [modulo](#modulo) in that it returns zero when the divisor is zero.

+## positive_modulo(a, b)
+Calculates the remainder when dividing `a` by `b`. Similar to the function `modulo`, except that `positive_modulo` always returns a non-negative number.
+
+Notice that `positive_modulo` is 4-5 times slower than `modulo`. You should not use `positive_modulo` unless you need a positive result and do not care too much about performance.
+
 ## negate(a), -a operator

 Calculates a number with the reverse sign. The result is always signed.
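
A hedged sketch of the difference between the two functions:

```sql
SELECT modulo(-5, 3);           -- -2: the sign follows the dividend
SELECT positive_modulo(-5, 3);  --  1: the result is always non-negative
```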
@@ -550,7 +550,7 @@ Alias: `dateTrunc`.

 - Value, truncated to the specified part of date.

-Type: [Datetime](../../sql-reference/data-types/datetime.md).
+Type: [DateTime](../../sql-reference/data-types/datetime.md).

 **Example**

@@ -881,7 +881,7 @@ now([timezone])

 - Current date and time.

-Type: [Datetime](../../sql-reference/data-types/datetime.md).
+Type: [DateTime](../../sql-reference/data-types/datetime.md).

 **Example**

@@ -932,7 +932,7 @@ now64([scale], [timezone])

 - Current date and time with sub-second precision.

-Type: [Datetime64](../../sql-reference/data-types/datetime64.md).
+Type: [DateTime64](../../sql-reference/data-types/datetime64.md).

 **Example**

@@ -968,7 +968,7 @@ nowInBlock([timezone])

 - Current date and time at the moment of processing of each block of data.

-Type: [Datetime](../../sql-reference/data-types/datetime.md).
+Type: [DateTime](../../sql-reference/data-types/datetime.md).

 **Example**
@@ -1,20 +1,20 @@
 ---
 slug: /en/sql-reference/functions/ext-dict-functions
 sidebar_position: 58
-sidebar_label: External Dictionaries
+sidebar_label: Dictionaries
 ---

+# Functions for Working with Dictionaries
+
 :::note
 For dictionaries created with [DDL queries](../../sql-reference/statements/create/dictionary.md), the `dict_name` parameter must be fully specified, like `<database>.<dict_name>`. Otherwise, the current database is used.
 :::

-# Functions for Working with External Dictionaries
-
-For information on connecting and configuring external dictionaries, see [External dictionaries](../../sql-reference/dictionaries/external-dictionaries/external-dicts.md).
+For information on connecting and configuring dictionaries, see [Dictionaries](../../sql-reference/dictionaries/external-dictionaries/external-dicts.md).

 ## dictGet, dictGetOrDefault, dictGetOrNull

-Retrieves values from an external dictionary.
+Retrieves values from a dictionary.

 ``` sql
 dictGet('dict_name', attr_names, id_expr)
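
A hedged usage sketch for the functions above (the dictionary name, attribute, and keys are hypothetical):

```sql
-- Look up attribute `c1` for key 1; fall back to 'unknown' for a missing key:
SELECT
    dictGet('default.my_dict', 'c1', toUInt64(1)) AS value,
    dictGetOrDefault('default.my_dict', 'c1', toUInt64(999), 'unknown') AS with_default;
```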
@@ -52,7 +52,7 @@ Create a text file `ext-dict-test.csv` containing the following:

 The first column is `id`, the second column is `c1`.

-Configure the external dictionary:
+Configure the dictionary:

 ``` xml
 <clickhouse>

@@ -112,7 +112,7 @@ Create a text file `ext-dict-mult.csv` containing the following:

 The first column is `id`, the second is `c1`, the third is `c2`.

-Configure the external dictionary:
+Configure the dictionary:

 ``` xml
 <clickhouse>

@@ -185,7 +185,7 @@ INSERT INTO range_key_dictionary_source_table VALUES(2, toDate('2019-05-20'), toDate('2019-05-20'), 'Second', 'Second');
 INSERT INTO range_key_dictionary_source_table VALUES(3, toDate('2019-05-20'), toDate('2019-05-20'), 'Third', 'Third');
 ```

-Create the external dictionary:
+Create the dictionary:

 ```sql
 CREATE DICTIONARY range_key_dictionary

@@ -226,7 +226,7 @@ Result:

 **See Also**

-- [External Dictionaries](../../sql-reference/dictionaries/external-dictionaries/external-dicts.md)
+- [Dictionaries](../../sql-reference/dictionaries/external-dictionaries/external-dicts.md)

 ## dictHas
@@ -549,3 +549,33 @@ Result:
 │ 3.141592653589793 │
 └───────────────────┘
 ```

+
+## factorial(n)
+
+Computes the factorial of an integer value. It works with any native integer type including UInt(8|16|32|64) and Int(8|16|32|64). The return type is UInt64.
+
+The factorial of 0 is 1. Likewise, the factorial() function returns 1 for any negative value. The maximum positive value for the input argument is 20; a value of 21 or greater will cause an exception to be thrown.
+
+**Syntax**
+
+``` sql
+factorial(n)
+```
+
+**Example**
+
+Query:
+
+``` sql
+SELECT factorial(10);
+```
+
+Result:
+
+``` text
+┌─factorial(10)─┐
+│       3628800 │
+└───────────────┘
+```
@@ -24,6 +24,11 @@ Returns a pseudo-random UInt64 number, evenly distributed among all UInt64-type numbers.

 Uses a linear congruential generator.

+## canonicalRand
+The function generates pseudo-random results: independent and identically distributed values, uniformly distributed in [0, 1).
+
+Non-deterministic. The return type is Float64.
+
 ## randConstant

 Produces a constant column with a random value.
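
A hedged usage sketch (the value differs on every call but always lies in [0, 1)):

```sql
SELECT canonicalRand() AS r;
```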
|
@ -131,7 +131,7 @@ Type: `UInt32`.
|
|||||||
### regionToPopulation(id\[, geobase\])
|
### regionToPopulation(id\[, geobase\])
|
||||||
|
|
||||||
Gets the population for a region.
|
Gets the population for a region.
|
||||||
The population can be recorded in files with the geobase. See the section “External dictionaries”.
|
The population can be recorded in files with the geobase. See the section “Dictionaries”.
|
||||||
If the population is not recorded for the region, it returns 0.
|
If the population is not recorded for the region, it returns 0.
|
||||||
In the geobase, the population might be recorded for child regions, but not for parent regions.
|
In the geobase, the population might be recorded for child regions, but not for parent regions.
|
||||||
|
|
||||||
|
@ -2,9 +2,134 @@
|
|||||||
slug: /en/sql-reference/statements/alter/projection
|
slug: /en/sql-reference/statements/alter/projection
|
||||||
sidebar_position: 49
|
sidebar_position: 49
|
||||||
sidebar_label: PROJECTION
|
sidebar_label: PROJECTION
|
||||||
title: "Manipulating Projections"
|
title: "Projections"
|
||||||
---
|
---
|
||||||
|
|
||||||
|
Projections store data in a format that optimizes query execution, this feature is useful for:
|
||||||
|
- Running queries on a column that is not a part of the primary key
|
||||||
|
- Pre-aggregating columns, it will reduce both computation and IO
|
||||||
|
|
||||||
|
You can define one or more projections for a table, and during the query analysis the projection with the least data to scan will be selected by ClickHouse without modifying the query provided by the user.
|
||||||
|
|
||||||
|
## Example filtering without using primary keys
|
||||||
|
|
||||||
|
Creating the table:
|
||||||
|
```
|
||||||
|
CREATE TABLE visits_order
|
||||||
|
(
|
||||||
|
`user_id` UInt64,
|
||||||
|
`user_name` String,
|
||||||
|
`pages_visited` Nullable(Float64),
|
||||||
|
`user_agent` String
|
||||||
|
)
|
||||||
|
ENGINE = MergeTree()
|
||||||
|
PRIMARY KEY user_agent
|
||||||
|
```
|
||||||
|
Using `ALTER TABLE`, we could add the Projection to an existing table:
|
||||||
|
```
|
||||||
|
ALTER TABLE visits_order ADD PROJECTION user_name_projection (
|
||||||
|
SELECT
|
||||||
|
*
|
||||||
|
ORDER BY user_name
|
||||||
|
)
|
||||||
|
|
||||||
|
ALTER TABLE visits_order MATERIALIZE PROJECTION user_name_projection
|
||||||
|
```
|
||||||
|
Inserting the data:
|
||||||
|
```
|
||||||
|
INSERT INTO visits_order SELECT
|
||||||
|
number,
|
||||||
|
'test',
|
||||||
|
1.5 * (number / 2),
|
||||||
|
'Android'
|
||||||
|
FROM numbers(1, 100);
|
||||||
|
```
|
||||||
|
|
||||||
|
The Projection will allow us to filter by `user_name` fast even if in the original Table `user_name` was not defined as a `PRIMARY_KEY`.
|
||||||
|
At query time ClickHouse determined that less data will be processed if the projection is used, as the data is ordered by `user_name`.
|
||||||
|
```
|
||||||
|
SELECT
|
||||||
|
*
|
||||||
|
FROM visits_order
|
||||||
|
WHERE user_name='test'
|
||||||
|
LIMIT 2
|
||||||
|
```
|
||||||
|
|
||||||
|
To verify that a query is using the projection, we could review the `system.query_log` table. On the `projections` field we have the name of the projection used or empty if none has been used:
|
||||||
|
```
|
||||||
|
SELECT query, projections FROM system.query_log WHERE query_id='<query_id>'
|
||||||
|
```
|
||||||
|
|
||||||
|

## Example pre-aggregation query

Creating the table with the projection:

```
CREATE TABLE visits
(
   `user_id` UInt64,
   `user_name` String,
   `pages_visited` Nullable(Float64),
   `user_agent` String,
   PROJECTION projection_visits_by_user
   (
       SELECT
           user_agent,
           sum(pages_visited)
       GROUP BY user_id, user_agent
   )
)
ENGINE = MergeTree()
ORDER BY user_agent
```

Inserting the data:

```
INSERT INTO visits SELECT
    number,
    'test',
    1.5 * (number / 2),
    'Android'
FROM numbers(1, 100);
```

```
INSERT INTO visits SELECT
    number,
    'test',
    1. * (number / 2),
    'IOS'
FROM numbers(100, 500);
```

We will execute a first query grouping by the field `user_agent`; this query will not use the projection defined, as the pre-aggregation does not match.

```
SELECT
    user_agent,
    count(DISTINCT user_id)
FROM visits
GROUP BY user_agent
```

To use the projection, we could execute queries that select part of, or all of, the pre-aggregation and `GROUP BY` fields.

```
SELECT
    user_agent
FROM visits
WHERE user_id > 50 AND user_id < 150
GROUP BY user_agent
```

```
SELECT
    user_agent,
    sum(pages_visited)
FROM visits
GROUP BY user_id
```

As mentioned before, we could review the `system.query_log` table. In the `projections` field we have the name of the projection used, or an empty value if none was used:

```
SELECT query, projections FROM system.query_log WHERE query_id='<query_id>'
```

# Manipulating Projections

The following operations with [projections](/docs/en/engines/table-engines/mergetree-family/mergetree.md/#projections) are available:

## ADD PROJECTION
@ -5,9 +5,9 @@ sidebar_label: DICTIONARY
title: "CREATE DICTIONARY"
---

Creates a new [external dictionary](../../../sql-reference/dictionaries/external-dictionaries/external-dicts.md) with given [structure](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md), [source](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md), [layout](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md) and [lifetime](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md).
Creates a new [dictionary](../../../sql-reference/dictionaries/external-dictionaries/external-dicts.md) with given [structure](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md), [source](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md), [layout](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md) and [lifetime](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md).

**Syntax**
## Syntax

``` sql
CREATE [OR REPLACE] DICTIONARY [IF NOT EXISTS] [db.]dictionary_name [ON CLUSTER cluster]
@ -25,17 +25,21 @@ SETTINGS(setting_name = setting_value, setting_name = setting_value, ...)
COMMENT 'Comment'
```

External dictionary structure consists of attributes. Dictionary attributes are specified similarly to table columns. The only required attribute property is its type, all other properties may have default values.
The dictionary structure consists of attributes. Dictionary attributes are specified similarly to table columns. The only required attribute property is its type; all other properties may have default values.

`ON CLUSTER` clause allows creating dictionary on a cluster, see [Distributed DDL](../../../sql-reference/distributed-ddl.md).

Depending on dictionary [layout](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md) one or more attributes can be specified as dictionary keys.

For more information, see [External Dictionaries](../../../sql-reference/dictionaries/external-dictionaries/external-dicts.md) section.
## SOURCE

You can add a comment to the dictionary when you creating it using `COMMENT` clause.
The source for a dictionary can be a:

- table in the current ClickHouse service
- table in a remote ClickHouse service
- file available by HTTP(S)
- another database

**Example**
### Create a dictionary from a table in the current ClickHouse service

Input table `source_table`:
|
|||||||
Creating the dictionary:
|
Creating the dictionary:
|
||||||
|
|
||||||
``` sql
|
``` sql
|
||||||
CREATE DICTIONARY dictionary_with_comment
|
CREATE DICTIONARY id_value_dictionary
|
||||||
(
|
(
|
||||||
id UInt64,
|
id UInt64,
|
||||||
value String
|
value String
|
||||||
)
|
)
|
||||||
PRIMARY KEY id
|
PRIMARY KEY id
|
||||||
SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() TABLE 'source_table'))
|
SOURCE(CLICKHOUSE(TABLE 'source_table'))
|
||||||
LAYOUT(FLAT())
|
LAYOUT(FLAT())
|
||||||
LIFETIME(MIN 0 MAX 1000)
|
LIFETIME(MIN 0 MAX 1000)
|
||||||
COMMENT 'The temporary dictionary';
|
|
||||||
```
|
```
|
||||||
|
|
||||||
Output the dictionary:
|
Output the dictionary:
|
||||||
|
|
||||||
``` sql
|
``` sql
|
||||||
SHOW CREATE DICTIONARY dictionary_with_comment;
|
SHOW CREATE DICTIONARY id_value_dictionary;
|
||||||
```
|
```
|
||||||
|
|
||||||
```text
|
```response
|
||||||
┌─statement───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐
|
CREATE DICTIONARY default.id_value_dictionary
|
||||||
│ CREATE DICTIONARY default.dictionary_with_comment
|
|
||||||
(
|
(
|
||||||
`id` UInt64,
|
`id` UInt64,
|
||||||
`value` String
|
`value` String
|
||||||
)
|
)
|
||||||
PRIMARY KEY id
|
PRIMARY KEY id
|
||||||
SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() TABLE 'source_table'))
|
SOURCE(CLICKHOUSE(TABLE 'source_table'))
|
||||||
LIFETIME(MIN 0 MAX 1000)
|
LIFETIME(MIN 0 MAX 1000)
|
||||||
LAYOUT(FLAT())
|
LAYOUT(FLAT())
|
||||||
COMMENT 'The temporary dictionary' │
|
|
||||||
└─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘
|
|
||||||
```
|
```
|
||||||
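
Once created, the dictionary can be queried with the `dictGet` family of functions. A minimal sketch (assuming the `id_value_dictionary` above exists and `source_table` holds the rows shown earlier):

``` sql
SELECT dictGet('id_value_dictionary', 'value', toUInt64(1)) AS value
```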

Output the comment to dictionary:
### Create a dictionary from a table in a remote ClickHouse service

Input table (in the remote ClickHouse service) `source_table`:

``` text
┌─id─┬─value──┐
│  1 │ First  │
│  2 │ Second │
└────┴────────┘
```

Creating the dictionary:

``` sql
SELECT comment FROM system.dictionaries WHERE name == 'dictionary_with_comment' AND database == currentDatabase();
CREATE DICTIONARY id_value_dictionary
(
    id UInt64,
    value String
)
PRIMARY KEY id
SOURCE(CLICKHOUSE(HOST 'HOSTNAME' PORT 9000 USER 'default' PASSWORD 'PASSWORD' TABLE 'source_table' DB 'default'))
LAYOUT(FLAT())
LIFETIME(MIN 0 MAX 1000)
```
```text
┌─comment──────────────────┐
│ The temporary dictionary │
└──────────────────────────┘
```

### Create a dictionary from a file available by HTTP(S)

```sql
CREATE DICTIONARY default.taxi_zone_dictionary
(
    `LocationID` UInt16 DEFAULT 0,
    `Borough` String,
    `Zone` String,
    `service_zone` String
)
PRIMARY KEY LocationID
SOURCE(HTTP(URL 'https://datasets-documentation.s3.eu-west-3.amazonaws.com/nyc-taxi/taxi_zone_lookup.csv' FORMAT 'CSVWithNames'))
LIFETIME(MIN 0 MAX 0)
LAYOUT(HASHED())
```
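
After creation, the dictionary's load status and any source errors can be inspected in `system.dictionaries` (a sketch):

``` sql
SELECT name, status, last_exception FROM system.dictionaries WHERE name = 'taxi_zone_dictionary'
```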

### Create a dictionary from another database

Please see the details in [Dictionary sources](/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md/#dbms).

**See Also**

- [system.dictionaries](../../../operations/system-tables/dictionaries.md) — This table contains information about [external dictionaries](../../../sql-reference/dictionaries/external-dictionaries/external-dicts.md).
- For more information, see the [Dictionaries](../../../sql-reference/dictionaries/external-dictionaries/external-dicts.md) section.
- [system.dictionaries](../../../operations/system-tables/dictionaries.md) — This table contains information about [Dictionaries](../../../sql-reference/dictionaries/external-dictionaries/external-dicts.md).
@ -282,7 +282,7 @@ Each time a query is run with the same `JOIN`, the subquery is run again because

In some cases, it is more efficient to use [IN](../../../sql-reference/operators/in.md) instead of `JOIN`.

If you need a `JOIN` for joining with dimension tables (these are relatively small tables that contain dimension properties, such as names for advertising campaigns), a `JOIN` might not be very convenient due to the fact that the right table is re-accessed for every query. For such cases, there is an “external dictionaries” feature that you should use instead of `JOIN`. For more information, see the [External dictionaries](../../../sql-reference/dictionaries/external-dictionaries/external-dicts.md) section.
If you need a `JOIN` for joining with dimension tables (these are relatively small tables that contain dimension properties, such as names for advertising campaigns), a `JOIN` might not be very convenient because the right table is re-accessed for every query. For such cases, there is a “dictionaries” feature that you should use instead of `JOIN`. For more information, see the [Dictionaries](../../../sql-reference/dictionaries/external-dictionaries/external-dicts.md) section.
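
For instance, a dimension lookup that would otherwise need a `JOIN` can be written with `dictGet` (a sketch; `campaign_names` is a hypothetical dictionary keyed by `campaign_id`, and `events` a hypothetical fact table):

``` sql
SELECT campaign_id, dictGet('campaign_names', 'name', campaign_id) AS campaign_name
FROM events
```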

### Memory Limitations
@ -41,7 +41,7 @@ Purge default roles from a user:
SET DEFAULT ROLE NONE TO user
```

Set all the granted roles as default excepting some of them:
Set all the granted roles as default except for specific roles `role1` and `role2`:

``` sql
SET DEFAULT ROLE ALL EXCEPT role1, role2 TO user
@ -198,7 +198,7 @@ Result:

## SHOW DICTIONARIES

Displays a list of [external dictionaries](../../sql-reference/dictionaries/external-dictionaries/external-dicts.md).
Displays a list of [Dictionaries](../../sql-reference/dictionaries/external-dictionaries/external-dicts.md).

``` sql
SHOW DICTIONARIES [FROM <db>] [LIKE '<pattern>'] [LIMIT <N>] [INTO OUTFILE <filename>] [FORMAT <format>]
```
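
For example, to list up to two dictionaries from the `default` database whose names contain `dict` (a sketch):

``` sql
SHOW DICTIONARIES FROM default LIKE '%dict%' LIMIT 2
```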

@ -110,5 +110,5 @@ SELECT * FROM mysql('localhost:3306', 'test', 'test', 'bayonet', '123');
**See Also**

- [The ‘MySQL’ table engine](../../engines/table-engines/integrations/mysql.md)
- [Using MySQL as a source of external dictionary](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md#dicts-external_dicts_dict_sources-mysql)
- [Using MySQL as a dictionary source](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md#dicts-external_dicts_dict_sources-mysql)
@ -101,5 +101,5 @@ SELECT * FROM odbc('DSN=mysqlconn', 'test', 'test')

## See Also

- [ODBC external dictionaries](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md#dicts-external_dicts_dict_sources-odbc)
- [ODBC dictionaries](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md#dicts-external_dicts_dict_sources-odbc)
- [ODBC table engine](../../engines/table-engines/integrations/odbc.md).
@ -130,6 +130,6 @@ CREATE TABLE pg_table_schema_with_dots (a UInt32)

**See Also**

- [The PostgreSQL table engine](../../engines/table-engines/integrations/postgresql.md)
- [Using PostgreSQL as a source of external dictionary](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md#dicts-external_dicts_dict_sources-postgresql)
- [Using PostgreSQL as a dictionary source](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md#dicts-external_dicts_dict_sources-postgresql)

[Original article](https://clickhouse.com/docs/en/sql-reference/table-functions/postgresql/) <!--hide-->
@ -7,8 +7,8 @@ slug: /ru/operations/system-tables/crash-log

Columns:

- `event_date` ([Datetime](../../sql-reference/data-types/datetime.md)) — Event date.
- `event_date` ([DateTime](../../sql-reference/data-types/datetime.md)) — Event date.
- `event_time` ([Datetime](../../sql-reference/data-types/datetime.md)) — Event time.
- `event_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Event time.
- `timestamp_ns` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Event time with nanoseconds.
- `signal` ([Int32](../../sql-reference/data-types/int-uint.md)) — Number of the signal received by the thread.
- `thread_id` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Thread ID.

@ -15,7 +15,7 @@ slug: /ru/operations/system-tables/mutations

- `command` ([String](../../sql-reference/data-types/string.md)) — The mutation command (the part of the query after `ALTER TABLE [db.]table`).

- `create_time` ([Datetime](../../sql-reference/data-types/datetime.md)) — Date and time when the mutation was created.
- `create_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Date and time when the mutation was created.

- `block_numbers.partition_id` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — For mutations of replicated tables, the array contains the partition IDs (one record per partition). For mutations of non-replicated tables, the array is empty.

@ -39,7 +39,7 @@ slug: /ru/operations/system-tables/mutations

- `latest_failed_part` ([String](../../sql-reference/data-types/string.md)) — The name of the most recent part whose mutation failed.

- `latest_fail_time` ([Datetime](../../sql-reference/data-types/datetime.md)) — Date and time of the most recent mutation failure.
- `latest_fail_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Date and time of the most recent mutation failure.

- `latest_fail_reason` ([String](../../sql-reference/data-types/string.md)) — The reason for the most recent mutation failure.

@ -29,7 +29,7 @@ slug: /ru/operations/system-tables/replication_queue
- `MUTATE_PART` — Apply one or more mutations to a part.
- `ALTER_METADATA` — Apply table structure changes resulting from queries with the `ALTER` expression.

- `create_time` ([Datetime](../../sql-reference/data-types/datetime.md)) — Date and time when the task was submitted for execution.
- `create_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Date and time when the task was submitted for execution.

- `required_quorum` ([UInt32](../../sql-reference/data-types/int-uint.md)) — The number of replicas waiting for the task to complete with confirmation of completion. This column is only relevant for the `GET_PARTS` task.

@ -47,13 +47,13 @@ slug: /ru/operations/system-tables/replication_queue

- `last_exception` ([String](../../sql-reference/data-types/string.md)) — Text of the message about the last error that occurred, if any.

- `last_attempt_time` ([Datetime](../../sql-reference/data-types/datetime.md)) — Date and time of the last attempt to execute the task.
- `last_attempt_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Date and time of the last attempt to execute the task.

- `num_postponed` ([UInt32](../../sql-reference/data-types/int-uint.md)) — The number of postponed tasks.

- `postpone_reason` ([String](../../sql-reference/data-types/string.md)) — The reason the task was postponed.

- `last_postpone_time` ([Datetime](../../sql-reference/data-types/datetime.md)) — Date and time when the task was last postponed.
- `last_postpone_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Date and time when the task was last postponed.

- `merge_type` ([String](../../sql-reference/data-types/string.md)) — The type of the current merge. Empty if it is a mutation.

@ -6,7 +6,7 @@ sidebar_label: Date32

# Date32 {#data_type-datetime32}

Date. Supports the same date range as [Datetime64](../../sql-reference/data-types/datetime64.md). Stored as four bytes, the value corresponds to the number of days from 1900-01-01 to 2299-12-31.
Date. Supports the same date range as [DateTime64](../../sql-reference/data-types/datetime64.md). Stored as four bytes, the value corresponds to the number of days from 1900-01-01 to 2299-12-31.

**Example**

@ -602,7 +602,7 @@ date_trunc(unit, value[, timezone])

- The date and time truncated to the specified part.

Type: [Datetime](../../sql-reference/data-types/datetime.md).
Type: [DateTime](../../sql-reference/data-types/datetime.md).

**Examples**

@ -913,7 +913,7 @@ now([timezone])

- The current date and time.

Type: [Datetime](../../sql-reference/data-types/datetime.md).
Type: [DateTime](../../sql-reference/data-types/datetime.md).

**Example**
|
|||||||
|
|
||||||
列信息:
|
列信息:
|
||||||
|
|
||||||
- `event_date` ([Datetime](../../sql-reference/data-types/datetime.md)) — 事件日期.
|
- `event_date` ([DateTime](../../sql-reference/data-types/datetime.md)) — 事件日期.
|
||||||
- `event_time` ([Datetime](../../sql-reference/data-types/datetime.md)) — 事件时间.
|
- `event_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — 事件时间.
|
||||||
- `timestamp_ns` ([UInt64](../../sql-reference/data-types/int-uint.md)) — 以纳秒为单位的事件时间戳.
|
- `timestamp_ns` ([UInt64](../../sql-reference/data-types/int-uint.md)) — 以纳秒为单位的事件时间戳.
|
||||||
- `signal` ([Int32](../../sql-reference/data-types/int-uint.md)) — 信号编号.
|
- `signal` ([Int32](../../sql-reference/data-types/int-uint.md)) — 信号编号.
|
||||||
- `thread_id` ([UInt64](../../sql-reference/data-types/int-uint.md)) — 线程ID.
|
- `thread_id` ([UInt64](../../sql-reference/data-types/int-uint.md)) — 线程ID.
|
||||||
|
@ -15,7 +15,7 @@ slug: /zh/operations/system-tables/mutations
|
|||||||
|
|
||||||
- `command` ([String](../../sql-reference/data-types/string.md)) — mutation命令字符串(`ALTER TABLE [db.]table`语句之后的部分)。
|
- `command` ([String](../../sql-reference/data-types/string.md)) — mutation命令字符串(`ALTER TABLE [db.]table`语句之后的部分)。
|
||||||
|
|
||||||
- `create_time` ([Datetime](../../sql-reference/data-types/datetime.md)) — mutation命令提交执行的日期和时间。
|
- `create_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — mutation命令提交执行的日期和时间。
|
||||||
|
|
||||||
- `block_numbers.partition_id` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — 对于复制表的mutation,该数组包含分区的ID(每个分区都有一条记录)。对于非复制表的mutation,该数组为空。
|
- `block_numbers.partition_id` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — 对于复制表的mutation,该数组包含分区的ID(每个分区都有一条记录)。对于非复制表的mutation,该数组为空。
|
||||||
|
|
||||||
@ -39,7 +39,7 @@ slug: /zh/operations/system-tables/mutations
|
|||||||
|
|
||||||
- `latest_failed_part`([String](../../sql-reference/data-types/string.md)) — 最近不能mutation的part的名称。
|
- `latest_failed_part`([String](../../sql-reference/data-types/string.md)) — 最近不能mutation的part的名称。
|
||||||
|
|
||||||
- `latest_fail_time`([Datetime](../../sql-reference/data-types/datetime.md)) — 最近的一个mutation失败的时间。
|
- `latest_fail_time`([DateTime](../../sql-reference/data-types/datetime.md)) — 最近的一个mutation失败的时间。
|
||||||
|
|
||||||
- `latest_fail_reason`([String](../../sql-reference/data-types/string.md)) — 导致最近part的mutation失败的异常消息。
|
- `latest_fail_reason`([String](../../sql-reference/data-types/string.md)) — 导致最近part的mutation失败的异常消息。
|
||||||
|
|
||||||
|
@ -29,7 +29,7 @@ slug: /zh/operations/system-tables/replication_queue
|
|||||||
- `MUTATE_PART` — 对分片应用一个或多个突变.
|
- `MUTATE_PART` — 对分片应用一个或多个突变.
|
||||||
- `ALTER_METADATA` — 根据全局 /metadata 和 /columns 路径应用alter修改.
|
- `ALTER_METADATA` — 根据全局 /metadata 和 /columns 路径应用alter修改.
|
||||||
|
|
||||||
- `create_time` ([Datetime](../../sql-reference/data-types/datetime.md)) — 提交任务执行的日期和时间.
|
- `create_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — 提交任务执行的日期和时间.
|
||||||
|
|
||||||
- `required_quorum` ([UInt32](../../sql-reference/data-types/int-uint.md)) — 等待任务完成并确认完成的副本数. 此列仅与 `GET_PARTS` 任务相关.
|
- `required_quorum` ([UInt32](../../sql-reference/data-types/int-uint.md)) — 等待任务完成并确认完成的副本数. 此列仅与 `GET_PARTS` 任务相关.
|
||||||
|
|
||||||
@ -47,13 +47,13 @@ slug: /zh/operations/system-tables/replication_queue
|
|||||||
|
|
||||||
- `last_exception` ([String](../../sql-reference/data-types/string.md)) — 发生的最后一个错误的短信(如果有).
|
- `last_exception` ([String](../../sql-reference/data-types/string.md)) — 发生的最后一个错误的短信(如果有).
|
||||||
|
|
||||||
- `last_attempt_time` ([Datetime](../../sql-reference/data-types/datetime.md)) — 上次尝试任务的日期和时间.
|
- `last_attempt_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — 上次尝试任务的日期和时间.
|
||||||
|
|
||||||
- `num_postponed` ([UInt32](../../sql-reference/data-types/int-uint.md)) — 延期任务数.
|
- `num_postponed` ([UInt32](../../sql-reference/data-types/int-uint.md)) — 延期任务数.
|
||||||
|
|
||||||
- `postpone_reason` ([String](../../sql-reference/data-types/string.md)) — 任务延期的原因.
|
- `postpone_reason` ([String](../../sql-reference/data-types/string.md)) — 任务延期的原因.
|
||||||
|
|
||||||
- `last_postpone_time` ([Datetime](../../sql-reference/data-types/datetime.md)) — 上次推迟任务的日期和时间.
|
- `last_postpone_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — 上次推迟任务的日期和时间.
|
||||||
|
|
||||||
- `merge_type` ([String](../../sql-reference/data-types/string.md)) — 当前合并的类型. 如果是突变则为空.
|
- `merge_type` ([String](../../sql-reference/data-types/string.md)) — 当前合并的类型. 如果是突变则为空.
|
||||||
|
|
||||||
|
@ -152,7 +152,7 @@ sidebar_label: "ANSI\u517C\u5BB9\u6027"
|
|||||||
| F051-02 | TIME(时间)数据类型(并支持用于表达时间的字面量),小数秒精度至少为0 | 否 {.text-danger} | |
|
| F051-02 | TIME(时间)数据类型(并支持用于表达时间的字面量),小数秒精度至少为0 | 否 {.text-danger} | |
|
||||||
| F051-03 | 时间戳数据类型(并支持用于表达时间戳的字面量),小数秒精度至少为0和6 | 是 {.text-danger} | |
|
| F051-03 | 时间戳数据类型(并支持用于表达时间戳的字面量),小数秒精度至少为0和6 | 是 {.text-danger} | |
|
||||||
| F051-04 | 日期、时间和时间戳数据类型的比较谓词 | 是 {.text-success} | |
|
| F051-04 | 日期、时间和时间戳数据类型的比较谓词 | 是 {.text-success} | |
|
||||||
| F051-05 | Datetime 类型和字符串形式表达的时间之间的显式转换 | 是 {.text-success} | |
|
| F051-05 | DateTime 类型和字符串形式表达的时间之间的显式转换 | 是 {.text-success} | |
|
||||||
| F051-06 | CURRENT_DATE | 否 {.text-danger} | 使用`today()`替代 |
|
| F051-06 | CURRENT_DATE | 否 {.text-danger} | 使用`today()`替代 |
|
||||||
| F051-07 | LOCALTIME | 否 {.text-danger} | 使用`now()`替代 |
|
| F051-07 | LOCALTIME | 否 {.text-danger} | 使用`now()`替代 |
|
||||||
| F051-08 | LOCALTIMESTAMP | 否 {.text-danger} | |
|
| F051-08 | LOCALTIMESTAMP | 否 {.text-danger} | |
|
||||||
|
@ -6,7 +6,7 @@ sidebar_position: 49
|
|||||||
sidebar_label: DateTime64
|
sidebar_label: DateTime64
|
||||||
---
|
---
|
||||||
|
|
||||||
# Datetime64 {#data_type-datetime64}
|
# DateTime64 {#data_type-datetime64}
|
||||||
|
|
||||||
此类型允许以日期(date)加时间(time)的形式来存储一个时刻的时间值,具有定义的亚秒精度
|
此类型允许以日期(date)加时间(time)的形式来存储一个时刻的时间值,具有定义的亚秒精度
|
||||||
|
|
||||||
|
@ -539,7 +539,7 @@ date_trunc(unit, value[, timezone])
|
|||||||
|
|
||||||
- 按指定的单位向前取整后的DateTime。
|
- 按指定的单位向前取整后的DateTime。
|
||||||
|
|
||||||
类型: [Datetime](../../sql-reference/data-types/datetime.md).
|
类型: [DateTime](../../sql-reference/data-types/datetime.md).
|
||||||
|
|
||||||
**示例**
|
**示例**
|
||||||
|
|
||||||
@ -850,7 +850,7 @@ now([timezone])
|
|||||||
|
|
||||||
- 当前日期和时间。
|
- 当前日期和时间。
|
||||||
|
|
||||||
类型: [Datetime](../../sql-reference/data-types/datetime.md).
|
类型: [DateTime](../../sql-reference/data-types/datetime.md).
|
||||||
|
|
||||||
**示例**
|
**示例**
|
||||||
|
|
||||||
|
@ -243,6 +243,7 @@ try
    registerAggregateFunctions();

    processConfig();
    initTtyBuffer(toProgressOption(config().getString("progress", "default")));

    /// Includes delayed_interactive.
    if (is_interactive)
@ -1088,8 +1089,6 @@ void Client::processConfig()
    }
    else
    {
        std::string progress = config().getString("progress", "tty");
        need_render_progress = (Poco::icompare(progress, "off") && Poco::icompare(progress, "no") && Poco::icompare(progress, "false") && Poco::icompare(progress, "0"));
        echo_queries = config().getBool("echo", false);
        ignore_error = config().getBool("ignore-error", false);
@ -351,7 +351,7 @@ struct LineChange
        ++pos;
    }

    indent = std::max(255U, num_spaces);
    indent = std::min(255U, num_spaces);
    line.assign(pos, end);

    if (pos == end)
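
The replaced line was a bug: `std::max(255U, num_spaces)` always yields at least 255, while the intent is to cap the indent at 255. A tiny standalone illustration (a sketch, not project code):

```cpp
#include <algorithm>
#include <cassert>

int main()
{
    unsigned num_spaces = 4;
    assert(std::max(255U, num_spaces) == 255); // wrong: a small indent is inflated to 255
    assert(std::min(255U, num_spaces) == 4);   // right: the indent is clamped to at most 255
}
```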

@ -149,19 +149,7 @@ std::string getUserName(uid_t user_id)
Poco::Net::SocketAddress Keeper::socketBindListen(Poco::Net::ServerSocket & socket, const std::string & host, UInt16 port, [[maybe_unused]] bool secure) const
{
    auto address = makeSocketAddress(host, port, &logger());
#if !defined(POCO_CLICKHOUSE_PATCH) || POCO_VERSION < 0x01090100
    if (secure)
        /// Bug in old (<1.9.1) poco, listen() after bind() with reusePort param will fail because have no implementation in SecureServerSocketImpl
        /// https://github.com/pocoproject/poco/pull/2257
        socket.bind(address, /* reuseAddress = */ true);
    else
#endif
#if POCO_VERSION < 0x01080000
    socket.bind(address, /* reuseAddress = */ true);
#else
    socket.bind(address, /* reuseAddress = */ true, /* reusePort = */ config().getBool("listen_reuse_port", false));
#endif

    socket.listen(/* backlog = */ config().getUInt("listen_backlog", 64));

    return address;
@ -37,6 +37,7 @@
#include <AggregateFunctions/registerAggregateFunctions.h>
#include <TableFunctions/registerTableFunctions.h>
#include <Storages/registerStorages.h>
#include <Storages/NamedCollections.h>
#include <Dictionaries/registerDictionaries.h>
#include <Disks/registerDisks.h>
#include <Formats/registerFormats.h>

@ -118,6 +119,8 @@ void LocalServer::initialize(Poco::Util::Application & self)
        config().getUInt("max_io_thread_pool_size", 100),
        config().getUInt("max_io_thread_pool_free_size", 0),
        config().getUInt("io_thread_pool_queue_size", 10000));

    NamedCollectionFactory::instance().initialize(config());
}

@ -414,6 +417,8 @@ try
    registerFormats();

    processConfig();
    initTtyBuffer(toProgressOption(config().getString("progress", "default")));

    applyCmdSettings(global_context);

    if (is_interactive)

@ -489,8 +494,6 @@ void LocalServer::processConfig()
    }
    else
    {
        std::string progress = config().getString("progress", "tty");
        need_render_progress = (Poco::icompare(progress, "off") && Poco::icompare(progress, "no") && Poco::icompare(progress, "false") && Poco::icompare(progress, "0"));
        echo_queries = config().hasOption("echo") || config().hasOption("verbose");
        ignore_error = config().getBool("ignore-error", false);
        is_multiquery = true;

@ -123,7 +123,7 @@ void MetricsTransmitter::transmit(std::vector<ProfileEvents::Count> & prev_count
    {
        for (const auto & name_value : async_metrics_values)
        {
            key_vals.emplace_back(asynchronous_metrics_path_prefix + name_value.first, name_value.second);
            key_vals.emplace_back(asynchronous_metrics_path_prefix + name_value.first, name_value.second.value);
        }
    }
@ -60,6 +60,7 @@
#include <Storages/System/attachInformationSchemaTables.h>
#include <Storages/Cache/ExternalDataSourceCache.h>
#include <Storages/Cache/registerRemoteFileMetadatas.h>
#include <Storages/NamedCollections.h>
#include <AggregateFunctions/registerAggregateFunctions.h>
#include <Functions/UserDefined/IUserDefinedSQLObjectsLoader.h>
#include <Functions/registerFunctions.h>

@ -341,19 +342,7 @@ Poco::Net::SocketAddress Server::socketBindListen(
    [[maybe_unused]] bool secure) const
{
    auto address = makeSocketAddress(host, port, &logger());
#if !defined(POCO_CLICKHOUSE_PATCH) || POCO_VERSION < 0x01090100
    if (secure)
        /// Bug in old (<1.9.1) poco, listen() after bind() with reusePort param will fail because have no implementation in SecureServerSocketImpl
        /// https://github.com/pocoproject/poco/pull/2257
        socket.bind(address, /* reuseAddress = */ true);
    else
#endif
#if POCO_VERSION < 0x01080000
    socket.bind(address, /* reuseAddress = */ true);
#else
    socket.bind(address, /* reuseAddress = */ true, /* reusePort = */ config.getBool("listen_reuse_port", false));
#endif

    /// If caller requests any available port from the OS, discover it after binding.
    if (port == 0)
    {

@ -732,6 +721,8 @@ int Server::main(const std::vector<std::string> & /*args*/)
        config().getUInt("max_io_thread_pool_free_size", 0),
        config().getUInt("io_thread_pool_queue_size", 10000));

    NamedCollectionFactory::instance().initialize(config());

    /// Initialize global local cache for remote filesystem.
    if (config().has("local_cache_for_remote_fs"))
    {

@ -805,41 +796,43 @@ int Server::main(const std::vector<std::string> & /*args*/)
    /// that are interpreted (not executed) but can alter the behaviour of the program as well.

    /// Please keep the below log messages in-sync with the ones in daemon/BaseDaemon.cpp

    String calculated_binary_hash = getHashOfLoadedBinaryHex();

    if (stored_binary_hash.empty())
    {
        LOG_WARNING(log, "Integrity check of the executable skipped because the reference checksum could not be read."
            " (calculated checksum: {})", calculated_binary_hash);
    }
    else if (calculated_binary_hash == stored_binary_hash)
    {
        LOG_INFO(log, "Integrity check of the executable successfully passed (checksum: {})", calculated_binary_hash);
    }
    else
    {
        /// If program is run under debugger, ptrace will fail.
        if (ptrace(PTRACE_TRACEME, 0, nullptr, nullptr) == -1)
        {
            /// Program is run under debugger. Modification of it's binary image is ok for breakpoints.
            global_context->addWarningMessage(
                fmt::format("Server is run under debugger and its binary image is modified (most likely with breakpoints).",
                    calculated_binary_hash)
            );
        }
        else
        {
            throw Exception(ErrorCodes::CORRUPTED_DATA,
                "Calculated checksum of the executable ({0}) does not correspond"
                " to the reference checksum stored in the executable ({1})."
                " This may indicate one of the following:"
                " - the executable {2} was changed just after startup;"
                " - the executable {2} was corrupted on disk due to faulty hardware;"
                " - the loaded executable was corrupted in memory due to faulty hardware;"
                " - the file {2} was intentionally modified;"
                " - a logical error in the code."
                , calculated_binary_hash, stored_binary_hash, executable_path);
        }
    }

    if (stored_binary_hash.empty())
    {
        LOG_WARNING(log, "Integrity check of the executable skipped because the reference checksum could not be read.");
    }
    else
    {
        String calculated_binary_hash = getHashOfLoadedBinaryHex();
        if (calculated_binary_hash == stored_binary_hash)
        {
            LOG_INFO(log, "Integrity check of the executable successfully passed (checksum: {})", calculated_binary_hash);
        }
        else
        {
            /// If program is run under debugger, ptrace will fail.
            if (ptrace(PTRACE_TRACEME, 0, nullptr, nullptr) == -1)
            {
                /// Program is run under debugger. Modification of it's binary image is ok for breakpoints.
                global_context->addWarningMessage(fmt::format(
                    "Server is run under debugger and its binary image is modified (most likely with breakpoints).",
                    calculated_binary_hash));
            }
            else
            {
                throw Exception(
                    ErrorCodes::CORRUPTED_DATA,
                    "Calculated checksum of the executable ({0}) does not correspond"
                    " to the reference checksum stored in the executable ({1})."
                    " This may indicate one of the following:"
                    " - the executable {2} was changed just after startup;"
                    " - the executable {2} was corrupted on disk due to faulty hardware;"
                    " - the loaded executable was corrupted in memory due to faulty hardware;"
                    " - the file {2} was intentionally modified;"
                    " - a logical error in the code.",
                    calculated_binary_hash,
                    stored_binary_hash,
                    executable_path);
            }
        }
    }
@ -1279,6 +1272,7 @@ int Server::main(const std::vector<std::string> & /*args*/)
#if USE_SSL
    CertificateReloader::instance().tryLoad(*config);
#endif
    NamedCollectionFactory::instance().reload(*config);
    ProfileEvents::increment(ProfileEvents::MainConfigLoads);

    /// Must be the last.

@ -1486,11 +1480,6 @@ int Server::main(const std::vector<std::string> & /*args*/)
#endif

    SCOPE_EXIT({
        /// Stop reloading of the main config. This must be done before `global_context->shutdown()` because
        /// otherwise the reloading may pass a changed config to some destroyed parts of ContextSharedPart.
        main_config_reloader.reset();
        access_control.stopPeriodicReloading();

        async_metrics.stop();

        /** Ask to cancel background jobs all table engines,

@ -1789,10 +1778,17 @@ int Server::main(const std::vector<std::string> & /*args*/)

    SCOPE_EXIT_SAFE({
        LOG_DEBUG(log, "Received termination signal.");
        LOG_DEBUG(log, "Waiting for current connections to close.");

        /// Stop reloading of the main config. This must be done before everything else because it
        /// can try to access/modify already deleted objects.
        /// E.g. it can recreate new servers or it may pass a changed config to some destroyed parts of ContextSharedPart.
        main_config_reloader.reset();
        access_control.stopPeriodicReloading();

        is_cancelled = true;

        LOG_DEBUG(log, "Waiting for current connections to close.");

        size_t current_connections = 0;
        {
            std::lock_guard lock(servers_lock);
@ -130,6 +130,7 @@ enum class AccessType
    M(SHOW_ROW_POLICIES, "SHOW POLICIES, SHOW CREATE ROW POLICY, SHOW CREATE POLICY", TABLE, SHOW_ACCESS) \
    M(SHOW_QUOTAS, "SHOW CREATE QUOTA", GLOBAL, SHOW_ACCESS) \
    M(SHOW_SETTINGS_PROFILES, "SHOW PROFILES, SHOW CREATE SETTINGS PROFILE, SHOW CREATE PROFILE", GLOBAL, SHOW_ACCESS) \
    M(SHOW_NAMED_COLLECTIONS, "SHOW NAMED COLLECTIONS", GLOBAL, SHOW_ACCESS) \
    M(SHOW_ACCESS, "", GROUP, ACCESS_MANAGEMENT) \
    M(ACCESS_MANAGEMENT, "", GROUP, ALL) \
    \
@ -465,6 +465,17 @@ std::shared_ptr<const AccessRights> ContextAccess::getAccessRightsWithImplicit()
template <bool throw_if_denied, bool grant_option, typename... Args>
bool ContextAccess::checkAccessImplHelper(AccessFlags flags, const Args &... args) const
{
    if (user_was_dropped)
    {
        /// If the current user has been dropped we always throw an exception (even if `throw_if_denied` is false)
        /// because dropping of the current user is considered as a situation which is exceptional enough to stop
        /// query execution.
        throw Exception(getUserName() + ": User has been dropped", ErrorCodes::UNKNOWN_USER);
    }

    if (is_full_access)
        return true;

    auto access_granted = [&]
    {
        if (trace_log)
@ -483,12 +494,6 @@ bool ContextAccess::checkAccessImplHelper(AccessFlags flags, const Args &... arg
        return false;
    };

    if (is_full_access)
        return true;

    if (user_was_dropped)
        return access_denied("User has been dropped", ErrorCodes::UNKNOWN_USER);

    if (flags & AccessType::CLUSTER && !access_control->doesOnClusterQueriesRequireClusterGrant())
        flags &= ~AccessType::CLUSTER;
@ -95,7 +95,7 @@ public:
    void deserialize(AggregateDataPtr __restrict /* place */, ReadBuffer & buf, std::optional<size_t> /* version */, Arena *) const override
    {
        char c = 0;
        buf.read(c);
        buf.readStrict(c);
    }

    void insertResultInto(AggregateDataPtr __restrict, IColumn & to, Arena *) const override
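
This and the following `read` → `readStrict` replacements harden deserialization against truncated input: `read` may return fewer bytes than requested at end of stream, while `readStrict` throws if it cannot deliver exactly the requested count. A self-contained model of the two behaviours (a sketch, not the real ClickHouse ReadBuffer API):

```cpp
#include <algorithm>
#include <cstring>
#include <stdexcept>
#include <string>

struct Buffer
{
    std::string data;
    size_t pos = 0;

    /// Like read(): copies up to n bytes and returns how many were actually copied.
    size_t read(char * to, size_t n)
    {
        size_t available = std::min(n, data.size() - pos);
        memcpy(to, data.data() + pos, available);
        pos += available;
        return available;
    }

    /// Like readStrict(): throws if fewer than n bytes are available.
    void readStrict(char * to, size_t n)
    {
        if (read(to, n) != n)
            throw std::runtime_error("Attempt to read after EOF");
    }
};

int main()
{
    char out[8];

    Buffer lenient{"abc"};
    size_t got = lenient.read(out, 8); /// got == 3: silently short, corrupt state if unchecked
    (void)got;

    Buffer strict{"abc"};
    try
    {
        strict.readStrict(out, 8); /// throws: truncated input is reported instead of ignored
    }
    catch (const std::runtime_error &) {}
}
```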

@ -118,7 +118,7 @@ public:

    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena *) const override
    {
        buf.read(place, sizeOfData());
        buf.readStrict(place, sizeOfData());
    }

    DataTypePtr getReturnType() const override

@ -270,7 +270,7 @@ public:
        auto & value = this->data(place).value;

        value.resize(size, arena);
        buf.read(reinterpret_cast<char *>(value.data()), size * sizeof(value[0]));
        buf.readStrict(reinterpret_cast<char *>(value.data()), size * sizeof(value[0]));

        if constexpr (Trait::sampler == Sampler::RNG)
        {

@ -343,7 +343,7 @@ struct GroupArrayNodeBase

        Node * node = reinterpret_cast<Node *>(arena->alignedAlloc(sizeof(Node) + size, alignof(Node)));
        node->size = size;
        buf.read(node->data(), size);
        buf.readStrict(node->data(), size);
        return node;
    }
};

@ -144,7 +144,7 @@ public:
    {
        auto & value = this->data(place).value;
        value.resize(size, arena);
        buf.read(reinterpret_cast<char *>(value.data()), size * sizeof(value[0]));
        buf.readStrict(reinterpret_cast<char *>(value.data()), size * sizeof(value[0]));
        this->data(place).sum = value.back();
    }
}

@ -293,7 +293,7 @@ public:
        if (size > max_bins * 2)
            throw Exception("Too many bins", ErrorCodes::TOO_LARGE_ARRAY_SIZE);

        buf.read(reinterpret_cast<char *>(points), size * sizeof(WeightedValue));
        buf.readStrict(reinterpret_cast<char *>(points), size * sizeof(WeightedValue));
    }
};

@ -130,7 +130,7 @@ public:
        auto & value = this->data(place).value;

        value.resize(size, arena);
        buf.read(reinterpret_cast<char *>(value.data()), size * sizeof(value[0]));
        buf.readStrict(reinterpret_cast<char *>(value.data()), size * sizeof(value[0]));
    }

    void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override

@ -515,7 +515,7 @@ public:
        size = rhs_size;

        if (size > 0)
            buf.read(small_data, size);
            buf.readStrict(small_data, size);
    }
    else
    {
@ -527,7 +527,7 @@ public:
    }

    size = rhs_size;
    buf.read(large_data, size);
    buf.readStrict(large_data, size);
    }
}
else

@ -29,6 +29,11 @@ namespace DB
{
struct Settings;

namespace ErrorCodes
{
    extern const int TOO_LARGE_ARRAY_SIZE;
}

enum class SequenceDirection
{
    Forward,
@ -43,6 +48,9 @@ enum SequenceBase
    LastMatch,
};

/// This is for security
static const UInt64 max_node_size_deserialize = 0xFFFFFF;

/// NodeBase used to implement a linked list for storage of SequenceNextNodeImpl
template <typename Node, size_t MaxEventsSize>
struct NodeBase
@ -78,10 +86,12 @@ struct NodeBase
    {
        UInt64 size;
        readVarUInt(size, buf);
        if unlikely (size > max_node_size_deserialize)
            throw Exception("Too large node state size", ErrorCodes::TOO_LARGE_ARRAY_SIZE);

        Node * node = reinterpret_cast<Node *>(arena->alignedAlloc(sizeof(Node) + size, alignof(Node)));
        node->size = size;
        buf.read(node->data(), size);
        buf.readStrict(node->data(), size);

        readBinary(node->event_time, buf);
        UInt64 ulong_bitset;

@ -108,7 +108,7 @@ struct AggregateFunctionUniqUpToData
    readBinary(count, rb);

    if (count <= threshold)
        rb.read(data_ptr, count * sizeof(T));
        rb.readStrict(data_ptr, count * sizeof(T));
    }

    /// ALWAYS_INLINE is required to have better code layout for uniqUpTo function

@ -55,7 +55,7 @@ struct QuantileExactBase
        size_t size = 0;
        readVarUInt(size, buf);
        array.resize(size);
        buf.read(reinterpret_cast<char *>(array.data()), size * sizeof(array[0]));
        buf.readStrict(reinterpret_cast<char *>(array.data()), size * sizeof(array[0]));
    }

    Value get(Float64 level)

@ -316,7 +316,7 @@ public:

        centroids.resize(size);
        // From now, TDigest will be in invalid state if exception is thrown.
        buf.read(reinterpret_cast<char *>(centroids.data()), size * sizeof(centroids[0]));
        buf.readStrict(reinterpret_cast<char *>(centroids.data()), size * sizeof(centroids[0]));

        for (const auto & c : centroids)
        {

@ -112,8 +112,8 @@ struct StatisticalSample
    readVarUInt(size_y, buf);
    x.resize(size_x, arena);
    y.resize(size_y, arena);
    buf.read(reinterpret_cast<char *>(x.data()), size_x * sizeof(x[0]));
    buf.readStrict(reinterpret_cast<char *>(x.data()), size_x * sizeof(x[0]));
    buf.read(reinterpret_cast<char *>(y.data()), size_y * sizeof(y[0]));
    buf.readStrict(reinterpret_cast<char *>(y.data()), size_y * sizeof(y[0]));
    }
};

@ -33,18 +33,27 @@ public:

        if (function_node->getFunctionName() == "count" && !first_argument_constant_literal.isNull())
        {
            resolveAsCountAggregateFunction(*function_node);
            function_node->getArguments().getNodes().clear();
        }
        else if (function_node->getFunctionName() == "sum" && first_argument_constant_literal.getType() == Field::Types::UInt64 &&
        else if (function_node->getFunctionName() == "sum" &&
            first_argument_constant_literal.getType() == Field::Types::UInt64 &&
            first_argument_constant_literal.get<UInt64>() == 1)
        {
            auto result_type = function_node->getResultType();
            AggregateFunctionProperties properties;
            auto aggregate_function = AggregateFunctionFactory::instance().get("count", {}, {}, properties);
            function_node->resolveAsAggregateFunction(std::move(aggregate_function), std::move(result_type));
            resolveAsCountAggregateFunction(*function_node);
            function_node->getArguments().getNodes().clear();
        }
    }

private:
    static inline void resolveAsCountAggregateFunction(FunctionNode & function_node)
    {
        auto function_result_type = function_node.getResultType();

        AggregateFunctionProperties properties;
        auto aggregate_function = AggregateFunctionFactory::instance().get("count", {}, {}, properties);

        function_node.resolveAsAggregateFunction(std::move(aggregate_function), std::move(function_result_type));
    }
};

}
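
The pass rewrites `count(<non-null literal>)` and `sum(1)` into a plain `count()`. The intended equivalence is easy to check by hand (a sketch):

``` sql
SELECT count(1), sum(1), count() FROM numbers(10) -- all three columns return 10
```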

@ -442,7 +442,7 @@ void BackupEntriesCollector::gatherTablesMetadata()
    if (it != database_info.tables.end())
    {
        const auto & partitions = it->second.partitions;
        if (partitions && !storage->supportsBackupPartition())
        if (partitions && storage && !storage->supportsBackupPartition())
        {
            throw Exception(
                ErrorCodes::CANNOT_BACKUP_TABLE,
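
The added `storage &&` guard relies on `&&` short-circuiting, so `storage->supportsBackupPartition()` is never called through a null pointer. A tiny standalone illustration (a sketch, not project code):

```cpp
#include <iostream>
#include <memory>

struct Storage { bool supportsBackupPartition() const { return false; } };

int main()
{
    std::shared_ptr<Storage> storage; /// may legitimately be null at this point
    bool partitions = true;

    /// The right-hand side is evaluated only when storage is non-null.
    if (partitions && storage && !storage->supportsBackupPartition())
        std::cout << "would throw CANNOT_BACKUP_TABLE\n";
}
```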
|
@@ -119,22 +119,27 @@ namespace ProfileEvents
 namespace DB
 {
 
+ProgressOption toProgressOption(std::string progress)
+{
+    boost::to_upper(progress);
+
+    if (progress == "OFF" || progress == "FALSE" || progress == "0" || progress == "NO")
+        return ProgressOption::OFF;
+    if (progress == "TTY" || progress == "ON" || progress == "TRUE" || progress == "1" || progress == "YES")
+        return ProgressOption::TTY;
+    if (progress == "ERR")
+        return ProgressOption::ERR;
+    if (progress == "DEFAULT")
+        return ProgressOption::DEFAULT;
+
+    throw boost::program_options::validation_error(boost::program_options::validation_error::invalid_option_value);
+}
+
 std::istream& operator>> (std::istream & in, ProgressOption & progress)
 {
     std::string token;
     in >> token;
+    progress = toProgressOption(token);
 
-    boost::to_upper(token);
-
-    if (token == "OFF" || token == "FALSE" || token == "0" || token == "NO")
-        progress = ProgressOption::OFF;
-    else if (token == "TTY" || token == "ON" || token == "TRUE" || token == "1" || token == "YES")
-        progress = ProgressOption::TTY;
-    else if (token == "ERR")
-        progress = ProgressOption::ERR;
-    else
-        throw boost::program_options::validation_error(boost::program_options::validation_error::invalid_option_value);
-
     return in;
 }
 
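Extracting toProgressOption out of operator>> gives one reusable, validated string-to-enum conversion for every place a progress value arrives as text. A minimal sketch of the same delegate-to-one-parser pattern using only the standard library (the names mirror the diff, the scaffolding is hypothetical):

#include <algorithm>
#include <cctype>
#include <istream>
#include <sstream>
#include <stdexcept>
#include <string>

enum class Progress { DEFAULT, OFF, TTY, ERR };

/// Single point of truth for parsing: upper-case, then match all accepted spellings.
Progress toProgress(std::string s)
{
    std::transform(s.begin(), s.end(), s.begin(),
                   [](unsigned char c) { return std::toupper(c); });
    if (s == "OFF" || s == "FALSE" || s == "0" || s == "NO")
        return Progress::OFF;
    if (s == "TTY" || s == "ON" || s == "TRUE" || s == "1" || s == "YES")
        return Progress::TTY;
    if (s == "ERR")
        return Progress::ERR;
    if (s == "DEFAULT")
        return Progress::DEFAULT;
    throw std::invalid_argument("invalid progress value: " + s);
}

/// operator>> now only tokenizes; all validation lives in toProgress().
std::istream & operator>>(std::istream & in, Progress & p)
{
    std::string token;
    in >> token;
    p = toProgress(token);
    return in;
}

int main()
{
    std::istringstream in("tty");
    Progress p;
    in >> p;
    return p == Progress::TTY ? 0 : 1;
}
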
@@ -662,56 +667,62 @@ void ClientBase::initLogsOutputStream()
     }
 }
 
-void ClientBase::initTtyBuffer(bool to_err)
+void ClientBase::initTtyBuffer(ProgressOption progress)
 {
-    if (!tty_buf)
+    if (tty_buf)
+        return;
+
+    if (progress == ProgressOption::OFF || (!is_interactive && progress == ProgressOption::DEFAULT))
     {
-        static constexpr auto tty_file_name = "/dev/tty";
+        need_render_progress = false;
+        return;
+    }
 
-        /// Output all progress bar commands to terminal at once to avoid flicker.
-        /// This size is usually greater than the window size.
-        static constexpr size_t buf_size = 1024;
+    static constexpr auto tty_file_name = "/dev/tty";
 
-        if (!to_err)
+    /// Output all progress bar commands to terminal at once to avoid flicker.
+    /// This size is usually greater than the window size.
+    static constexpr size_t buf_size = 1024;
+
+    if (is_interactive || progress == ProgressOption::TTY)
+    {
+        std::error_code ec;
+        std::filesystem::file_status tty = std::filesystem::status(tty_file_name, ec);
+
+        if (!ec && exists(tty) && is_character_file(tty)
+            && (tty.permissions() & std::filesystem::perms::others_write) != std::filesystem::perms::none)
         {
-            std::error_code ec;
-            std::filesystem::file_status tty = std::filesystem::status(tty_file_name, ec);
-
-            if (!ec && exists(tty) && is_character_file(tty)
-                && (tty.permissions() & std::filesystem::perms::others_write) != std::filesystem::perms::none)
+            try
             {
-                try
-                {
-                    tty_buf = std::make_unique<WriteBufferFromFile>(tty_file_name, buf_size);
+                tty_buf = std::make_unique<WriteBufferFromFile>(tty_file_name, buf_size);
 
                 /// It is possible that the terminal file has writeable permissions
                 /// but we cannot write anything there. Check it with invisible character.
                 tty_buf->write('\0');
                 tty_buf->next();
 
                 return;
             }
             catch (const Exception & e)
             {
                 if (tty_buf)
                     tty_buf.reset();
 
                 if (e.code() != ErrorCodes::CANNOT_OPEN_FILE)
                     throw;
 
                 /// It is normal if file exists, indicated as writeable but still cannot be opened.
                 /// Fallback to other options.
-                }
             }
         }
-
-        if (stderr_is_a_tty)
-        {
-            tty_buf = std::make_unique<WriteBufferFromFileDescriptor>(STDERR_FILENO, buf_size);
-        }
-        else
-            need_render_progress = false;
     }
+
+    if (stderr_is_a_tty || progress == ProgressOption::ERR)
+    {
+        tty_buf = std::make_unique<WriteBufferFromFileDescriptor>(STDERR_FILENO, buf_size);
+    }
+    else
+        need_render_progress = false;
 }
 
 void ClientBase::updateSuggest(const ASTPtr & ast)
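The rewritten initTtyBuffer makes the fallback chain explicit: bail out early for OFF (or DEFAULT in non-interactive mode), prefer /dev/tty when interactive or explicitly requested, then fall back to stderr. A condensed sketch of that probe-then-fallback decision (chooseProgressTarget is a hypothetical name; the real function additionally does the test write and exception handling shown above):

#include <filesystem>

enum class Progress { DEFAULT, OFF, TTY, ERR };
enum class Target { NONE, TTY_FILE, STDERR_FD };

/// Decide where progress output should go, mirroring the order of checks above.
Target chooseProgressTarget(Progress progress, bool is_interactive, bool stderr_is_a_tty)
{
    if (progress == Progress::OFF || (!is_interactive && progress == Progress::DEFAULT))
        return Target::NONE;

    if (is_interactive || progress == Progress::TTY)
    {
        std::error_code ec;
        auto status = std::filesystem::status("/dev/tty", ec);
        if (!ec && std::filesystem::exists(status) && std::filesystem::is_character_file(status)
            && (status.permissions() & std::filesystem::perms::others_write) != std::filesystem::perms::none)
            return Target::TTY_FILE;
    }

    if (stderr_is_a_tty || progress == Progress::ERR)
        return Target::STDERR_FD;

    return Target::NONE;
}

int main()
{
    /// OFF always maps to no progress output, regardless of the environment.
    return chooseProgressTarget(Progress::OFF, true, true) == Target::NONE ? 0 : 1;
}
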
@@ -1617,6 +1628,14 @@ void ClientBase::processParsedSingleQuery(const String & full_query, const Strin
             global_context->applySettingChange(change);
         }
         global_context->resetSettingsToDefaultValue(set_query->default_settings);
+
+        /// Query parameters inside SET queries should be also saved on the client side
+        /// to override their previous definitions set with --param_* arguments
+        /// and for substitutions to work inside INSERT ... VALUES queries
+        for (const auto & [name, value] : set_query->query_parameters)
+            query_parameters.insert_or_assign(name, value);
+
+        global_context->addQueryParameters(set_query->query_parameters);
     }
     if (const auto * use_query = parsed_query->as<ASTUseQuery>())
     {
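The added loop uses insert_or_assign so a parameter defined via a SET query overrides an earlier --param_* command-line definition instead of being silently ignored. A tiny sketch of that override semantics (names are illustrative):

#include <cassert>
#include <map>
#include <string>

int main()
{
    std::map<std::string, std::string> query_parameters;

    /// Definition coming from a hypothetical --param_limit command-line argument.
    query_parameters.emplace("limit", "10");

    /// A later SET query redefines it; insert_or_assign replaces the old value,
    /// while plain insert()/emplace() would have silently kept "10".
    query_parameters.insert_or_assign("limit", "100");

    assert(query_parameters.at("limit") == "100");
    return 0;
}
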
@@ -2324,7 +2343,7 @@ void ClientBase::init(int argc, char ** argv)
         ("stage", po::value<std::string>()->default_value("complete"), "Request query processing up to specified stage: complete,fetch_columns,with_mergeable_state,with_mergeable_state_after_aggregation,with_mergeable_state_after_aggregation_and_limit")
         ("query_kind", po::value<std::string>()->default_value("initial_query"), "One of initial_query/secondary_query/no_query")
         ("query_id", po::value<std::string>(), "query_id")
-        ("progress", po::value<ProgressOption>()->implicit_value(ProgressOption::TTY, "tty")->default_value(ProgressOption::TTY, "tty"), "Print progress of queries execution - to TTY (default): tty|on|1|true|yes; to STDERR: err; OFF: off|0|false|no")
+        ("progress", po::value<ProgressOption>()->implicit_value(ProgressOption::TTY, "tty")->default_value(ProgressOption::DEFAULT, "default"), "Print progress of queries execution - to TTY: tty|on|1|true|yes; to STDERR non-interactive mode: err; OFF: off|0|false|no; DEFAULT - interactive to TTY, non-interactive is off")
 
         ("disable_suggestion,A", "Disable loading suggestion data. Note that suggestion data is loaded asynchronously through a second connection to ClickHouse server. Also it is reasonable to disable suggestion if you want to paste a query with TAB characters. Shorthand option -A is for those who get used to mysql client.")
         ("time,t", "print query execution time to stderr in non-interactive mode (for benchmarks)")
@@ -2379,11 +2398,6 @@ void ClientBase::init(int argc, char ** argv)
     parseAndCheckOptions(options_description, options, common_arguments);
     po::notify(options);
 
-    if (options["progress"].as<ProgressOption>() == ProgressOption::OFF)
-        need_render_progress = false;
-    else
-        initTtyBuffer(options["progress"].as<ProgressOption>() == ProgressOption::ERR);
-
     if (options.count("version") || options.count("V"))
     {
         showClientVersion();
@@ -2437,6 +2451,9 @@ void ClientBase::init(int argc, char ** argv)
     {
         switch (options["progress"].as<ProgressOption>())
         {
+            case DEFAULT:
+                config().setString("progress", "default");
+                break;
             case OFF:
                 config().setString("progress", "off");
                 break;
@@ -38,10 +38,12 @@ enum MultiQueryProcessingStage
 
 enum ProgressOption
 {
+    DEFAULT,
     OFF,
     TTY,
     ERR,
 };
+ProgressOption toProgressOption(std::string progress);
 std::istream& operator>> (std::istream & in, ProgressOption & progress);
 
 void interruptSignalHandler(int signum);
@@ -153,7 +155,6 @@ private:
 
     void initOutputFormat(const Block & block, ASTPtr parsed_query);
     void initLogsOutputStream();
-    void initTtyBuffer(bool to_err = false);
 
     String prompt() const;
 
@@ -168,6 +169,8 @@ protected:
     static bool isSyncInsertWithData(const ASTInsertQuery & insert_query, const ContextPtr & context);
     bool processMultiQueryFromFile(const String & file_name);
 
+    void initTtyBuffer(ProgressOption progress);
+
     bool is_interactive = false; /// Use either interactive line editing interface or batch mode.
     bool is_multiquery = false;
     bool delayed_interactive = false;
@@ -637,6 +637,8 @@
     M(666, CANNOT_USE_CACHE) \
     M(667, NOT_INITIALIZED) \
     M(668, INVALID_STATE) \
+    M(669, UNKNOWN_NAMED_COLLECTION) \
+    M(670, NAMED_COLLECTION_ALREADY_EXISTS) \
     \
     M(999, KEEPER_EXCEPTION) \
     M(1000, POCO_EXCEPTION) \
@@ -12,6 +12,7 @@
 
 #include <fmt/format.h>
 
+
 namespace Poco { class Logger; }
 
 
@@ -121,11 +122,7 @@ public:
     }
 
 
-    std::string displayText() const
-#if defined(POCO_CLICKHOUSE_PATCH)
-    override
-#endif
-    ;
+    std::string displayText() const override;
 
     ssize_t getLineNumber() const { return line_number; }
     void setLineNumber(int line_number_) { line_number = line_number_;}
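Replacing the POCO_CLICKHOUSE_PATCH conditional with a plain override tightens the contract: if the base-class signature ever drifts, the compiler rejects the declaration instead of silently introducing a new non-virtual function. A minimal illustration (Base and Derived are hypothetical):

#include <string>

struct Base
{
    virtual std::string displayText() const { return "base"; }
    virtual ~Base() = default;
};

struct Derived : Base
{
    /// With override, a mismatch (e.g. a missing const) is a compile error
    /// rather than an accidental overload that never gets called virtually.
    std::string displayText() const override { return "derived"; }
};

int main()
{
    Derived d;
    const Base & b = d;
    return b.displayText() == "derived" ? 0 : 1;
}
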
@@ -63,6 +63,8 @@ private:
     using StopwatchUniquePtr = std::unique_ptr<Stopwatch>;
 
 
+/// Allows to obtain the elapsed time concurrently with restarting the stopwatch.
+/// Allows to atomically compare the elapsed time with a threshold and restart the watch if the elapsed time is not less.
 class AtomicStopwatch
 {
 public:
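The new comments spell out AtomicStopwatch's contract: the elapsed time can be read concurrently, and the compare-against-threshold plus restart happens atomically, so only one of several racing threads wins. A rough sketch of that compare-and-restart idea with std::atomic (an illustration of the concept, not ClickHouse's implementation):

#include <atomic>
#include <chrono>
#include <cstdint>

/// Illustrative only: restart the watch iff at least `threshold` has elapsed,
/// so exactly one of several racing threads performs the periodic work.
class MiniAtomicStopwatch
{
    std::atomic<int64_t> start_ns;

    static int64_t nowNs()
    {
        return std::chrono::duration_cast<std::chrono::nanoseconds>(
            std::chrono::steady_clock::now().time_since_epoch()).count();
    }

public:
    MiniAtomicStopwatch() : start_ns(nowNs()) {}

    /// Returns true (and restarts) only if the elapsed time reached the threshold.
    bool compareAndRestart(std::chrono::nanoseconds threshold)
    {
        int64_t now = nowNs();
        int64_t observed = start_ns.load();
        while (now - observed >= threshold.count())
        {
            /// CAS: if another thread already restarted the watch, `observed`
            /// is refreshed and the loop re-checks against the new start time.
            if (start_ns.compare_exchange_weak(observed, now))
                return true;
        }
        return false;
    }
};

int main()
{
    MiniAtomicStopwatch w;
    return w.compareAndRestart(std::chrono::hours(1)) ? 1 : 0; /// Not elapsed yet.
}
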
@@ -143,7 +143,10 @@ void read(std::string & s, ReadBuffer & in)
         throw Exception("Too large string size while reading from ZooKeeper", Error::ZMARSHALLINGERROR);
 
     s.resize(size);
-    in.read(s.data(), size);
+    size_t read_bytes = in.read(s.data(), size);
+    if (read_bytes != static_cast<size_t>(size))
+        throw Exception(
+            Error::ZMARSHALLINGERROR, "Buffer size read from Zookeeper is not big enough. Expected {}. Got {}", size, read_bytes);
 }
 
 void read(ACL & acl, ReadBuffer & in)
@@ -67,7 +67,7 @@ void read(std::array<char, N> & s, ReadBuffer & in)
     read(size, in);
     if (size != N)
         throw Exception("Unexpected array size while reading from ZooKeeper", Error::ZMARSHALLINGERROR);
-    in.read(s.data(), N);
+    in.readStrict(s.data(), N);
 }
 
 template <typename T>
@@ -21,7 +21,7 @@ public:
     {
     }
 
-    size_t readBig(char * to, size_t n) override;
+    [[nodiscard]] size_t readBig(char * to, size_t n) override;
 
     /// The compressed size of the current block.
     size_t getSizeCompressed() const
@@ -53,7 +53,7 @@ public:
     /// we store this offset inside nextimpl_working_buffer_offset.
     void seek(size_t offset_in_compressed_file, size_t offset_in_decompressed_block) override;
 
-    size_t readBig(char * to, size_t n) override;
+    [[nodiscard]] size_t readBig(char * to, size_t n) override;
 
     void setProfileCallback(const ReadBufferFromFileBase::ProfileCallback & profile_callback_, clockid_t clock_type_ = CLOCK_MONOTONIC_COARSE)
     {
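Marking readBig [[nodiscard]] makes the compiler warn whenever a caller drops the number of bytes actually read, the same class of bug the readStrict changes elsewhere in this diff fix by hand. A small illustration (this readBig is a stub, not the real decompressing reader):

#include <cstddef>

/// A reader whose return value must not be silently dropped.
[[nodiscard]] std::size_t readBig(char * to, std::size_t n)
{
    (void)to;          /// A real implementation would fill `to`.
    return n / 2;      /// May legitimately read less than requested.
}

int main()
{
    char buf[16];
    // readBig(buf, sizeof(buf));   /// Ignoring the result now triggers a compiler warning.
    std::size_t got = readBig(buf, sizeof(buf));
    return got <= sizeof(buf) ? 0 : 1;
}
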
@@ -240,7 +240,7 @@ Int32 HardwareCodecDeflateQpl::doDecompressDataAsynchronous(const char * source,
 
 void HardwareCodecDeflateQpl::flushAsynchronousDecompressRequests()
 {
-    UInt32 n_jobs_processing = decomp_async_job_map.size();
+    auto n_jobs_processing = decomp_async_job_map.size();
     std::map<UInt32, qpl_job *>::iterator it = decomp_async_job_map.begin();
 
     while (n_jobs_processing)
@@ -349,7 +349,7 @@ nuraft::ptr<nuraft::srv_state> KeeperStateManager::read_state()
         auto buffer_size = content_size - sizeof read_checksum - sizeof version;
 
         auto state_buf = nuraft::buffer::alloc(buffer_size);
-        read_buf.read(reinterpret_cast<char *>(state_buf->data_begin()), buffer_size);
+        read_buf.readStrict(reinterpret_cast<char *>(state_buf->data_begin()), buffer_size);
 
         SipHash hash;
         hash.update(version);
@@ -116,7 +116,8 @@ namespace MySQLReplication
             if (!query.starts_with("XA COMMIT"))
                 transaction_complete = false;
         }
-        else if (query.starts_with("SAVEPOINT"))
+        else if (query.starts_with("SAVEPOINT") || query.starts_with("ROLLBACK")
+                 || query.starts_with("RELEASE SAVEPOINT"))
         {
             typ = QUERY_SAVEPOINT;
         }
@@ -941,6 +942,7 @@ namespace MySQLReplication
             {
                 case QUERY_EVENT_MULTI_TXN_FLAG:
                 case QUERY_EVENT_XA:
+                /// Ignore queries that have no impact on the data.
                 case QUERY_SAVEPOINT:
                 {
                     event = std::make_shared<DryRunEvent>(std::move(query->header));
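The widened condition classifies ROLLBACK and RELEASE SAVEPOINT alongside SAVEPOINT: statements with no effect on the data become QUERY_SAVEPOINT and are replayed as dry-run events. A compact sketch of prefix-based classification with C++20 string_view::starts_with (the QueryType enum is illustrative):

#include <string_view>

enum class QueryType { OTHER, SAVEPOINT_LIKE };

/// Statements that have no impact on the data are grouped together,
/// mirroring the SAVEPOINT / ROLLBACK / RELEASE SAVEPOINT branch above.
QueryType classify(std::string_view query)
{
    if (query.starts_with("SAVEPOINT") || query.starts_with("ROLLBACK")
        || query.starts_with("RELEASE SAVEPOINT"))
        return QueryType::SAVEPOINT_LIKE;
    return QueryType::OTHER;
}

int main()
{
    return classify("ROLLBACK TO SAVEPOINT s1") == QueryType::SAVEPOINT_LIKE ? 0 : 1;
}
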
@@ -175,7 +175,7 @@ public:
     FrontMessageType receiveMessageType()
     {
         char type = 0;
-        in->read(type);
+        in->readStrict(type);
         return static_cast<FrontMessageType>(type);
     }
 
@@ -614,7 +614,7 @@ static constexpr UInt64 operator""_GiB(unsigned long long value)
     \
     M(Bool, load_marks_asynchronously, false, "Load MergeTree marks asynchronously", 0) \
     \
-    M(Bool, use_structure_from_insertion_table_in_table_functions, false, "Use structure from insertion table instead of schema inference from data", 0) \
+    M(UInt64, use_structure_from_insertion_table_in_table_functions, 2, "Use structure from insertion table instead of schema inference from data. Possible values: 0 - disabled, 1 - enabled, 2 - auto", 0) \
     \
     M(UInt64, http_max_tries, 10, "Max attempts to read via http.", 0) \
     M(UInt64, http_retry_initial_backoff_ms, 100, "Min milliseconds for backoff, when retrying read via http", 0) \
@@ -78,6 +78,7 @@ namespace SettingsChangesHistory
 /// It's used to implement `compatibility` setting (see https://github.com/ClickHouse/ClickHouse/issues/35972)
 static std::map<ClickHouseVersion, SettingsChangesHistory::SettingsChanges> settings_changes_history =
 {
+    {"22.11", {{"use_structure_from_insertion_table_in_table_functions", 0, 2, "Improve using structure from insertion table in table functions"}}},
     {"22.9", {{"force_grouping_standard_compatibility", false, true, "Make GROUPING function output the same as in SQL standard and other DBMS"}}},
     {"22.7", {{"cross_to_inner_join_rewrite", 1, 2, "Force rewrite comma join to inner"},
               {"enable_positional_arguments", false, true, "Enable positional arguments feature by default"},
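Changing the setting from Bool to UInt64 turns it into a tri-state knob (0 disabled, 1 enabled, 2 auto), and the new settings_changes_history entry lets the compatibility setting translate old values forward. A toy sketch of reading such a tri-state (names hypothetical):

#include <cstdint>

enum class Mode { Disabled, Enabled, Auto };

/// 0 - disabled, 1 - enabled, 2 - auto, matching the new setting's documentation.
Mode interpretTriState(uint64_t value)
{
    switch (value)
    {
        case 0: return Mode::Disabled;
        case 1: return Mode::Enabled;
        default: return Mode::Auto;   /// 2 (and anything else) falls back to auto.
    }
}

int main()
{
    bool auto_mode = interpretTriState(2) == Mode::Auto;
    return auto_mode ? 0 : 1;
}
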
@@ -355,27 +355,33 @@ private:
 #if defined(OS_LINUX)
         /// Write information about binary checksum. It can be difficult to calculate, so do it only after printing stack trace.
         /// Please keep the below log messages in-sync with the ones in programs/server/Server.cpp
-        String calculated_binary_hash = getHashOfLoadedBinaryHex();
         if (daemon.stored_binary_hash.empty())
         {
-            LOG_FATAL(log, "Integrity check of the executable skipped because the reference checksum could not be read."
-                " (calculated checksum: {})", calculated_binary_hash);
-        }
-        else if (calculated_binary_hash == daemon.stored_binary_hash)
-        {
-            LOG_FATAL(log, "Integrity check of the executable successfully passed (checksum: {})", calculated_binary_hash);
+            LOG_FATAL(log, "Integrity check of the executable skipped because the reference checksum could not be read.");
         }
         else
         {
-            LOG_FATAL(log, "Calculated checksum of the executable ({0}) does not correspond"
-                " to the reference checksum stored in the executable ({1})."
-                " This may indicate one of the following:"
-                " - the executable was changed just after startup;"
-                " - the executable was corrupted on disk due to faulty hardware;"
-                " - the loaded executable was corrupted in memory due to faulty hardware;"
-                " - the file was intentionally modified;"
-                " - a logical error in the code."
-                , calculated_binary_hash, daemon.stored_binary_hash);
+            String calculated_binary_hash = getHashOfLoadedBinaryHex();
+            if (calculated_binary_hash == daemon.stored_binary_hash)
+            {
+                LOG_FATAL(log, "Integrity check of the executable successfully passed (checksum: {})", calculated_binary_hash);
+            }
+            else
+            {
+                LOG_FATAL(
+                    log,
+                    "Calculated checksum of the executable ({0}) does not correspond"
+                    " to the reference checksum stored in the executable ({1})."
+                    " This may indicate one of the following:"
+                    " - the executable was changed just after startup;"
+                    " - the executable was corrupted on disk due to faulty hardware;"
+                    " - the loaded executable was corrupted in memory due to faulty hardware;"
+                    " - the file was intentionally modified;"
+                    " - a logical error in the code.",
+                    calculated_binary_hash,
+                    daemon.stored_binary_hash);
+            }
         }
 #endif
 
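Besides reformatting the log call, the restructuring moves the potentially expensive getHashOfLoadedBinaryHex() call into the branch that actually needs it, so no hash is computed when the reference checksum is absent. The general shape (computeExpensiveHash and integrityStatus are illustrative stand-ins):

#include <string>

std::string computeExpensiveHash() { return "abc123"; }  /// Stand-in for the costly call.

/// Returns a status message; the hash is only computed on the paths that use it.
std::string integrityStatus(const std::string & stored_hash)
{
    if (stored_hash.empty())
        return "skipped: reference checksum could not be read";

    std::string calculated = computeExpensiveHash();   /// Evaluated lazily, only here.
    if (calculated == stored_hash)
        return "passed (checksum: " + calculated + ")";
    return "FAILED: " + calculated + " != " + stored_hash;
}

int main()
{
    return integrityStatus("").empty() ? 1 : 0;
}
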
@@ -136,11 +136,7 @@ protected:
     /// fork the main process and watch if it was killed
     void setupWatchdog();
 
-    void waitForTerminationRequest()
-#if defined(POCO_CLICKHOUSE_PATCH) || POCO_VERSION >= 0x02000000 // in old upstream poco not vitrual
-    override
-#endif
-    ;
+    void waitForTerminationRequest() override;
     /// thread safe
     virtual void onInterruptSignals(int signal_id);
 
@@ -116,6 +116,15 @@ template <typename A, typename B> struct ResultOfModulo
     using Type = std::conditional_t<std::is_floating_point_v<A> || std::is_floating_point_v<B>, Float64, Type0>;
 };
 
+template <typename A, typename B> struct ResultOfPositiveModulo
+{
+    /// function positive_modulo always return non-negative number.
+    static constexpr size_t size_of_result = sizeof(B);
+    using Type0 = typename Construct<false, false, size_of_result>::Type;
+    using Type = std::conditional_t<std::is_floating_point_v<A> || std::is_floating_point_v<B>, Float64, Type0>;
+};
+
+
 template <typename A, typename B> struct ResultOfModuloLegacy
 {
     using Type0 = typename Construct<is_signed_v<A> || is_signed_v<B>, false, sizeof(B)>::Type;
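ResultOfPositiveModulo builds an unsigned result type (Construct<false, false, sizeof(B)>) because positive_modulo always returns a non-negative value, unlike C++'s %, which follows the sign of the dividend. A small worked sketch of the arithmetic (pmod is a hypothetical helper, not ClickHouse's implementation):

#include <cassert>
#include <cstdint>

/// C++'s % takes the sign of the dividend: -7 % 3 == -1.
/// positive_modulo normalizes into [0, |b|): pmod(-7, 3) == 2.
uint64_t pmod(int64_t a, int64_t b)
{
    int64_t r = a % b;
    if (r < 0)
        r += (b < 0 ? -b : b);
    return static_cast<uint64_t>(r);   /// The result always fits an unsigned type.
}

int main()
{
    assert(-7 % 3 == -1);
    assert(pmod(-7, 3) == 2);
    assert(pmod(7, 3) == 1);
    return 0;
}
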
@@ -47,7 +47,7 @@ void SerializationIPv4::deserializeText(IColumn & column, ReadBuffer & istr, con
     }
 
     char buffer[IPV4_MAX_TEXT_LENGTH + 1] = {'\0'};
-    istr.read(buffer, sizeof(buffer) - 1);
+    [[maybe_unused]] size_t read_bytes = istr.read(buffer, sizeof(buffer) - 1);
     UInt32 ipv4_value = 0;
 
     bool parse_result = parseIPv4(buffer, reinterpret_cast<unsigned char *>(&ipv4_value));
@@ -90,7 +90,7 @@ void SerializationIPv6::deserializeText(IColumn & column, ReadBuffer & istr, con
     }
 
     char buffer[IPV6_MAX_TEXT_LENGTH + 1] = {'\0'};
-    istr.read(buffer, sizeof(buffer) - 1);
+    [[maybe_unused]] size_t read_bytes = istr.read(buffer, sizeof(buffer) - 1);
 
     std::string ipv6_value(IPV6_BINARY_LENGTH, '\0');
 
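Capturing the return value into a [[maybe_unused]] variable documents that a short read is deliberately tolerated in these two deserializers: the buffer is zero-initialized, so the parser simply stops at the terminator. A minimal illustration (readUpTo is a stub):

#include <cstddef>
#include <cstring>

[[nodiscard]] std::size_t readUpTo(char * to, std::size_t n)
{
    std::memset(to, 0, n);   /// Pretend we read fewer bytes than requested.
    return n / 2;
}

int main()
{
    char buffer[16] = {'\0'};
    /// The value is intentionally ignored: the zero-filled tail acts as a
    /// terminator, so a short read is acceptable for text parsing.
    [[maybe_unused]] std::size_t read_bytes = readUpTo(buffer, sizeof(buffer) - 1);
    return 0;
}
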
Some files were not shown because too many files have changed in this diff.