Merge remote-tracking branch 'remotes/clickhouse/master' into sevirov-DOCSUP-5271-edit_translate_russian

Dmitriy committed 2021-01-28 22:58:33 +03:00
commit 48ca41eb22
307 changed files with 3383 additions and 1625 deletions

.pylintrc (new file)

@@ -0,0 +1,45 @@
# vim: ft=config

[BASIC]
max-module-lines=2000
# due to SQL
max-line-length=200
# Drop/decrease them one day:
max-branches=50
max-nested-blocks=10
max-statements=200

[FORMAT]
ignore-long-lines = (# )?<?https?://\S+>?$

[MESSAGES CONTROL]
disable = bad-continuation,
          missing-docstring,
          bad-whitespace,
          too-few-public-methods,
          invalid-name,
          too-many-arguments,
          keyword-arg-before-vararg,
          too-many-locals,
          too-many-instance-attributes,
          cell-var-from-loop,
          fixme,
          too-many-public-methods,
          wildcard-import,
          unused-wildcard-import,
          singleton-comparison,
          # pytest.mark.parametrize is not callable (not-callable)
          not-callable,
          # https://github.com/PyCQA/pylint/issues/3882
          # [Python 3.9] Value 'Optional' is unsubscriptable (unsubscriptable-object) (also Union)
          unsubscriptable-object,
          # Drop them one day:
          redefined-outer-name,
          broad-except,
          bare-except,
          no-else-return,
          global-statement

[SIMILARITIES]
# due to SQL
min-similarity-lines=1000

@@ -851,7 +851,7 @@ public:
     }

     /// Saturation can occur if 29 Feb is mapped to non-leap year.
-    inline time_t addYears(time_t t, Int64 delta) const
+    inline NO_SANITIZE_UNDEFINED time_t addYears(time_t t, Int64 delta) const
     {
         DayNum result_day = addYears(toDayNum(t), delta);

@@ -104,8 +104,3 @@ template <> struct is_big_int<wUInt256> { static constexpr bool value = true; };
 template <typename T>
 inline constexpr bool is_big_int_v = is_big_int<T>::value;
-
-template <typename To, typename From>
-inline To bigint_cast(const From & x [[maybe_unused]])
-{
-    return static_cast<To>(x);
-}

contrib/aws (vendored submodule)

@@ -1 +1 @@
-Subproject commit a220591e335923ce1c19bbf9eb925787f7ab6c13
+Subproject commit 7d48b2c8193679cc4516e5bd68ae4a64b94dae7d

contrib/cassandra (vendored submodule)

@@ -1 +1 @@
-Subproject commit 9cbc1a806df5d40fddbf84533b9873542c6513d8
+Subproject commit b446d7eb68e6962f431e2b3771313bfe9a2bbd93

@@ -43,6 +43,7 @@ RUN apt-get update \
             clang-tidy-${LLVM_VERSION} \
             cmake \
             curl \
+            lsof \
             expect \
             fakeroot \
             git \

@@ -21,13 +21,16 @@ function clone
     git init
     git remote add origin https://github.com/ClickHouse/ClickHouse
-    git fetch --depth=100 origin "$SHA_TO_TEST"
-    git fetch --depth=100 origin master # Used to obtain the list of modified or added tests
+
+    # The network is unreliable, and so is GitHub.
+    for _ in {1..100}; do git fetch --depth=100 origin "$SHA_TO_TEST" && break; sleep 1; done
+
+    # Used to obtain the list of modified or added tests
+    for _ in {1..100}; do git fetch --depth=100 origin master && break; sleep 1; done

     # If not master, try to fetch pull/.../{head,merge}
     if [ "$PR_TO_TEST" != "0" ]
     then
-        git fetch --depth=100 origin "refs/pull/$PR_TO_TEST/*:refs/heads/pull/$PR_TO_TEST/*"
+        for _ in {1..100}; do git fetch --depth=100 origin "refs/pull/$PR_TO_TEST/*:refs/heads/pull/$PR_TO_TEST/*" && break; sleep 1; done
     fi

     git checkout "$SHA_TO_TEST"

@@ -189,14 +192,14 @@ case "$stage" in
             echo "failure" > status.txt
             if ! grep -ao "Received signal.*\|Logical error.*\|Assertion.*failed\|Failed assertion.*\|.*runtime error: .*\|.*is located.*\|SUMMARY: MemorySanitizer:.*\|SUMMARY: ThreadSanitizer:.*" server.log > description.txt
             then
-                echo "Lost connection to server. See the logs" > description.txt
+                echo "Lost connection to server. See the logs." > description.txt
             fi
         else
             # Something different -- maybe the fuzzer itself died? Don't grep the
             # server log in this case, because we will find a message about normal
             # server termination (Received signal 15), which is confusing.
             echo "failure" > status.txt
-            echo "Fuzzer failed ($fuzzer_exit_code). See the logs" > description.txt
+            echo "Fuzzer failed ($fuzzer_exit_code). See the logs." > description.txt
         fi
         ;&
     "report")

@@ -1,4 +1,4 @@
-#!/usr/bin/python3
+#!/usr/bin/env python3

 import argparse
 import clickhouse_driver

@@ -55,12 +55,11 @@ function run_tests()
         ADDITIONAL_OPTIONS+=('00000_no_tests_to_skip')
     fi

-    for _ in $(seq 1 "$NUM_TRIES"); do
-        clickhouse-test --testname --shard --zookeeper --hung-check --print-time "$SKIP_LIST_OPT" "${ADDITIONAL_OPTIONS[@]}" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee -a test_output/test_result.txt
-        if [ "${PIPESTATUS[0]}" -ne "0" ]; then
-            break;
-        fi
-    done
+    clickhouse-test --testname --shard --zookeeper --hung-check --print-time \
+        --test-runs "$NUM_TRIES" --jobs 4 \
+        "$SKIP_LIST_OPT" "${ADDITIONAL_OPTIONS[@]}" 2>&1 \
+        | ts '%Y-%m-%d %H:%M:%S' \
+        | tee -a test_output/test_result.txt
 }

 export -f run_tests

@@ -1,7 +1,7 @@
 # docker build -t yandex/clickhouse-style-test .
 FROM ubuntu:20.04

-RUN apt-get update && env DEBIAN_FRONTEND=noninteractive apt-get install --yes shellcheck libxml2-utils git python3-pip && pip3 install codespell
+RUN apt-get update && env DEBIAN_FRONTEND=noninteractive apt-get install --yes shellcheck libxml2-utils git python3-pip python3-pytest && pip3 install codespell

 CMD cd /ClickHouse/utils/check-style && \

@@ -114,6 +114,10 @@ CREATE TABLE big_table (name String, value UInt32) ENGINE = S3('https://storage.
 - `_path` — Path to the file.
 - `_file` — Name of the file.

+**See Also**
+
+- [Virtual columns](../../../engines/table-engines/index.md#table_engines-virtual_columns)
+
 ## S3-related settings {#settings}

 The following settings can be set before query execution or placed into configuration file.

@@ -124,8 +128,29 @@ The following settings can be set before query execution or placed into configur
 Security consideration: if malicious user can specify arbitrary S3 URLs, `s3_max_redirects` must be set to zero to avoid [SSRF](https://en.wikipedia.org/wiki/Server-side_request_forgery) attacks; or alternatively, `remote_host_filter` must be specified in server configuration.

-**See Also**
-
-- [Virtual columns](../../../engines/table-engines/index.md#table_engines-virtual_columns)
+### Endpoint-based settings {#endpointsettings}
+
+The following settings can be specified in the configuration file for a given endpoint (which will be matched by the exact prefix of a URL):
+
+- `endpoint` — Mandatory. Specifies the prefix of an endpoint.
+- `access_key_id` and `secret_access_key` — Optional. Specify credentials to use with the given endpoint.
+- `use_environment_credentials` — Optional, default value is `false`. If set to `true`, the S3 client will try to obtain credentials from environment variables and Amazon EC2 metadata for the given endpoint.
+- `header` — Optional, can be specified multiple times. Adds the specified HTTP header to a request to the given endpoint.
+
+This configuration also applies to S3 disks in the `MergeTree` table engine family.
+
+Example:
+
+```
+<s3>
+    <endpoint-name>
+        <endpoint>https://storage.yandexcloud.net/my-test-bucket-768/</endpoint>
+        <!-- <access_key_id>ACCESS_KEY_ID</access_key_id> -->
+        <!-- <secret_access_key>SECRET_ACCESS_KEY</secret_access_key> -->
+        <!-- <use_environment_credentials>false</use_environment_credentials> -->
+        <!-- <header>Authorization: Bearer SOME-TOKEN</header> -->
+    </endpoint-name>
+</s3>
+```

 [Original article](https://clickhouse.tech/docs/en/operations/table_engines/s3/) <!--hide-->
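A hedged sketch of how these endpoint settings come into play; the table and object names are placeholders. Any S3 table whose URL starts with the configured `<endpoint>` prefix picks up the credentials and headers of that entry automatically.

``` sql
-- Hypothetical table: its URL matches the <endpoint> prefix above,
-- so the credentials and headers from that entry are applied.
CREATE TABLE endpoint_demo (name String, value UInt32)
    ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/demo.csv', 'CSV');

SELECT * FROM endpoint_demo;
```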

@@ -254,7 +254,6 @@ ENGINE = MergeTree()
 PARTITION BY toYYYYMM(EventDate)
 ORDER BY (CounterID, EventDate, intHash32(UserID))
 SAMPLE BY intHash32(UserID)
-SETTINGS index_granularity = 8192
 ```

 ``` sql

@@ -450,7 +449,6 @@ ENGINE = CollapsingMergeTree(Sign)
 PARTITION BY toYYYYMM(StartDate)
 ORDER BY (CounterID, StartDate, intHash32(UserID), VisitID)
 SAMPLE BY intHash32(UserID)
-SETTINGS index_granularity = 8192
 ```

 You can execute those queries using the interactive mode of `clickhouse-client` (just launch it in a terminal without specifying a query in advance) or try some [alternative interface](../interfaces/index.md) if you want.

@@ -27,6 +27,8 @@ We recommend using SQL-driven workflow. Both of the configuration methods work s

 !!! note "Warning"
     You can't manage the same access entity by both configuration methods simultaneously.

+To see all users, roles, profiles, etc. and all their grants, use the [SHOW ACCESS](../sql-reference/statements/show.md#show-access-statement) statement.
+
 ## Usage {#access-control-usage}

 By default, the ClickHouse server provides the `default` user account which is not allowed using SQL-driven access control and account management but has all the rights and permissions. The `default` user account is used in any case when the username is not defined, for example, at login from a client or in distributed queries. In distributed query processing, a default user account is used if the configuration of the server or cluster doesn't specify the [user and password](../engines/table-engines/special/distributed.md) properties.
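The statement referenced above takes no arguments; as a quick illustration:

``` sql
-- Lists every user, role, profile, etc. and their grants.
SHOW ACCESS;
```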

@@ -2489,7 +2489,6 @@ Possible values:

 Default value: `0`.

-
 ## aggregate_functions_null_for_empty {#aggregate_functions_null_for_empty}

 Enables or disables rewriting all aggregate functions in a query, adding [-OrNull](../../sql-reference/aggregate-functions/combinators.md#agg-functions-combinator-ornull) suffix to them. Enable it for SQL standard compatibility.

@@ -2523,7 +2522,6 @@ With `aggregate_functions_null_for_empty = 1` the result would be:

 └───────────────┴──────────────┘
 ```

-
 ## union_default_mode {#union-default-mode}

 Sets a mode for combining `SELECT` query results. The setting is only used when shared with [UNION](../../sql-reference/statements/select/union.md) without explicitly specifying the `UNION ALL` or `UNION DISTINCT`.

@@ -2538,7 +2536,6 @@ Default value: `''`.

 See examples in [UNION](../../sql-reference/statements/select/union.md).

-
 ## data_type_default_nullable {#data_type_default_nullable}

 Allows data types without explicit modifiers [NULL or NOT NULL](../../sql-reference/statements/create/table.md#null-modifiers) in the column definition to be [Nullable](../../sql-reference/data-types/nullable.md#data_type-nullable).

@@ -2550,7 +2547,6 @@ Possible values:

 Default value: `0`.

-
 ## execute_merges_on_single_replica_time_threshold {#execute-merges-on-single-replica-time-threshold}

 Enables special logic to perform merges on replicas.

@@ -2570,4 +2566,15 @@ High values for that threshold may lead to replication delays.

 It can be useful when merges are CPU bounded not IO bounded (performing heavy data compression, calculating aggregate functions or default expressions that require a large amount of calculations, or just a very high number of tiny merges).

+## max_final_threads {#max-final-threads}
+
+Sets the maximum number of parallel threads for the `SELECT` query data read phase with the [FINAL](../../sql-reference/statements/select/from.md#select-from-final) modifier.
+
+Possible values:
+
+- Positive integer.
+- 0 or 1 — Disabled. `SELECT` queries are executed in a single thread.
+
+Default value: `16`.
+
 [Original article](https://clickhouse.tech/docs/en/operations/settings/settings/) <!-- hide -->
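A hedged usage sketch for the new setting; the table name is a placeholder for any `MergeTree`-family table queried with `FINAL`:

``` sql
-- Cap the parallel FINAL read phase at 8 threads for this query only.
SELECT count()
FROM visits FINAL
SETTINGS max_final_threads = 8;
```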

@@ -413,4 +413,68 @@ Result:

 - [log(x)](../../sql-reference/functions/math-functions.md#logx-lnx)

+## sign(x) {#signx}
+
+The `sign` function extracts the sign of a real number.
+
+**Syntax**
+
+``` sql
+sign(x)
+```
+
+**Parameters**
+
+- `x` — Values from `-∞` to `+∞`. Supports all numeric types in ClickHouse.
+
+**Returned value**
+
+- -1 for `x < 0`
+- 0 for `x = 0`
+- 1 for `x > 0`
+
+**Example**
+
+Query:
+
+``` sql
+SELECT sign(0);
+```
+
+Result:
+
+``` text
+┌─sign(0)─┐
+│       0 │
+└─────────┘
+```
+
+Query:
+
+``` sql
+SELECT sign(1);
+```
+
+Result:
+
+``` text
+┌─sign(1)─┐
+│       1 │
+└─────────┘
+```
+
+Query:
+
+``` sql
+SELECT sign(-1);
+```
+
+Result:
+
+``` text
+┌─sign(-1)─┐
+│       -1 │
+└──────────┘
+```

 [Original article](https://clickhouse.tech/docs/en/query_language/functions/math_functions/) <!--hide-->

@@ -5,16 +5,35 @@ toc_title: QUOTA

 # ALTER QUOTA {#alter-quota-statement}

-Changes quotas.
+Changes [quotas](../../../operations/access-rights.md#quotas-management).

 Syntax:

 ``` sql
 ALTER QUOTA [IF EXISTS] name [ON CLUSTER cluster_name]
     [RENAME TO new_name]
-    [KEYED BY {'none' | 'user name' | 'ip address' | 'client key' | 'client key or user name' | 'client key or ip address'}]
-    [FOR [RANDOMIZED] INTERVAL number {SECOND | MINUTE | HOUR | DAY | WEEK | MONTH | QUARTER | YEAR}
-        {MAX { {QUERIES | ERRORS | RESULT ROWS | RESULT BYTES | READ ROWS | READ BYTES | EXECUTION TIME} = number } [,...] |
+    [KEYED BY {user_name | ip_address | client_key | client_key,user_name | client_key,ip_address} | NOT KEYED]
+    [FOR [RANDOMIZED] INTERVAL number {second | minute | hour | day | week | month | quarter | year}
+        {MAX { {queries | errors | result_rows | result_bytes | read_rows | read_bytes | execution_time} = number } [,...] |
         NO LIMITS | TRACKING ONLY} [,...]]
     [TO {role [,...] | ALL | ALL EXCEPT role [,...]}]
 ```

+Keys `user_name`, `ip_address`, `client_key`, `client_key, user_name` and `client_key, ip_address` correspond to the fields in the [system.quotas](../../../operations/system-tables/quotas.md) table.
+
+Parameters `queries`, `errors`, `result_rows`, `result_bytes`, `read_rows`, `read_bytes`, `execution_time` correspond to the fields in the [system.quotas_usage](../../../operations/system-tables/quotas_usage.md) table.
+
+`ON CLUSTER` clause allows creating quotas on a cluster, see [Distributed DDL](../../../sql-reference/distributed-ddl.md).
+
+**Examples**
+
+Limit the maximum number of queries for the current user to 123 queries per 15 months:
+
+``` sql
+ALTER QUOTA IF EXISTS qA FOR INTERVAL 15 month MAX queries = 123 TO CURRENT_USER;
+```
+
+For the default user, limit the maximum execution time to half a second per 30 minutes, and limit the maximum number of queries to 321 and the maximum number of errors to 10 per 5 quarters:
+
+``` sql
+ALTER QUOTA IF EXISTS qB FOR INTERVAL 30 minute MAX execution_time = 0.5, FOR INTERVAL 5 quarter MAX queries = 321, errors = 10 TO default;
+```

@@ -10,7 +10,7 @@ Changes roles.

 Syntax:

 ``` sql
-ALTER ROLE [IF EXISTS] name [ON CLUSTER cluster_name]
-    [RENAME TO new_name]
+ALTER ROLE [IF EXISTS] name1 [ON CLUSTER cluster_name1] [RENAME TO new_name1]
+        [, name2 [ON CLUSTER cluster_name2] [RENAME TO new_name2] ...]
     [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...]
 ```
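A hedged illustration of the multi-entity form introduced above; the role names are placeholders:

``` sql
-- Rename two roles in a single statement.
ALTER ROLE accountant RENAME TO accountant_ro, manager RENAME TO manager_ro;
```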

@@ -10,8 +10,8 @@ Changes row policy.

 Syntax:

 ``` sql
-ALTER [ROW] POLICY [IF EXISTS] name [ON CLUSTER cluster_name] ON [database.]table
-    [RENAME TO new_name]
+ALTER [ROW] POLICY [IF EXISTS] name1 [ON CLUSTER cluster_name1] ON [database1.]table1 [RENAME TO new_name1]
+        [, name2 [ON CLUSTER cluster_name2] ON [database2.]table2 [RENAME TO new_name2] ...]
     [AS {PERMISSIVE | RESTRICTIVE}]
     [FOR SELECT]
     [USING {condition | NONE}][,...]

@@ -10,7 +10,7 @@ Changes settings profiles.

 Syntax:

 ``` sql
-ALTER SETTINGS PROFILE [IF EXISTS] TO name [ON CLUSTER cluster_name]
-    [RENAME TO new_name]
+ALTER SETTINGS PROFILE [IF EXISTS] TO name1 [ON CLUSTER cluster_name1] [RENAME TO new_name1]
+        [, name2 [ON CLUSTER cluster_name2] [RENAME TO new_name2] ...]
     [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | INHERIT 'profile_name'] [,...]
 ```

@@ -10,8 +10,8 @@ Changes ClickHouse user accounts.

 Syntax:

 ``` sql
-ALTER USER [IF EXISTS] name [ON CLUSTER cluster_name]
-    [RENAME TO new_name]
+ALTER USER [IF EXISTS] name1 [ON CLUSTER cluster_name1] [RENAME TO new_name1]
+        [, name2 [ON CLUSTER cluster_name2] [RENAME TO new_name2] ...]
     [IDENTIFIED [WITH {PLAINTEXT_PASSWORD|SHA256_PASSWORD|DOUBLE_SHA1_PASSWORD}] BY {'password'|'hash'}]
     [[ADD|DROP] HOST {LOCAL | NAME 'name' | REGEXP 'name_regexp' | IP 'address' | LIKE 'pattern'} [,...] | ANY | NONE]
     [DEFAULT ROLE role [,...] | ALL | ALL EXCEPT role [,...] ]
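For example, following the syntax above (the user and role names are placeholders, mirroring the `CREATE USER` examples elsewhere in this commit):

``` sql
-- Replace the default roles of an existing account.
ALTER USER john DEFAULT ROLE role1, role2;
```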

@@ -11,19 +11,29 @@ Syntax:

 ``` sql
 CREATE QUOTA [IF NOT EXISTS | OR REPLACE] name [ON CLUSTER cluster_name]
-    [KEYED BY {'none' | 'user name' | 'ip address' | 'forwarded ip address' | 'client key' | 'client key or user name' | 'client key or ip address'}]
-    [FOR [RANDOMIZED] INTERVAL number {SECOND | MINUTE | HOUR | DAY | WEEK | MONTH | QUARTER | YEAR}
-        {MAX { {QUERIES | ERRORS | RESULT ROWS | RESULT BYTES | READ ROWS | READ BYTES | EXECUTION TIME} = number } [,...] |
+    [KEYED BY {user_name | ip_address | client_key | client_key,user_name | client_key,ip_address} | NOT KEYED]
+    [FOR [RANDOMIZED] INTERVAL number {second | minute | hour | day | week | month | quarter | year}
+        {MAX { {queries | errors | result_rows | result_bytes | read_rows | read_bytes | execution_time} = number } [,...] |
         NO LIMITS | TRACKING ONLY} [,...]]
     [TO {role [,...] | ALL | ALL EXCEPT role [,...]}]
 ```

+Keys `user_name`, `ip_address`, `client_key`, `client_key, user_name` and `client_key, ip_address` correspond to the fields in the [system.quotas](../../../operations/system-tables/quotas.md) table.
+
+Parameters `queries`, `errors`, `result_rows`, `result_bytes`, `read_rows`, `read_bytes`, `execution_time` correspond to the fields in the [system.quotas_usage](../../../operations/system-tables/quotas_usage.md) table.
+
 `ON CLUSTER` clause allows creating quotas on a cluster, see [Distributed DDL](../../../sql-reference/distributed-ddl.md).

-## Example {#create-quota-example}
+**Examples**

 Limit the maximum number of queries for the current user to 123 queries per 15 months:

 ``` sql
-CREATE QUOTA qA FOR INTERVAL 15 MONTH MAX QUERIES 123 TO CURRENT_USER
+CREATE QUOTA qA FOR INTERVAL 15 month MAX queries = 123 TO CURRENT_USER;
+```
+
+For the default user, limit the maximum execution time to half a second per 30 minutes, and limit the maximum number of queries to 321 and the maximum number of errors to 10 per 5 quarters:
+
+``` sql
+CREATE QUOTA qB FOR INTERVAL 30 minute MAX execution_time = 0.5, FOR INTERVAL 5 quarter MAX queries = 321, errors = 10 TO default;
 ```

@@ -5,12 +5,12 @@ toc_title: ROLE

 # CREATE ROLE {#create-role-statement}

-Creates a new [role](../../../operations/access-rights.md#role-management). A role is a set of [privileges](../../../sql-reference/statements/grant.md#grant-privileges). A [user](../../../sql-reference/statements/create/user.md) assigned a role gets all the privileges of this role.
+Creates new [roles](../../../operations/access-rights.md#role-management). A role is a set of [privileges](../../../sql-reference/statements/grant.md#grant-privileges). A [user](../../../sql-reference/statements/create/user.md) assigned a role gets all the privileges of this role.

 Syntax:

 ``` sql
-CREATE ROLE [IF NOT EXISTS | OR REPLACE] name
+CREATE ROLE [IF NOT EXISTS | OR REPLACE] name1 [, name2 ...]
     [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...]
 ```
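A minimal sketch; the `accountant` role name is taken from the examples used elsewhere in this commit, and the database name is a placeholder:

``` sql
CREATE ROLE accountant;
GRANT SELECT ON db.* TO accountant;
```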

@@ -5,16 +5,17 @@ toc_title: ROW POLICY

 # CREATE ROW POLICY {#create-row-policy-statement}

-Creates a [filter for rows](../../../operations/access-rights.md#row-policy-management) that a user can read from a table.
+Creates [filters for rows](../../../operations/access-rights.md#row-policy-management) that a user can read from a table.

 Syntax:

 ``` sql
-CREATE [ROW] POLICY [IF NOT EXISTS | OR REPLACE] policy_name [ON CLUSTER cluster_name] ON [db.]table
+CREATE [ROW] POLICY [IF NOT EXISTS | OR REPLACE] policy_name1 [ON CLUSTER cluster_name1] ON [db1.]table1
+        [, policy_name2 [ON CLUSTER cluster_name2] ON [db2.]table2 ...]
     [AS {PERMISSIVE | RESTRICTIVE}]
     [FOR SELECT]
     [USING condition]
-    [TO {role [,...] | ALL | ALL EXCEPT role [,...]}]
+    [TO {role1 [, role2 ...] | ALL | ALL EXCEPT role1 [, role2 ...]}]
 ```

 `ON CLUSTER` clause allows creating row policies on a cluster, see [Distributed DDL](../../../sql-reference/distributed-ddl.md).
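For example, a policy restricting `SELECT` on one table to rows with `a < 1000` for the listed grantees (the same example appears on the Russian page below):

``` sql
CREATE ROW POLICY filter ON mydb.mytable FOR SELECT USING a < 1000 TO accountant, john@localhost;
```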

@@ -5,12 +5,13 @@ toc_title: SETTINGS PROFILE

 # CREATE SETTINGS PROFILE {#create-settings-profile-statement}

-Creates a [settings profile](../../../operations/access-rights.md#settings-profiles-management) that can be assigned to a user or a role.
+Creates [settings profiles](../../../operations/access-rights.md#settings-profiles-management) that can be assigned to a user or a role.

 Syntax:

 ``` sql
-CREATE SETTINGS PROFILE [IF NOT EXISTS | OR REPLACE] TO name [ON CLUSTER cluster_name]
+CREATE SETTINGS PROFILE [IF NOT EXISTS | OR REPLACE] TO name1 [ON CLUSTER cluster_name1]
+        [, name2 [ON CLUSTER cluster_name2] ...]
     [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | INHERIT 'profile_name'] [,...]
 ```

@@ -45,7 +45,7 @@ Creates a table with the same structure as another table. You can specify a diff

 CREATE TABLE [IF NOT EXISTS] [db.]table_name AS table_function()
 ```

-Creates a table with the structure and data returned by a [table function](../../../sql-reference/table-functions/index.md#table-functions).
+Creates a table with the same result as that of the specified [table function](../../../sql-reference/table-functions/index.md#table-functions). The created table also works the same way as the corresponding table function.

 ``` sql
 CREATE TABLE [IF NOT EXISTS] [db.]table_name ENGINE = engine AS SELECT ...
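A hedged sketch of the `AS table_function()` form shown above, assuming the `numbers` table function; the table name is a placeholder:

``` sql
-- The table adopts the structure and behavior of numbers(10).
CREATE TABLE first_ten AS numbers(10);

SELECT count() FROM first_ten; -- returns 10
```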

@@ -5,12 +5,13 @@ toc_title: USER

 # CREATE USER {#create-user-statement}

-Creates a [user account](../../../operations/access-rights.md#user-account-management).
+Creates [user accounts](../../../operations/access-rights.md#user-account-management).

 Syntax:

 ``` sql
-CREATE USER [IF NOT EXISTS | OR REPLACE] name [ON CLUSTER cluster_name]
+CREATE USER [IF NOT EXISTS | OR REPLACE] name1 [ON CLUSTER cluster_name1]
+        [, name2 [ON CLUSTER cluster_name2] ...]
     [IDENTIFIED [WITH {NO_PASSWORD|PLAINTEXT_PASSWORD|SHA256_PASSWORD|SHA256_HASH|DOUBLE_SHA1_PASSWORD|DOUBLE_SHA1_HASH}] BY {'password'|'hash'}]
     [HOST {LOCAL | NAME 'name' | REGEXP 'name_regexp' | IP 'address' | LIKE 'pattern'} [,...] | ANY | NONE]
     [DEFAULT ROLE role [,...]]

@@ -69,7 +70,7 @@ CREATE USER john DEFAULT ROLE role1, role2

 Create the user account `john` and make all his future roles default:

 ``` sql
-ALTER USER user DEFAULT ROLE ALL
+CREATE USER john DEFAULT ROLE ALL
 ```

 When some role is assigned to `john` in the future, it will become default automatically.

@@ -77,5 +78,5 @@ When some role is assigned to `john` in the future, it will become default autom

 Create the user account `john` and make all his future roles default except `role1` and `role2`:

 ``` sql
-ALTER USER john DEFAULT ROLE ALL EXCEPT role1, role2
+CREATE USER john DEFAULT ROLE ALL EXCEPT role1, role2
 ```

@@ -13,7 +13,7 @@ Basic query format:

 INSERT INTO [db.]table [(c1, c2, c3)] VALUES (v11, v12, v13), (v21, v22, v23), ...
 ```

-You can specify a list of columns to insert using the `(c1, c2, c3)`. You can also use an expression with column [matcher](../../sql-reference/statements/select/index.md#asterisk) such as `*` and/or [modifiers](../../sql-reference/statements/select/index.md#select-modifiers) such as [APPLY](../../sql-reference/statements/select/index.md#apply-modifier), [EXCEPT](../../sql-reference/statements/select/index.md#except-modifier), [REPLACE](../../sql-reference/statements/select/index.md#replace-modifier).
+You can specify a list of columns to insert using `(c1, c2, c3)`. You can also use an expression with a column [matcher](../../sql-reference/statements/select/index.md#asterisk) such as `*` and/or [modifiers](../../sql-reference/statements/select/index.md#select-modifiers) such as [APPLY](../../sql-reference/statements/select/index.md#apply-modifier), [EXCEPT](../../sql-reference/statements/select/index.md#apply-modifier), [REPLACE](../../sql-reference/statements/select/index.md#replace-modifier).

 For example, consider the table:

@@ -30,7 +30,6 @@ CREATE TABLE insert_select_testtable
 )
 ENGINE = MergeTree()
 ORDER BY a
-SETTINGS index_granularity = 8192
 ```

 ``` sql
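A hedged sketch of the matcher syntax described above; the column names `a` and `b` are assumptions, since the table definition is elided in this excerpt:

``` sql
-- Insert into every column of insert_select_testtable except b;
-- values are given in the order of the remaining columns.
INSERT INTO insert_select_testtable (* EXCEPT(b)) VALUES (1, 2);
```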

@@ -25,6 +25,8 @@ It is applicable when selecting data from tables that use the [MergeTree](../../
 - [Replicated](../../../engines/table-engines/mergetree-family/replication.md) versions of `MergeTree` engines.
 - [View](../../../engines/table-engines/special/view.md), [Buffer](../../../engines/table-engines/special/buffer.md), [Distributed](../../../engines/table-engines/special/distributed.md), and [MaterializedView](../../../engines/table-engines/special/materializedview.md) engines that operate over other engines, provided they were created over `MergeTree`-engine tables.

+`SELECT` queries with `FINAL` are now executed in parallel and are therefore slightly faster, though drawbacks remain (see below). The [max_final_threads](../../../operations/settings/settings.md#max-final-threads) setting limits the number of threads used.
+
 ### Drawbacks {#drawbacks}

 Queries that use `FINAL` are executed slightly slower than similar queries that don't, because:

@@ -231,7 +231,7 @@ Shows privileges for a user.

 ### Syntax {#show-grants-syntax}

 ``` sql
-SHOW GRANTS [FOR user]
+SHOW GRANTS [FOR user1 [, user2 ...]]
 ```

 If user is not specified, the query returns privileges for the current user.

@@ -245,7 +245,7 @@ Shows parameters that were used at a [user creation](../../sql-reference/stateme

 ### Syntax {#show-create-user-syntax}

 ``` sql
-SHOW CREATE USER [name | CURRENT_USER]
+SHOW CREATE USER [name1 [, name2 ...] | CURRENT_USER]
 ```

 ## SHOW CREATE ROLE {#show-create-role-statement}

@@ -255,7 +255,7 @@ Shows parameters that were used at a [role creation](../../sql-reference/stateme

 ### Syntax {#show-create-role-syntax}

 ``` sql
-SHOW CREATE ROLE name
+SHOW CREATE ROLE name1 [, name2 ...]
 ```

 ## SHOW CREATE ROW POLICY {#show-create-row-policy-statement}

@@ -265,7 +265,7 @@ Shows parameters that were used at a [row policy creation](../../sql-reference/s

 ### Syntax {#show-create-row-policy-syntax}

 ``` sql
-SHOW CREATE [ROW] POLICY name ON [database.]table
+SHOW CREATE [ROW] POLICY name ON [database1.]table1 [, [database2.]table2 ...]
 ```

 ## SHOW CREATE QUOTA {#show-create-quota-statement}

@@ -275,7 +275,7 @@ Shows parameters that were used at a [quota creation](../../sql-reference/statem

 ### Syntax {#show-create-quota-syntax}

 ``` sql
-SHOW CREATE QUOTA [name | CURRENT]
+SHOW CREATE QUOTA [name1 [, name2 ...] | CURRENT]
 ```

 ## SHOW CREATE SETTINGS PROFILE {#show-create-settings-profile-statement}

@@ -285,7 +285,7 @@ Shows parameters that were used at a [settings profile creation](../../sql-refer

 ### Syntax {#show-create-settings-profile-syntax}

 ``` sql
-SHOW CREATE [SETTINGS] PROFILE name
+SHOW CREATE [SETTINGS] PROFILE name1 [, name2 ...]
 ```

 ## SHOW USERS {#show-users-statement}

@@ -307,7 +307,6 @@ Returns a list of [roles](../../operations/access-rights.md#role-management). To

 ``` sql
 SHOW [CURRENT|ENABLED] ROLES
 ```

-
 ## SHOW PROFILES {#show-profiles-statement}

 Returns a list of [setting profiles](../../operations/access-rights.md#settings-profiles-management). To view user accounts parameters, see the system table [settings_profiles](../../operations/system-tables/settings_profiles.md#system_tables-settings_profiles).

@@ -347,7 +346,15 @@ Returns a [quota](../../operations/quotas.md) consumption for all users or for c

 ``` sql
 SHOW [CURRENT] QUOTA
 ```

+## SHOW ACCESS {#show-access-statement}
+
+Shows all [users](../../operations/access-rights.md#user-account-management), [roles](../../operations/access-rights.md#role-management), [profiles](../../operations/access-rights.md#settings-profiles-management), etc. and all their [grants](../../sql-reference/statements/grant.md#grant-privileges).
+
+### Syntax {#show-access-syntax}
+
+``` sql
+SHOW ACCESS
+```
+
 ## SHOW CLUSTER(s) {#show-cluster-statement}

 Returns a list of clusters. All available clusters are listed in the [system.clusters](../../operations/system-tables/clusters.md) table.

@@ -256,7 +256,6 @@ ENGINE = MergeTree()
 PARTITION BY toYYYYMM(EventDate)
 ORDER BY (CounterID, EventDate, intHash32(UserID))
 SAMPLE BY intHash32(UserID)
-SETTINGS index_granularity = 8192
 ```

 ``` sql

@@ -452,7 +451,6 @@ ENGINE = CollapsingMergeTree(Sign)
 PARTITION BY toYYYYMM(StartDate)
 ORDER BY (CounterID, StartDate, intHash32(UserID), VisitID)
 SAMPLE BY intHash32(UserID)
-SETTINGS index_granularity = 8192
 ```

 You can execute those queries using the interactive mode of `clickhouse-client` (just launch it in a terminal without specifying a query in advance) or try some [alternative interface](../interfaces/index.md) if you want.

@@ -256,7 +256,6 @@ ENGINE = MergeTree()
 PARTITION BY toYYYYMM(EventDate)
 ORDER BY (CounterID, EventDate, intHash32(UserID))
 SAMPLE BY intHash32(UserID)
-SETTINGS index_granularity = 8192
 ```

 ``` sql

@@ -452,7 +451,6 @@ ENGINE = CollapsingMergeTree(Sign)
 PARTITION BY toYYYYMM(StartDate)
 ORDER BY (CounterID, StartDate, intHash32(UserID), VisitID)
 SAMPLE BY intHash32(UserID)
-SETTINGS index_granularity = 8192
 ```

 You can execute those queries using the interactive mode of `clickhouse-client` (just launch it in a terminal without specifying a query in advance) or try some [alternative interface](../interfaces/index.md) if you want.

@@ -262,7 +262,6 @@ ENGINE = MergeTree()
 PARTITION BY toYYYYMM(EventDate)
 ORDER BY (CounterID, EventDate, intHash32(UserID))
 SAMPLE BY intHash32(UserID)
-SETTINGS index_granularity = 8192
 ```

 ``` sql

@@ -458,7 +457,6 @@ ENGINE = CollapsingMergeTree(Sign)
 PARTITION BY toYYYYMM(StartDate)
 ORDER BY (CounterID, StartDate, intHash32(UserID), VisitID)
 SAMPLE BY intHash32(UserID)
-SETTINGS index_granularity = 8192
 ```

 You can run these queries using the interactive mode of `clickhouse-client` (just launch it in a terminal without specifying a query in advance) or use an [alternative interface](../interfaces/index.md) if you want.

@@ -254,7 +254,6 @@ ENGINE = MergeTree()
 PARTITION BY toYYYYMM(EventDate)
 ORDER BY (CounterID, EventDate, intHash32(UserID))
 SAMPLE BY intHash32(UserID)
-SETTINGS index_granularity = 8192
 ```

 ``` sql

@@ -450,7 +449,6 @@ ENGINE = CollapsingMergeTree(Sign)
 PARTITION BY toYYYYMM(StartDate)
 ORDER BY (CounterID, StartDate, intHash32(UserID), VisitID)
 SAMPLE BY intHash32(UserID)
-SETTINGS index_granularity = 8192
 ```

 You can execute those queries using the interactive mode of `clickhouse-client` (just launch it in a terminal without specifying a query in advance) or try some [alternative interface](../interfaces/index.md) if you want.

@@ -28,6 +28,7 @@ ClickHouse supports access control management based on

 !!! note "Warning"
     You cannot manage the same access entity with both configuration methods simultaneously.

+To see the list of all users, roles, profiles, etc., together with all their grants, use the [SHOW ACCESS](../sql-reference/statements/show.md#show-access-statement) statement.
+
 ## Usage {#access-control-usage}

@@ -2437,4 +2437,15 @@ SELECT SUM(-1), MAX(0) FROM system.one WHERE 0;

 This setting is useful when merge speed is limited by CPU rather than by I/O (when performing "heavy" data compression, computing aggregate functions or default expressions that require a large amount of computation, or simply when there is a large number of small merges).

+## max_final_threads {#max-final-threads}
+
+Sets the maximum number of parallel threads for the data read phase of a `SELECT` query with the [FINAL](../../sql-reference/statements/select/from.md#select-from-final) modifier.
+
+Possible values:
+
+- Positive integer.
+- 0 or 1 — the setting is disabled. `SELECT` queries are executed in a single thread.
+
+Default value: `16`.
+
 [Original article](https://clickhouse.tech/docs/ru/operations/settings/settings/) <!--hide-->

@@ -5,18 +5,38 @@ toc_title: QUOTA

 # ALTER QUOTA {#alter-quota-statement}

-Changes a quota.
+Changes a [quota](../../../operations/access-rights.md#quotas-management).

-## Syntax {#alter-quota-syntax}
+Syntax:

 ``` sql
 ALTER QUOTA [IF EXISTS] name [ON CLUSTER cluster_name]
     [RENAME TO new_name]
-    [KEYED BY {'none' | 'user name' | 'ip address' | 'client key' | 'client key or user name' | 'client key or ip address'}]
-    [FOR [RANDOMIZED] INTERVAL number {SECOND | MINUTE | HOUR | DAY | WEEK | MONTH | QUARTER | YEAR}
-        {MAX { {QUERIES | ERRORS | RESULT ROWS | RESULT BYTES | READ ROWS | READ BYTES | EXECUTION TIME} = number } [,...] |
+    [KEYED BY {user_name | ip_address | client_key | client_key,user_name | client_key,ip_address} | NOT KEYED]
+    [FOR [RANDOMIZED] INTERVAL number {second | minute | hour | day | week | month | quarter | year}
+        {MAX { {queries | errors | result_rows | result_bytes | read_rows | read_bytes | execution_time} = number } [,...] |
         NO LIMITS | TRACKING ONLY} [,...]]
     [TO {role [,...] | ALL | ALL EXCEPT role [,...]}]
 ```

-[Original article](https://clickhouse.tech/docs/ru/query_language/alter/quota/) <!--hide-->
+Keys `user_name`, `ip_address`, `client_key`, `client_key, user_name` and `client_key, ip_address` correspond to the fields of the [system.quotas](../../../operations/system-tables/quotas.md) table.
+
+Parameters `queries`, `errors`, `result_rows`, `result_bytes`, `read_rows`, `read_bytes`, `execution_time` correspond to the fields of the [system.quotas_usage](../../../operations/system-tables/quotas_usage.md) table.
+
+The `ON CLUSTER` clause allows creating quotas on a cluster, see [Distributed DDL](../../../sql-reference/distributed-ddl.md).
+
+**Examples**
+
+Limit the maximum number of queries for the current user to 123 queries per 15 months:
+
+``` sql
+ALTER QUOTA IF EXISTS qA FOR INTERVAL 15 month MAX queries = 123 TO CURRENT_USER;
+```
+
+For the default user, limit the maximum query execution time to half a second per 30 minutes, and limit the maximum number of queries to 321 and the maximum number of errors to 10 per 5 quarters:
+
+``` sql
+ALTER QUOTA IF EXISTS qB FOR INTERVAL 30 minute MAX execution_time = 0.5, FOR INTERVAL 5 quarter MAX queries = 321, errors = 10 TO default;
+```
+
+[Original article](https://clickhouse.tech/docs/ru/sql-reference/alter/quota/) <!--hide-->

@@ -5,14 +5,14 @@ toc_title: ROLE

 # ALTER ROLE {#alter-role-statement}

-Changes a role.
+Changes roles.

-## Syntax {#alter-role-syntax}
+Syntax:

 ``` sql
-ALTER ROLE [IF EXISTS] name [ON CLUSTER cluster_name]
-    [RENAME TO new_name]
+ALTER ROLE [IF EXISTS] name1 [ON CLUSTER cluster_name1] [RENAME TO new_name1]
+        [, name2 [ON CLUSTER cluster_name2] [RENAME TO new_name2] ...]
     [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...]
 ```

-[Original article](https://clickhouse.tech/docs/ru/query_language/alter/role/) <!--hide-->
+[Original article](https://clickhouse.tech/docs/ru/sql-reference/alter/role/) <!--hide-->

@@ -7,15 +7,15 @@ toc_title: ROW POLICY

 Changes the row access policy.

-## Syntax {#alter-row-policy-syntax}
+Syntax:

 ``` sql
-ALTER [ROW] POLICY [IF EXISTS] name [ON CLUSTER cluster_name] ON [database.]table
-    [RENAME TO new_name]
+ALTER [ROW] POLICY [IF EXISTS] name1 [ON CLUSTER cluster_name1] ON [database1.]table1 [RENAME TO new_name1]
+        [, name2 [ON CLUSTER cluster_name2] ON [database2.]table2 [RENAME TO new_name2] ...]
     [AS {PERMISSIVE | RESTRICTIVE}]
     [FOR SELECT]
     [USING {condition | NONE}][,...]
     [TO {role [,...] | ALL | ALL EXCEPT role [,...]}]
 ```

-[Original article](https://clickhouse.tech/docs/ru/query_language/alter/row-policy/) <!--hide-->
+[Original article](https://clickhouse.tech/docs/ru/sql-reference/alter/row-policy/) <!--hide-->

@@ -7,12 +7,12 @@ toc_title: SETTINGS PROFILE

 Changes settings profiles.

-## Syntax {#alter-settings-profile-syntax}
+Syntax:

 ``` sql
-ALTER SETTINGS PROFILE [IF EXISTS] name [ON CLUSTER cluster_name]
-    [RENAME TO new_name]
+ALTER SETTINGS PROFILE [IF EXISTS] TO name1 [ON CLUSTER cluster_name1] [RENAME TO new_name1]
+        [, name2 [ON CLUSTER cluster_name2] [RENAME TO new_name2] ...]
     [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | INHERIT 'profile_name'] [,...]
 ```

-[Original article](https://clickhouse.tech/docs/ru/query_language/alter/settings-profile) <!--hide-->
+[Original article](https://clickhouse.tech/docs/ru/sql-reference/alter/settings-profile) <!--hide-->

@@ -5,21 +5,19 @@ toc_title: USER

 # ALTER USER {#alter-user-statement}

-Changes a ClickHouse user account.
+Changes ClickHouse user accounts.

-## Syntax {#alter-user-syntax}
+Syntax:

 ``` sql
-ALTER USER [IF EXISTS] name [ON CLUSTER cluster_name]
-    [RENAME TO new_name]
+ALTER USER [IF EXISTS] name1 [ON CLUSTER cluster_name1] [RENAME TO new_name1]
+        [, name2 [ON CLUSTER cluster_name2] [RENAME TO new_name2] ...]
     [IDENTIFIED [WITH {PLAINTEXT_PASSWORD|SHA256_PASSWORD|DOUBLE_SHA1_PASSWORD}] BY {'password'|'hash'}]
     [[ADD|DROP] HOST {LOCAL | NAME 'name' | REGEXP 'name_regexp' | IP 'address' | LIKE 'pattern'} [,...] | ANY | NONE]
     [DEFAULT ROLE role [,...] | ALL | ALL EXCEPT role [,...] ]
     [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...]
 ```

-## Description {#alter-user-dscr}
-
 To execute `ALTER USER` you need the [ALTER USER](../grant.md#grant-access-management) privilege.

 ## Examples {#alter-user-examples}

@@ -7,23 +7,34 @@ toc_title: "\u041a\u0432\u043e\u0442\u0430"

 Creates a [quota](../../../operations/access-rights.md#quotas-management) that can be assigned to a user or a role.

-### Syntax {#create-quota-syntax}
+Syntax:

 ``` sql
 CREATE QUOTA [IF NOT EXISTS | OR REPLACE] name [ON CLUSTER cluster_name]
-    [KEYED BY {'none' | 'user name' | 'ip address' | 'client key' | 'client key or user name' | 'client key or ip address'}]
-    [FOR [RANDOMIZED] INTERVAL number {SECOND | MINUTE | HOUR | DAY | WEEK | MONTH | QUARTER | YEAR}
-        {MAX { {QUERIES | ERRORS | RESULT ROWS | RESULT BYTES | READ ROWS | READ BYTES | EXECUTION TIME} = number } [,...] |
+    [KEYED BY {user_name | ip_address | client_key | client_key, user_name | client_key, ip_address} | NOT KEYED]
+    [FOR [RANDOMIZED] INTERVAL number {second | minute | hour | day | week | month | quarter | year}
+        {MAX { {queries | errors | result_rows | result_bytes | read_rows | read_bytes | execution_time} = number } [,...] |
         NO LIMITS | TRACKING ONLY} [,...]]
     [TO {role [,...] | ALL | ALL EXCEPT role [,...]}]
 ```

+Keys `user_name`, `ip_address`, `client_key`, `client_key, user_name` and `client_key, ip_address` correspond to the fields of the [system.quotas](../../../operations/system-tables/quotas.md) table.
+
+Parameters `queries`, `errors`, `result_rows`, `result_bytes`, `read_rows`, `read_bytes`, `execution_time` correspond to the fields of the [system.quotas_usage](../../../operations/system-tables/quotas_usage.md) table.
+
+The `ON CLUSTER` clause allows creating quotas on a cluster, see [Distributed DDL](../../../sql-reference/distributed-ddl.md).
+
-### Example {#create-quota-example}
+**Examples**

 Limit the maximum number of queries for the current user to 123 queries per 15 months:

 ``` sql
-CREATE QUOTA qA FOR INTERVAL 15 MONTH MAX QUERIES 123 TO CURRENT_USER
+CREATE QUOTA qA FOR INTERVAL 15 month MAX queries = 123 TO CURRENT_USER;
+```
+
+For the default user, limit the maximum query execution time to half a second per 30 minutes, and limit the maximum number of queries to 321 and the maximum number of errors to 10 per 5 quarters:
+
+``` sql
+CREATE QUOTA qB FOR INTERVAL 30 minute MAX execution_time = 0.5, FOR INTERVAL 5 quarter MAX queries = 321, errors = 10 TO default;
 ```

 [Original article](https://clickhouse.tech/docs/ru/sql-reference/statements/create/quota)

@@ -5,18 +5,16 @@ toc_title: "\u0420\u043e\u043b\u044c"

 # CREATE ROLE {#create-role-statement}

-Creates a [role](../../../operations/access-rights.md#role-management).
+Creates [roles](../../../operations/access-rights.md#role-management). A role is a set of [privileges](../grant.md#grant-privileges). A user assigned a role gets all the privileges of that role.

-### Syntax {#create-role-syntax}
+Syntax:

 ```sql
-CREATE ROLE [IF NOT EXISTS | OR REPLACE] name
+CREATE ROLE [IF NOT EXISTS | OR REPLACE] name1 [, name2 ...]
     [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...]
 ```

-### Description {#create-role-description}
-
-A role is a set of [privileges](../grant.md#grant-privileges). A user assigned a role gets all the privileges of that role.
+## Managing roles {#managing-roles}

 A user can be assigned multiple roles. Users can apply their assigned roles in arbitrary combinations with the [SET ROLE](../misc.md#set-role-statement) statement. The final scope of privileges is the combination of all the privileges of all the applied roles. If a user has privileges granted directly to their account, they are also combined with the privileges granted through roles.

@@ -26,7 +24,7 @@ CREATE ROLE [IF NOT EXISTS | OR REPLACE] name

 To delete a role, use the [DROP ROLE](../misc.md#drop-role-statement) statement. The deleted role is automatically revoked from all the users it was assigned to.

-### Examples {#create-role-examples}
+## Examples {#create-role-examples}

 ```sql
 CREATE ROLE accountant;

View File

@ -5,19 +5,22 @@ toc_title: "\u041f\u043e\u043b\u0438\u0442\u0438\u043a\u0430\u0020\u0434\u043e\u
# CREATE ROW POLICY {#create-row-policy-statement} # CREATE ROW POLICY {#create-row-policy-statement}
Создает [фильтр для строк](../../../operations/access-rights.md#row-policy-management), которые пользователь может прочесть из таблицы. Создает [фильтры для строк](../../../operations/access-rights.md#row-policy-management), которые пользователь может прочесть из таблицы.
### Синтаксис {#create-row-policy-syntax} Синтаксис:
``` sql ``` sql
CREATE [ROW] POLICY [IF NOT EXISTS | OR REPLACE] policy_name [ON CLUSTER cluster_name] ON [db.]table CREATE [ROW] POLICY [IF NOT EXISTS | OR REPLACE] policy_name1 [ON CLUSTER cluster_name1] ON [db1.]table1
[, policy_name2 [ON CLUSTER cluster_name2] ON [db2.]table2 ...]
[AS {PERMISSIVE | RESTRICTIVE}] [AS {PERMISSIVE | RESTRICTIVE}]
[FOR SELECT] [FOR SELECT]
[USING condition] [USING condition]
[TO {role [,...] | ALL | ALL EXCEPT role [,...]}] [TO {role [,...] | ALL | ALL EXCEPT role [,...]}]
``` ```
#### Секция AS {#create-row-policy-as} Секция `ON CLUSTER` позволяет создавать фильтры для строк на кластере, см. [Распределенные DDL запросы](../../../sql-reference/distributed-ddl.md).
## Секция AS {#create-row-policy-as}
С помощью данной секции можно создать политику разрешения или ограничения. С помощью данной секции можно создать политику разрешения или ограничения.
@ -27,16 +30,17 @@ CREATE [ROW] POLICY [IF NOT EXISTS | OR REPLACE] policy_name [ON CLUSTER cluster
Ограничительные политики применяются к строкам, прошедшим фильтр разрешительной политики. Если вы не зададите разрешительные политики, пользователь не сможет обращаться ни к каким строкам из таблицы. Ограничительные политики применяются к строкам, прошедшим фильтр разрешительной политики. Если вы не зададите разрешительные политики, пользователь не сможет обращаться ни к каким строкам из таблицы.
## TO Clause {#create-row-policy-to}

In the `TO` clause you can list both roles and users, for example `CREATE ROW POLICY ... TO accountant, john@localhost`.

The keyword `ALL` denotes all users, including the current one. The keywords `ALL EXCEPT` let you exclude certain users from the list of all users, for example `CREATE ROW POLICY ... TO ALL EXCEPT accountant, john@localhost`.

## Examples

`CREATE ROW POLICY filter ON mydb.mytable FOR SELECT USING a<1000 TO accountant, john@localhost`

`CREATE ROW POLICY filter ON mydb.mytable FOR SELECT USING a<1000 TO ALL EXCEPT mira`

[Original article](https://clickhouse.tech/docs/ru/sql-reference/statements/create/row-policy)

<!--hide-->
@ -5,16 +5,19 @@ toc_title: "\u041f\u0440\u043e\u0444\u0438\u043b\u044c\u0020\u043d\u0430\u0441\u

# CREATE SETTINGS PROFILE {#create-settings-profile-statement}

Creates [settings profiles](../../../operations/access-rights.md#settings-profiles-management) that can be assigned to a user or a role.

Syntax:

``` sql
CREATE SETTINGS PROFILE [IF NOT EXISTS | OR REPLACE] name1 [ON CLUSTER cluster_name1]
[, name2 [ON CLUSTER cluster_name2] ...]
[SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | INHERIT 'profile_name'] [,...]
```

The `ON CLUSTER` clause allows creating profiles on a cluster, see [Distributed DDL queries](../../../sql-reference/distributed-ddl.md).

## Example {#create-settings-profile-syntax}

Create the `max_memory_usage_profile` settings profile with a value and constraints for the `max_memory_usage` setting, and assign it to the user `robin`:
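
A sketch of such a statement (the exact value and limits are illustrative):

``` sql
CREATE SETTINGS PROFILE max_memory_usage_profile SETTINGS max_memory_usage = 100000001 MIN 90000000 MAX 110000000 TO robin
```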
@ -5,19 +5,20 @@ toc_title: "\u041f\u043e\u043b\u044c\u0437\u043e\u0432\u0430\u0442\u0435\u043b\u

# CREATE USER {#create-user-statement}

Creates [user accounts](../../../operations/access-rights.md#user-account-management).

Syntax:

```sql
CREATE USER [IF NOT EXISTS | OR REPLACE] name1 [ON CLUSTER cluster_name1]
[, name2 [ON CLUSTER cluster_name2] ...]
[IDENTIFIED [WITH {NO_PASSWORD|PLAINTEXT_PASSWORD|SHA256_PASSWORD|SHA256_HASH|DOUBLE_SHA1_PASSWORD|DOUBLE_SHA1_HASH}] BY {'password'|'hash'}]
[HOST {LOCAL | NAME 'name' | REGEXP 'name_regexp' | IP 'address' | LIKE 'pattern'} [,...] | ANY | NONE]
[DEFAULT ROLE role [,...]]
[SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...]
```

## Identification

There are several ways to identify a user:

@ -28,7 +29,7 @@ CREATE USER [IF NOT EXISTS | OR REPLACE] name [ON CLUSTER cluster_name]

- `IDENTIFIED WITH double_sha1_password BY 'qwerty'`
- `IDENTIFIED WITH double_sha1_hash BY 'hash'`

## User Host

The user host is the host from which a connection to the ClickHouse server can be established. The host is specified in the `HOST` clause in the following ways:

@ -49,7 +50,7 @@ CREATE USER [IF NOT EXISTS | OR REPLACE] name [ON CLUSTER cluster_name]

ClickHouse treats the whole `user_name@'address'` construct as the user name. Technically, you can create several users with the same `user_name` and different parts after `@`, but it is better not to do so.
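
A hedged sketch of this `@` syntax (the account name and address pattern are illustrative):

``` sql
-- Equivalent to specifying HOST LIKE '192.168.%.%':
CREATE USER mira@'192.168.%.%' IDENTIFIED WITH sha256_password BY 'qwerty'
```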
## Examples {#create-user-examples}

Create the account `mira` protected by the password `qwerty`:

@ -69,7 +70,7 @@ CREATE USER john DEFAULT ROLE role1, role2

Create the account `john` and make all his future roles default:

``` sql
CREATE USER john DEFAULT ROLE ALL
```

When a role is later assigned to the account `john`, it automatically becomes one of his default roles.
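
For example, a minimal sketch (assuming a role `role1` already exists):

``` sql
GRANT role1 TO john; -- role1 now appears in john's set of default roles
```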
@ -77,7 +78,7 @@ ALTER USER user DEFAULT ROLE ALL

Create the account `john` and make all his future roles default, except `role1` and `role2`:

``` sql
CREATE USER john DEFAULT ROLE ALL EXCEPT role1, role2
```

[Original article](https://clickhouse.tech/docs/ru/sql-reference/statements/create/user)
@ -13,7 +13,7 @@ toc_title: INSERT INTO

INSERT INTO [db.]table [(c1, c2, c3)] VALUES (v11, v12, v13), (v21, v22, v23), ...
```

You can specify a list of columns to insert using the `(c1, c2, c3)` syntax. You can also use an expression with an [asterisk](../../sql-reference/statements/select/index.md#asterisk) and/or modifiers such as `APPLY`, `EXCEPT`, `REPLACE`.

Consider the following table as an example:

@ -30,8 +30,7 @@ SHOW CREATE insert_select_testtable

`c` Int8
)
ENGINE = MergeTree()
ORDER BY a │
SETTINGS index_granularity = 8192 │
└──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘
```
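
A hedged sketch of a modifier in the column list: with columns `a`, `b`, `c`, the expression `* EXCEPT(b)` expands to `(a, c)` (the values are illustrative):

``` sql
INSERT INTO insert_select_testtable (* EXCEPT(b)) VALUES (1, 2)
```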
@ -25,6 +25,8 @@ toc_title: FROM

- [Replicated](../../../engines/table-engines/mergetree-family/replication.md) versions of the `MergeTree` engines.
- [View](../../../engines/table-engines/special/view.md), [Buffer](../../../engines/table-engines/special/buffer.md), [Distributed](../../../engines/table-engines/special/distributed.md), and [MaterializedView](../../../engines/table-engines/special/materializedview.md) engines that operate over other engines, provided they were created over tables with `MergeTree`-family engines.

`SELECT` queries with `FINAL` are now executed in parallel and are therefore slightly faster, but there are still serious drawbacks to using it (see below). The [max_final_threads](../../../operations/settings/settings.md#max-final-threads) setting limits the maximum number of threads used.
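
A hedged sketch of such a query (the table name is hypothetical):

``` sql
SELECT * FROM replacing_table FINAL SETTINGS max_final_threads = 8;
```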
### Drawbacks {#drawbacks}

Queries that use `FINAL` execute slightly slower than similar queries without it, because:
@ -249,11 +249,9 @@ SHOW GRANTS [FOR user]

### Syntax {#show-create-user-syntax}

``` sql
SHOW CREATE USER [name1 [, name2 ...] | CURRENT_USER]
```

## SHOW CREATE ROLE {#show-create-role-statement}

Shows the parameters that were used when [creating a role](create/role.md#create-role-statement).

@ -261,11 +259,9 @@ SHOW CREATE USER [name | CURRENT_USER]

### Syntax {#show-create-role-syntax}

``` sql
SHOW CREATE ROLE name1 [, name2 ...]
```

## SHOW CREATE ROW POLICY {#show-create-row-policy-statement}

Shows the parameters that were used when [creating a row policy](create/row-policy.md#create-row-policy-statement).

@ -273,10 +269,9 @@ SHOW CREATE ROLE name

### Syntax {#show-create-row-policy-syntax}

```sql
SHOW CREATE [ROW] POLICY name ON [database1.]table1 [, [database2.]table2 ...]
```

## SHOW CREATE QUOTA {#show-create-quota-statement}

Shows the parameters that were used when [creating a quota](create/quota.md#create-quota-statement).

@ -284,10 +279,9 @@ SHOW CREATE [ROW] POLICY name ON [database.]table

### Syntax {#show-create-quota-syntax}

```sql
SHOW CREATE QUOTA [name1 [, name2 ...] | CURRENT]
```

## SHOW CREATE SETTINGS PROFILE {#show-create-settings-profile-statement}

Shows the parameters that were used when [creating a settings profile](create/settings-profile.md#create-settings-profile-statement).

@ -295,10 +289,9 @@ SHOW CREATE QUOTA [name | CURRENT]

### Syntax {#show-create-settings-profile-syntax}

```sql
SHOW CREATE [SETTINGS] PROFILE name1 [, name2 ...]
```

## SHOW USERS {#show-users-statement}

Shows the list of [user accounts](../../operations/access-rights.md#user-account-management). To view user account parameters, see the system table [system.users](../../operations/system-tables/users.md#system_tables-users).

@ -359,4 +352,14 @@ SHOW QUOTAS

SHOW [CURRENT] QUOTA
```

## SHOW ACCESS {#show-access-statement}

Shows the list of all [users](../../operations/access-rights.md#user-account-management), [roles](../../operations/access-rights.md#role-management), [profiles](../../operations/access-rights.md#settings-profiles-management), etc., as well as all [privileges](../../sql-reference/statements/grant.md#grant-privileges).

### Syntax {#show-access-syntax}

``` sql
SHOW ACCESS
```

[Original article](https://clickhouse.tech/docs/ru/query_language/show/) <!--hide-->
@ -118,7 +118,7 @@ for (auto & stream : streams)
stream.second->finalize();
```

**18.** Lines must not end with trailing whitespace.

**19.** Source files must be encoded in UTF-8.
@ -254,7 +254,6 @@ ENGINE = MergeTree()
PARTITION BY toYYYYMM(EventDate)
ORDER BY (CounterID, EventDate, intHash32(UserID))
SAMPLE BY intHash32(UserID)
SETTINGS index_granularity = 8192
```

``` sql
@ -450,7 +449,6 @@ ENGINE = CollapsingMergeTree(Sign)
PARTITION BY toYYYYMM(StartDate)
ORDER BY (CounterID, StartDate, intHash32(UserID), VisitID)
SAMPLE BY intHash32(UserID)
SETTINGS index_granularity = 8192
```

You can run these queries in the interactive mode of `clickhouse-client` (just start it in a terminal without specifying the query in advance), or try one of the [alternative interfaces](../interfaces/index.md) if you prefer.
@ -25,7 +25,6 @@ CREATE TABLE insert_select_testtable
)
ENGINE = MergeTree()
ORDER BY a
SETTINGS index_granularity = 8192
```

``` sql
@ -229,7 +229,7 @@ public:
{
for (const auto & x : small)
{
if (!r1.rb->contains(static_cast<Value>(x.getValue())))
buffer.push_back(x.getValue());
}
@ -4,7 +4,6 @@
#include <Common/assert_cast.h>
#include <Common/WeakHash.h>
#include <Common/HashTable/Hash.h>
#include <Core/BigInt.h>
#include <common/unaligned.h>
#include <common/sort.h>
@ -36,35 +36,18 @@ namespace ErrorCodes
template <typename T>
StringRef ColumnVector<T>::serializeValueIntoArena(size_t n, Arena & arena, char const *& begin) const
{
if constexpr (is_big_int_v<T>)
{
static constexpr size_t bytesize = BigInt<T>::size;
char * pos = arena.allocContinue(bytesize, begin);
return BigInt<T>::serialize(data[n], pos);
}
else
{
auto * pos = arena.allocContinue(sizeof(T), begin);
unalignedStore<T>(pos, data[n]);
return StringRef(pos, sizeof(T));
}
}
template <typename T>
const char * ColumnVector<T>::deserializeAndInsertFromArena(const char * pos)
{
if constexpr (is_big_int_v<T>)
{
data.emplace_back(BigInt<T>::deserialize(pos));
return pos + BigInt<T>::size;
}
else
{
data.emplace_back(unalignedLoad<T>(pos));
return pos + sizeof(T);
}
}
template <typename T>
void ColumnVector<T>::updateHashWithValue(size_t n, SipHash & hash) const
@ -299,19 +282,11 @@ MutableColumnPtr ColumnVector<T>::cloneResized(size_t size) const
new_col.data.resize(size);
size_t count = std::min(this->size(), size);
if constexpr (is_POD)
{
memcpy(new_col.data.data(), data.data(), count * sizeof(data[0]));
if (size > count)
memset(static_cast<void *>(&new_col.data[count]), static_cast<int>(ValueType()), (size - count) * sizeof(ValueType));
}
else
{
for (size_t i = 0; i < count; i++)
new_col.data[i] = data[i];
}
}
return res;
}
@ -348,16 +323,8 @@ void ColumnVector<T>::insertRangeFrom(const IColumn & src, size_t start, size_t
size_t old_size = data.size();
data.resize(old_size + length);
if constexpr (is_POD)
{
memcpy(data.data() + old_size, &src_vec.data[start], length * sizeof(data[0]));
}
else
{
for (size_t i = 0; i < length; i++)
data[old_size + i] = src_vec.data[start + i];
}
}
template <typename T>
ColumnPtr ColumnVector<T>::filter(const IColumn::Filter & filt, ssize_t result_size_hint) const
@ -372,8 +339,6 @@ ColumnPtr ColumnVector<T>::filter(const IColumn::Filter & filt, ssize_t result_s
if (result_size_hint)
res_data.reserve(result_size_hint > 0 ? result_size_hint : size);
if constexpr (is_POD)
{
const UInt8 * filt_pos = filt.data();
const UInt8 * filt_end = filt_pos + size;
const T * data_pos = data.data();
@ -421,22 +386,6 @@ ColumnPtr ColumnVector<T>::filter(const IColumn::Filter & filt, ssize_t result_s
++filt_pos;
++data_pos;
}
}
else
{
const auto * filt_pos = filt.begin();
const auto * filt_end = filt.end();
auto data_pos = data.begin();
while (filt_pos < filt_end)
{
if (*filt_pos)
res_data.push_back(*data_pos);
++filt_pos;
++data_pos;
}
}
return res;
}
@ -6,7 +6,6 @@
#include <Columns/ColumnVectorHelper.h>
#include <common/unaligned.h>
#include <Core/Field.h>
#include <Core/BigInt.h>
#include <Common/assert_cast.h>
@ -107,10 +106,7 @@ private:
public:
using ValueType = T;
static constexpr bool is_POD = !is_big_int_v<T>;
using Container = std::conditional_t<is_POD,
PaddedPODArray<ValueType>,
std::vector<ValueType>>;
using Container = PaddedPODArray<ValueType>;
private:
ColumnVector() {}
@ -136,10 +132,7 @@ public:
void insertData(const char * pos, size_t) override
{
if constexpr (is_POD)
data.emplace_back(unalignedLoad<T>(pos));
else
data.emplace_back(BigInt<T>::deserialize(pos));
}
void insertDefault() override
@ -149,18 +142,12 @@ public:
void insertManyDefaults(size_t length) override
{
if constexpr (is_POD)
data.resize_fill(data.size() + length, T());
else
data.resize(data.size() + length, T());
}
void popBack(size_t n) override
{
if constexpr (is_POD)
data.resize_assume_reserved(data.size() - n);
else
data.resize(data.size() - n);
}
StringRef serializeValueIntoArena(size_t n, Arena & arena, char const *& begin) const override;
@ -185,15 +172,11 @@ public:
size_t allocatedBytes() const override
{
if constexpr (is_POD)
return data.allocated_bytes();
else
return data.capacity() * sizeof(data[0]);
}
void protect() override
{
if constexpr (is_POD)
data.protect();
}
@ -1,41 +0,0 @@
#include <Common/DirectorySyncGuard.h>
#include <Common/Exception.h>
#include <Disks/IDisk.h>
#include <fcntl.h> // O_RDWR
/// OSX does not have O_DIRECTORY
#ifndef O_DIRECTORY
#define O_DIRECTORY O_RDWR
#endif
namespace DB
{
namespace ErrorCodes
{
extern const int CANNOT_FSYNC;
}
DirectorySyncGuard::DirectorySyncGuard(const DiskPtr & disk_, const String & path)
: disk(disk_)
, fd(disk_->open(path, O_DIRECTORY))
{}
DirectorySyncGuard::~DirectorySyncGuard()
{
try
{
#if defined(OS_DARWIN)
if (fcntl(fd, F_FULLFSYNC, 0))
throwFromErrno("Cannot fcntl(F_FULLFSYNC)", ErrorCodes::CANNOT_FSYNC);
#endif
disk->sync(fd);
disk->close(fd);
}
catch (...)
{
tryLogCurrentException(__PRETTY_FUNCTION__);
}
}
}
@ -235,7 +235,7 @@ public:
else if constexpr (std::is_same_v<T, UInt128>)
throw Exception("No conversion to old UInt128 from " + demangle(typeid(U).name()), ErrorCodes::NOT_IMPLEMENTED);
else
return static_cast<T>(x);
}
};
@ -1,7 +1,6 @@
#pragma once
#include <common/types.h>
#include <Core/BigInt.h>
#include <Common/UInt128.h>
#include <common/unaligned.h>
@ -18,7 +18,7 @@
#include <string>
#include <type_traits>
#include <Core/Defines.h>
#include <Core/BigInt.h>
#define ROTL(x, b) static_cast<UInt64>(((x) << (b)) | ((x) >> (64 - (b))))
@ -136,23 +136,11 @@ public:
}
template <typename T>
void update(const T & x)
{
update(reinterpret_cast<const char *>(&x), sizeof(x));
}
template <typename T>
std::enable_if_t<(std::is_floating_point_v<T> || std::is_same_v<T, CityHash_v1_0_2::uint128>), void> update(const T & x)
{
update(reinterpret_cast<const char *>(&x), sizeof(x));
}
template <typename T>
std::enable_if_t<is_big_int_v<T> && !std::has_unique_object_representations_v<T>, void> update(const T & x)
{
update(DB::BigInt<T>::serialize(x));
}
void update(const std::string & x)
{
update(x.data(), x.length());
@ -205,27 +193,13 @@ inline UInt64 sipHash64(const char * data, const size_t size)
}
template <typename T>
UInt64 sipHash64(const T & x)
{
SipHash hash;
hash.update(x);
return hash.get64();
}
template <typename T>
std::enable_if_t<(std::is_floating_point_v<T> || (is_big_int_v<T> && !std::has_unique_object_representations_v<T>)), UInt64> sipHash64(const T & x)
{
SipHash hash;
hash.update(x);
return hash.get64();
}
template <typename T>
std::enable_if_t<DB::IsDecimalNumber<T>, UInt64> sipHash64(const T & x)
{
return sipHash64(x.value);
}
inline UInt64 sipHash64(const std::string & s)
{
return sipHash64(s.data(), s.size());
@ -18,8 +18,7 @@ namespace zkutil
void TestKeeperStorageDispatcher::processingThread()
{
setThreadName("TestKeeperSProc");
try
{
while (!shutdown)
{
RequestInfo info;
@ -31,16 +30,17 @@ void TestKeeperStorageDispatcher::processingThread()
if (shutdown)
break;
try
{
auto responses = storage.processRequest(info.request, info.session_id);
for (const auto & response_for_session : responses)
setResponse(response_for_session.session_id, response_for_session.response);
}
}
}
catch (...)
{
tryLogCurrentException(__PRETTY_FUNCTION__);
finalize();
}
}
}
}
@ -37,7 +37,6 @@ SRCS(
CurrentMetrics.cpp
CurrentThread.cpp
DNSResolver.cpp
DirectorySyncGuard.cpp
Dwarf.cpp
Elf.cpp
ErrorCodes.cpp
@ -1,6 +1,6 @@
#pragma once
#include <Core/Types.h>
#include <Compression/ICompressionCodec.h>
@ -93,7 +93,7 @@ using bool_if_gt_int_vs_uint = std::enable_if_t<is_gt_int_vs_uint<TInt, TUInt>,
template <typename TInt, typename TUInt>
inline bool_if_gt_int_vs_uint<TInt, TUInt> greaterOpTmpl(TInt a, TUInt b)
{
return static_cast<TInt>(a) > static_cast<TInt>(b);
}
template <typename TInt, typename TUInt>
@ -101,19 +101,19 @@ inline bool_if_gt_int_vs_uint<TInt, TUInt> greaterOpTmpl(TUInt a, TInt b)
{
using CastA = std::conditional_t<is_big_int_v<TInt> && std::is_same_v<TUInt, DB::UInt128>, DB::UInt256, TInt>;
return static_cast<CastA>(a) > b;
}
template <typename TInt, typename TUInt>
inline bool_if_gt_int_vs_uint<TInt, TUInt> equalsOpTmpl(TInt a, TUInt b)
{
return static_cast<TInt>(a) == static_cast<TInt>(b);
}
template <typename TInt, typename TUInt>
inline bool_if_gt_int_vs_uint<TInt, TUInt> equalsOpTmpl(TUInt a, TInt b)
{
return static_cast<TInt>(a) == static_cast<TInt>(b);
}
@ -196,7 +196,7 @@ inline bool_if_safe_conversion<A, B> greaterOp(A a, B b)
using CastB = std::conditional_t<is_big_int_v<A> && std::is_same_v<B, DB::UInt128>, A, CastB1>;
if constexpr (is_big_int_v<A> || is_big_int_v<B>)
return static_cast<CastA>(a) > static_cast<CastB>(b);
else
return a > b;
}
@ -306,7 +306,7 @@ inline bool_if_safe_conversion<A, B> equalsOp(A a, B b)
{
using LargestType = std::conditional_t<(sizeof(A) > sizeof(B)) || ((sizeof(A) == sizeof(B)) && !std::is_same_v<A, DB::UInt128>), A, B>;
return static_cast<LargestType>(a) == static_cast<LargestType>(b);
}
template <>
@ -429,7 +429,7 @@ inline bool_if_safe_conversion<A, B> notEqualsOp(A a, B b)
using CastB = std::conditional_t<is_big_int_v<A> && std::is_same_v<B, DB::UInt128>, A, CastB1>;
if constexpr (is_big_int_v<A> || is_big_int_v<B>)
return static_cast<CastA>(a) != static_cast<CastB>(b);
else
return a != b;
}
@ -451,7 +451,7 @@ inline bool_if_safe_conversion<A, B> lessOp(A a, B b)
using CastB = std::conditional_t<is_big_int_v<A> && std::is_same_v<B, DB::UInt128>, A, CastB1>;
if constexpr (is_big_int_v<A> || is_big_int_v<B>)
return static_cast<CastA>(a) < static_cast<CastB>(b);
else
return a < b;
}
@ -475,7 +475,7 @@ inline bool_if_safe_conversion<A, B> lessOrEqualsOp(A a, B b)
using CastB = std::conditional_t<is_big_int_v<A> && std::is_same_v<B, DB::UInt128>, A, CastB1>;
if constexpr (is_big_int_v<A> || is_big_int_v<B>)
return static_cast<CastA>(a) <= static_cast<CastB>(b);
else
return a <= b;
}
@ -499,7 +499,7 @@ inline bool_if_safe_conversion<A, B> greaterOrEqualsOp(A a, B b)
using CastB = std::conditional_t<is_big_int_v<A> && std::is_same_v<B, DB::UInt128>, A, CastB1>;
if constexpr (is_big_int_v<A> || is_big_int_v<B>)
return static_cast<CastA>(a) >= static_cast<CastB>(b);
else
return a >= b;
}
@ -1,36 +0,0 @@
#pragma once
#include <common/StringRef.h>
#include <common/unaligned.h>
#include <Core/Types.h>
namespace DB
{
template <typename T>
struct BigInt
{
static_assert(sizeof(T) == 32);
static constexpr size_t size = 32;
static StringRef serialize(const T & x, char * pos)
{
unalignedStore<T>(pos, x);
return StringRef(pos, size);
}
static String serialize(const T & x)
{
String str(size, '\0');
serialize(x, str.data());
return str;
}
static T deserialize(const char * pos)
{
return unalignedLoad<T>(pos);
}
};
}
@ -233,9 +233,9 @@ private:
bool overflow = false;
if constexpr (sizeof(A) > sizeof(CompareInt))
overflow |= (static_cast<A>(x) != a);
if constexpr (sizeof(B) > sizeof(CompareInt))
overflow |= (static_cast<B>(y) != b);
if constexpr (is_unsigned_v<A>)
overflow |= (x < 0);
if constexpr (is_unsigned_v<B>)
@ -405,7 +405,7 @@ class IColumn;
M(MySQLDataTypesSupport, mysql_datatypes_support_level, 0, "Which MySQL types should be converted to corresponding ClickHouse types (rather than being represented as String). Can be empty or any combination of 'decimal' or 'datetime64'. When empty MySQL's DECIMAL and DATETIME/TIMESTAMP with non-zero precision are seen as String on ClickHouse's side.", 0) \
M(Bool, optimize_trivial_insert_select, true, "Optimize trivial 'INSERT INTO table SELECT ... FROM TABLES' query", 0) \
M(Bool, allow_non_metadata_alters, true, "Allow to execute alters which affects not only tables metadata, but also data on disk", 0) \
M(Bool, enable_global_with_statement, true, "Propagate WITH statements to UNION queries and all subqueries", 0) \
M(Bool, aggregate_functions_null_for_empty, false, "Rewrite all aggregate functions in a query, adding -OrNull suffix to them", 0) \
M(Bool, flatten_nested, true, "If true, columns of type Nested will be flatten to separate array columns instead of one array of tuples", 0) \
M(Bool, asterisk_include_materialized_columns, false, "Include MATERIALIZED columns for wildcard query", 0) \
@ -158,7 +158,7 @@ struct Decimal
return convertTo<typename U::NativeType>();
}
else
return static_cast<U>(value);
}
const Decimal<T> & operator += (const T & x) { value += x; return *this; }
@ -29,8 +29,14 @@ namespace ErrorCodes
DataTypePtr DataTypeFactory::get(const String & full_name) const
{
/// The data type parser can be invoked from coroutines with a small stack.
/// A value of 315 is known to cause stack overflow in some test configurations (debug build, sanitizers),
/// so let's make the threshold significantly lower.
/// It is impractical for a user to have complex data types of this depth.
static constexpr size_t data_type_max_parse_depth = 200;
ParserDataType parser;
ASTPtr ast = parseQuery(parser, full_name.data(), full_name.data() + full_name.size(), "data type", 0, data_type_max_parse_depth);
return get(ast);
}
@ -218,7 +218,7 @@ using ResultOfGreatest = std::conditional_t<LeastGreatestSpecialCase<A, B>,
template <typename T>
static inline auto littleBits(const T & x)
{
return static_cast<UInt8>(x);
}
}
@ -45,7 +45,7 @@ DataTypePtr convertMySQLDataType(MultiEnum<MySQLDataTypesSupport> type_support,
// 4. type_with_params(param1, param2, ...) options
// The options can be unsigned, zerofill, or some other strings.
auto data_type = std::string_view(mysql_data_type);
const auto type_end_pos = data_type.find_first_of(R"(( )"); // FIXME: fix style-check script instead
const auto type_name = data_type.substr(0, type_end_pos);
DataTypePtr res;
@ -344,7 +344,9 @@ std::vector<DictionaryAttribute> DictionaryStructure::getAttributes(
}
catch (Exception & e)
{
String dictionary_name = config.getString(".dictionary.name", "");
e.addMessage("While parsing null_value for attribute with name " + name
+ " in dictionary " + dictionary_name);
throw;
}
}
@ -175,24 +175,14 @@ void DiskDecorator::truncateFile(const String & path, size_t size)
delegate->truncateFile(path, size);
}
int DiskDecorator::open(const String & path, int flags) const
{
return delegate->open(path, flags);
}
void DiskDecorator::close(int fd) const
{
delegate->close(fd);
}
void DiskDecorator::sync(int fd) const
{
delegate->sync(fd);
}
Executor & DiskDecorator::getExecutor()
{
return delegate->getExecutor();
}
SyncGuardPtr DiskDecorator::getDirectorySyncGuard(const String & path) const
{
return delegate->getDirectorySyncGuard(path);
}
}
@ -48,11 +48,9 @@ public:
void setReadOnly(const String & path) override;
void createHardLink(const String & src_path, const String & dst_path) override;
void truncateFile(const String & path, size_t size) override;
int open(const String & path, int flags) const override;
void close(int fd) const override;
void sync(int fd) const override;
const String getType() const override { return delegate->getType(); }
Executor & getExecutor() override;
SyncGuardPtr getDirectorySyncGuard(const String & path) const override;
protected:
DiskPtr delegate;
@ -5,6 +5,7 @@
#include <Interpreters/Context.h>
#include <Common/filesystemHelpers.h>
#include <Common/quoteString.h>
#include <Disks/LocalDirectorySyncGuard.h>
#include <IO/createReadBufferFromFileBase.h>
#include <common/logger_useful.h>
@ -20,10 +21,6 @@ namespace ErrorCodes
extern const int EXCESSIVE_ELEMENT_IN_CONFIG;
extern const int PATH_ACCESS_DENIED;
extern const int INCORRECT_DISK_INDEX;
extern const int FILE_DOESNT_EXIST;
extern const int CANNOT_OPEN_FILE;
extern const int CANNOT_FSYNC;
extern const int CANNOT_CLOSE_FILE;
extern const int CANNOT_TRUNCATE_FILE;
extern const int CANNOT_UNLINK;
extern const int CANNOT_RMDIR;
@ -315,26 +312,9 @@ void DiskLocal::copy(const String & from_path, const std::shared_ptr<IDisk> & to
IDisk::copy(from_path, to_disk, to_path); /// Copy files through buffers.
}
int DiskLocal::open(const String & path, int flags) const
{
String full_path = disk_path + path;
int fd = ::open(full_path.c_str(), flags);
if (-1 == fd)
throwFromErrnoWithPath("Cannot open file " + full_path, full_path,
errno == ENOENT ? ErrorCodes::FILE_DOESNT_EXIST : ErrorCodes::CANNOT_OPEN_FILE);
return fd;
}
void DiskLocal::close(int fd) const
{
if (-1 == ::close(fd))
throw Exception("Cannot close file", ErrorCodes::CANNOT_CLOSE_FILE);
}
void DiskLocal::sync(int fd) const
{
if (-1 == ::fsync(fd))
throw Exception("Cannot fsync", ErrorCodes::CANNOT_FSYNC);
}
SyncGuardPtr DiskLocal::getDirectorySyncGuard(const String & path) const
{
return std::make_unique<LocalDirectorySyncGuard>(disk_path + path);
}
DiskPtr DiskLocalReservation::getDisk(size_t i) const
@ -98,14 +98,12 @@ public:
void createHardLink(const String & src_path, const String & dst_path) override;
int open(const String & path, int flags) const override;
void close(int fd) const override;
void sync(int fd) const override;
void truncateFile(const String & path, size_t size) override;
const String getType() const override { return "local"; }
SyncGuardPtr getDirectorySyncGuard(const String & path) const override;
private:
bool tryReserve(UInt64 bytes);
@ -436,21 +436,6 @@ void DiskMemory::setReadOnly(const String &)
throw Exception("Method setReadOnly is not implemented for memory disks", ErrorCodes::NOT_IMPLEMENTED);
}
int DiskMemory::open(const String & /*path*/, int /*flags*/) const
{
throw Exception("Method open is not implemented for memory disks", ErrorCodes::NOT_IMPLEMENTED);
}
void DiskMemory::close(int /*fd*/) const
{
throw Exception("Method close is not implemented for memory disks", ErrorCodes::NOT_IMPLEMENTED);
}
void DiskMemory::sync(int /*fd*/) const
{
throw Exception("Method sync is not implemented for memory disks", ErrorCodes::NOT_IMPLEMENTED);
}
void DiskMemory::truncateFile(const String & path, size_t size)
{
std::lock_guard lock(mutex);
@ -89,10 +89,6 @@ public:
void createHardLink(const String & src_path, const String & dst_path) override;
int open(const String & path, int flags) const override;
void close(int fd) const override;
void sync(int fd) const override;
void truncateFile(const String & path, size_t size) override;
const String getType() const override { return "memory"; }
@ -76,4 +76,9 @@ void IDisk::truncateFile(const String &, size_t)
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Truncate operation is not implemented for disk of type {}", getType());
}
SyncGuardPtr IDisk::getDirectorySyncGuard(const String & /* path */) const
{
return nullptr;
}
}
@ -57,6 +57,19 @@ public:
using SpacePtr = std::shared_ptr<Space>;
/**
 * A guard that should synchronize a file's or directory's state
 * with the storage device (e.g. fsync in POSIX) in its destructor.
 */
class ISyncGuard
{
public:
ISyncGuard() = default;
virtual ~ISyncGuard() = default;
};
using SyncGuardPtr = std::unique_ptr<ISyncGuard>;
/**
* A unit of storage persisting data and metadata.
* Abstract underlying storage technology.
@ -174,15 +187,6 @@ public:
/// Create hardlink from `src_path` to `dst_path`.
virtual void createHardLink(const String & src_path, const String & dst_path) = 0;
/// Wrapper for POSIX open
virtual int open(const String & path, int flags) const = 0;
/// Wrapper for POSIX close
virtual void close(int fd) const = 0;
/// Wrapper for POSIX fsync
virtual void sync(int fd) const = 0;
/// Truncate file to specified size.
virtual void truncateFile(const String & path, size_t size);
@ -195,6 +199,9 @@ public:
/// Returns executor to perform asynchronous operations.
virtual Executor & getExecutor() { return *executor; }
/// Returns a guard that ensures synchronization of directory metadata with the storage device.
virtual SyncGuardPtr getDirectorySyncGuard(const String & path) const;
private:
std::unique_ptr<Executor> executor;
};
@ -0,0 +1,50 @@
#include <Disks/LocalDirectorySyncGuard.h>
#include <Common/Exception.h>
#include <Disks/IDisk.h>
#include <fcntl.h> // O_RDWR
/// OSX does not have O_DIRECTORY
#ifndef O_DIRECTORY
#define O_DIRECTORY O_RDWR
#endif
namespace DB
{
namespace ErrorCodes
{
extern const int CANNOT_FSYNC;
extern const int FILE_DOESNT_EXIST;
extern const int CANNOT_OPEN_FILE;
extern const int CANNOT_CLOSE_FILE;
}
LocalDirectorySyncGuard::LocalDirectorySyncGuard(const String & full_path)
: fd(::open(full_path.c_str(), O_DIRECTORY))
{
if (-1 == fd)
throwFromErrnoWithPath("Cannot open file " + full_path, full_path,
errno == ENOENT ? ErrorCodes::FILE_DOESNT_EXIST : ErrorCodes::CANNOT_OPEN_FILE);
}
LocalDirectorySyncGuard::~LocalDirectorySyncGuard()
{
try
{
#if defined(OS_DARWIN)
if (fcntl(fd, F_FULLFSYNC, 0))
throwFromErrno("Cannot fcntl(F_FULLFSYNC)", ErrorCodes::CANNOT_FSYNC);
#endif
if (-1 == ::fsync(fd))
throw Exception("Cannot fsync", ErrorCodes::CANNOT_FSYNC);
if (-1 == ::close(fd))
throw Exception("Cannot close file", ErrorCodes::CANNOT_CLOSE_FILE);
}
catch (...)
{
tryLogCurrentException(__PRETTY_FUNCTION__);
}
}
}
@ -1,7 +1,6 @@
#pragma once
#include <Disks/IDisk.h>
#include <memory>
namespace DB
{
@ -13,17 +12,16 @@ using DiskPtr = std::shared_ptr<IDisk>;
/// It's used to keep a descriptor open while doing some operations with it, and to do an fsync at the end.
/// Guarantees of the sequence 'close-reopen-fsync' may depend on the kernel version.
/// Source: linux-fsdevel mailing-list https://marc.info/?l=linux-fsdevel&m=152535409207496
class LocalDirectorySyncGuard final : public ISyncGuard
{
public:
/// NOTE: If you have already opened a descriptor, it's preferable to use
/// this constructor instead of the constructor taking a path.
LocalDirectorySyncGuard(int fd_) : fd(fd_) {}
LocalDirectorySyncGuard(const String & full_path);
~LocalDirectorySyncGuard() override;
private:
DiskPtr disk;
int fd = -1;
};
@ -36,7 +36,6 @@ namespace ErrorCodes
extern const int CANNOT_SEEK_THROUGH_FILE;
extern const int UNKNOWN_FORMAT;
extern const int INCORRECT_DISK_INDEX;
extern const int NOT_IMPLEMENTED;
extern const int PATH_ACCESS_DENIED;
extern const int CANNOT_DELETE_DIRECTORY;
}
@ -878,21 +877,6 @@ void DiskS3::setReadOnly(const String & path)
metadata.save();
}
int DiskS3::open(const String & /*path*/, int /*flags*/) const
{
throw Exception("Method open is not implemented for S3 disks", ErrorCodes::NOT_IMPLEMENTED);
}
void DiskS3::close(int /*fd*/) const
{
throw Exception("Method close is not implemented for S3 disks", ErrorCodes::NOT_IMPLEMENTED);
}
void DiskS3::sync(int /*fd*/) const
{
throw Exception("Method sync is not implemented for S3 disks", ErrorCodes::NOT_IMPLEMENTED);
}
void DiskS3::shutdown()
{
/// This call stops any next retry attempts for ongoing S3 requests.
@ -105,10 +105,6 @@ public:
void setReadOnly(const String & path) override;
int open(const String & path, int flags) const override;
void close(int fd) const override;
void sync(int fd) const override;
const String getType() const override { return "s3"; }
void shutdown() override;
@ -112,32 +112,33 @@ void registerDiskS3(DiskFactory & factory)
Poco::File disk{context.getPath() + "disks/" + name};
disk.createDirectories();
S3::PocoHTTPClientConfiguration client_configuration = S3::ClientFactory::instance().createClientConfiguration(
context.getRemoteHostFilter(),
context.getGlobalContext().getSettingsRef().s3_max_redirects);
S3::URI uri(Poco::URI(config.getString(config_prefix + ".endpoint")));
if (uri.key.back() != '/')
throw Exception("S3 path must ends with '/', but '" + uri.key + "' doesn't.", ErrorCodes::BAD_ARGUMENTS);
client_configuration.connectTimeoutMs = config.getUInt(config_prefix + ".connect_timeout_ms", 10000);
client_configuration.httpRequestTimeoutMs = config.getUInt(config_prefix + ".request_timeout_ms", 5000);
client_configuration.maxConnections = config.getUInt(config_prefix + ".max_connections", 100);
client_configuration.endpointOverride = uri.endpoint;
auto proxy_config = getProxyConfiguration(config_prefix, config);
if (proxy_config)
client_configuration.perRequestConfiguration = [proxy_config](const auto & request) { return proxy_config->getConfiguration(request); };
client_configuration.retryStrategy = std::make_shared<Aws::Client::DefaultRetryStrategy>(
config.getUInt(config_prefix + ".retry_attempts", 10));
auto client = S3::ClientFactory::instance().create(
client_configuration,
uri.is_virtual_hosted_style,
config.getString(config_prefix + ".access_key_id", ""),
config.getString(config_prefix + ".secret_access_key", ""),
config.getBool(config_prefix + ".use_environment_credentials", config.getBool("s3.use_environment_credentials", false))
);
String metadata_path = config.getString(config_prefix + ".metadata_path", context.getPath() + "disks/" + name + "/");
@ -17,6 +17,7 @@ SRCS(
DiskSelector.cpp
IDisk.cpp
IVolume.cpp
LocalDirectorySyncGuard.cpp
SingleDiskVolume.cpp
StoragePolicy.cpp
VolumeJBOD.cpp
@ -51,9 +51,9 @@ inline auto checkedDivision(A a, B b)
throwIfDivisionLeadsToFPE(a, b);
if constexpr (is_big_int_v<A> && std::is_floating_point_v<B>)
return static_cast<B>(a) / b;
else if constexpr (is_big_int_v<B> && std::is_floating_point_v<A>)
return a / static_cast<A>(b);
else if constexpr (is_big_int_v<A> && is_big_int_v<B>)
return static_cast<A>(a / b);
else if constexpr (!is_big_int_v<A> && is_big_int_v<B>)
@ -84,10 +84,10 @@ struct DivideIntegralImpl
using SignedCastA = make_signed_t<CastA>;
using SignedCastB = std::conditional_t<sizeof(A) <= sizeof(B), make_signed_t<CastB>, SignedCastA>;
return static_cast<Result>(checkedDivision(static_cast<SignedCastA>(a), static_cast<SignedCastB>(b)));
}
else
return static_cast<Result>(checkedDivision(CastA(a), CastB(b)));
}
#if USE_EMBEDDED_COMPILER
@ -110,7 +110,7 @@ struct ModuloImpl
if constexpr (std::is_floating_point_v<ResultType>)
{
/// This computation is similar to `fmod` but the latter is not inlined and has 40 times worse performance.
return static_cast<ResultType>(a) - trunc(static_cast<ResultType>(a) / static_cast<ResultType>(b)) * static_cast<ResultType>(b);
}
else
{
@ -125,9 +125,9 @@ struct ModuloImpl
CastB int_b(b);
if constexpr (is_big_int_v<IntegerBType> && sizeof(IntegerAType) <= sizeof(IntegerBType))
return static_cast<Result>(static_cast<CastB>(int_a) % int_b);
else
return static_cast<Result>(int_a % static_cast<CastA>(int_b));
}
else
return IntegerAType(a) % IntegerBType(b);
@@ -79,8 +79,9 @@ struct InvalidType;
 template <template <typename> class Op, typename Name, bool is_injective>
 class FunctionUnaryArithmetic : public IFunction
 {
-    static constexpr bool allow_decimal = IsUnaryOperation<Op>::negate || IsUnaryOperation<Op>::abs;
+    static constexpr bool allow_decimal = IsUnaryOperation<Op>::negate || IsUnaryOperation<Op>::abs || IsUnaryOperation<Op>::sign;
     static constexpr bool allow_fixed_string = Op<UInt8>::allow_fixed_string;
+    static constexpr bool is_sign_function = IsUnaryOperation<Op>::sign;

     template <typename F>
     static bool castType(const IDataType * type, F && f)
@@ -137,7 +138,7 @@ public:
         {
             using T0 = typename DataType::FieldType;

-            if constexpr (IsDataTypeDecimal<DataType>)
+            if constexpr (IsDataTypeDecimal<DataType> && !is_sign_function)
             {
                 if constexpr (!allow_decimal)
                     return false;
@@ -182,6 +183,17 @@ public:
             if constexpr (allow_decimal)
             {
                 if (auto col = checkAndGetColumn<ColumnDecimal<T0>>(arguments[0].column.get()))
+                {
+                    if constexpr (is_sign_function)
+                    {
+                        auto col_res = ColumnVector<typename Op<T0>::ResultType>::create();
+                        auto & vec_res = col_res->getData();
+                        vec_res.resize(col->getData().size());
+                        UnaryOperationImpl<T0, Op<T0>>::vector(col->getData(), vec_res);
+                        result_column = std::move(col_res);
+                        return true;
+                    }
+                    else
                 {
                     auto col_res = ColumnDecimal<typename Op<T0>::ResultType>::create(0, type.getScale());
                     auto & vec_res = col_res->getData();
@@ -192,6 +204,7 @@ public:
                 }
             }
         }
+        }
         else
         {
             using T0 = typename DataType::FieldType;
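
Aside (not part of the diff): with is_sign_function set, sign over a Decimal argument yields a plain integer column rather than a Decimal of the same scale. A sketch of the expected behaviour, assuming the usual integer result type for sign:

    SELECT sign(toDecimal32(-1.5, 2))   -- -1
    SELECT sign(toDecimal64(0, 4))      --  0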

View File

@@ -12,19 +12,14 @@ struct NameFunctionIPv4NumToStringClassC { static constexpr auto name = "IPv4Num
 void registerFunctionsCoding(FunctionFactory & factory)
 {
     factory.registerFunction<FunctionToStringCutToZero>();
-    factory.registerFunction<FunctionIPv6NumToString>();
     factory.registerFunction<FunctionCutIPv6>();
-    factory.registerFunction<FunctionIPv6StringToNum>();
-    factory.registerFunction<FunctionIPv4NumToString<0, NameFunctionIPv4NumToString>>();
-    factory.registerFunction<FunctionIPv4NumToString<1, NameFunctionIPv4NumToStringClassC>>();
-    factory.registerFunction<FunctionIPv4StringToNum>();
     factory.registerFunction<FunctionIPv4ToIPv6>();
     factory.registerFunction<FunctionMACNumToString>();
     factory.registerFunction<FunctionMACStringTo<ParseMACImpl>>();
     factory.registerFunction<FunctionMACStringTo<ParseOUIImpl>>();
     factory.registerFunction<FunctionUUIDNumToString>();
     factory.registerFunction<FunctionUUIDStringToNum>();
-    factory.registerFunction<FunctionHex>();
+    factory.registerFunction<FunctionHex>(FunctionFactory::CaseInsensitive);
     factory.registerFunction<FunctionUnhex>();
     factory.registerFunction<FunctionChar>(FunctionFactory::CaseInsensitive);
     factory.registerFunction<FunctionBitmaskToArray>();
@@ -34,6 +29,23 @@ void registerFunctionsCoding(FunctionFactory & factory)
     factory.registerFunction<FunctionIPv4CIDRToRange>();
     factory.registerFunction<FunctionIsIPv4String>();
     factory.registerFunction<FunctionIsIPv6String>();
+
+    factory.registerFunction<FunctionIPv4NumToString<0, NameFunctionIPv4NumToString>>();
+    factory.registerFunction<FunctionIPv4NumToString<1, NameFunctionIPv4NumToStringClassC>>();
+    /// MySQL compatibility alias.
+    factory.registerFunction<FunctionIPv4NumToString<0, NameFunctionIPv4NumToString>>("INET_NTOA", FunctionFactory::CaseInsensitive);
+
+    factory.registerFunction<FunctionIPv4StringToNum>();
+    /// MySQL compatibility alias.
+    factory.registerFunction<FunctionIPv4StringToNum>("INET_ATON", FunctionFactory::CaseInsensitive);
+
+    factory.registerFunction<FunctionIPv6NumToString>();
+    /// MySQL compatibility alias.
+    factory.registerFunction<FunctionIPv6NumToString>("INET6_NTOA", FunctionFactory::CaseInsensitive);
+
+    factory.registerFunction<FunctionIPv6StringToNum>();
+    /// MySQL compatibility alias.
+    factory.registerFunction<FunctionIPv6StringToNum>("INET6_ATON", FunctionFactory::CaseInsensitive);
 }

 }
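
Aside (not part of the diff): once registered, the MySQL-style names resolve case-insensitively to the native functions, e.g.:

    SELECT INET_NTOA(3232235521)      -- '192.168.0.1', same as IPv4NumToString
    SELECT inet_aton('192.168.0.1')   -- 3232235521, same as IPv4StringToNum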

View File

@@ -81,8 +81,8 @@ namespace ErrorCodes
 template <typename A, typename B, typename Op>
 struct NumComparisonImpl
 {
-    using ContainerA = std::conditional_t<!is_big_int_v<A>, PaddedPODArray<A>, std::vector<A>>;
-    using ContainerB = std::conditional_t<!is_big_int_v<B>, PaddedPODArray<B>, std::vector<B>>;
+    using ContainerA = PaddedPODArray<A>;
+    using ContainerB = PaddedPODArray<B>;

     /// If you don't specify NO_INLINE, the compiler will inline this function, but we don't need this as this function contains tight loop inside.
     static void NO_INLINE vectorVector(const ContainerA & a, const ContainerB & b, PaddedPODArray<UInt8> & c)

View File

@@ -29,6 +29,9 @@ void registerFunctionsConversion(FunctionFactory & factory)
     factory.registerFunction<FunctionToDecimal256>();

     factory.registerFunction<FunctionToDate>();
+    /// MySQL compatibility alias.
+    factory.registerFunction<FunctionToDate>("DATE", FunctionFactory::CaseInsensitive);
+
     factory.registerFunction<FunctionToDateTime>();
     factory.registerFunction<FunctionToDateTime32>();
     factory.registerFunction<FunctionToDateTime64>();
@@ -92,9 +95,11 @@ void registerFunctionsConversion(FunctionFactory & factory)
     factory.registerFunction<FunctionToUUIDOrNull>();

     factory.registerFunction<FunctionParseDateTimeBestEffort>();
-    factory.registerFunction<FunctionParseDateTimeBestEffortUS>();
     factory.registerFunction<FunctionParseDateTimeBestEffortOrZero>();
     factory.registerFunction<FunctionParseDateTimeBestEffortOrNull>();
+    factory.registerFunction<FunctionParseDateTimeBestEffortUS>();
+    factory.registerFunction<FunctionParseDateTimeBestEffortUSOrZero>();
+    factory.registerFunction<FunctionParseDateTimeBestEffortUSOrNull>();
     factory.registerFunction<FunctionParseDateTime32BestEffort>();
     factory.registerFunction<FunctionParseDateTime32BestEffortOrZero>();
     factory.registerFunction<FunctionParseDateTime32BestEffortOrNull>();
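
Aside (not part of the diff): DATE should now work as a case-insensitive alias of toDate, e.g.:

    SELECT DATE('2021-01-28')   -- 2021-01-28, same as toDate('2021-01-28')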

View File

@@ -955,6 +955,12 @@ struct ConvertThroughParsing
                     vec_to[i] = res;
                 }
             }
+            else if constexpr (parsing_mode == ConvertFromStringParsingMode::BestEffortUS)
+            {
+                time_t res;
+                parsed = tryParseDateTimeBestEffortUS(res, read_buffer, *local_time_zone, *utc_time_zone);
+                vec_to[i] = res;
+            }
             else
             {
                 if constexpr (to_datetime64)
@@ -2003,9 +2009,11 @@ using FunctionToDecimal256OrNull = FunctionConvertFromString<DataTypeDecimal<Dec
 using FunctionToUUIDOrNull = FunctionConvertFromString<DataTypeUUID, NameToUUIDOrNull, ConvertFromStringExceptionMode::Null>;

 struct NameParseDateTimeBestEffort { static constexpr auto name = "parseDateTimeBestEffort"; };
-struct NameParseDateTimeBestEffortUS { static constexpr auto name = "parseDateTimeBestEffortUS"; };
 struct NameParseDateTimeBestEffortOrZero { static constexpr auto name = "parseDateTimeBestEffortOrZero"; };
 struct NameParseDateTimeBestEffortOrNull { static constexpr auto name = "parseDateTimeBestEffortOrNull"; };
+struct NameParseDateTimeBestEffortUS { static constexpr auto name = "parseDateTimeBestEffortUS"; };
+struct NameParseDateTimeBestEffortUSOrZero { static constexpr auto name = "parseDateTimeBestEffortUSOrZero"; };
+struct NameParseDateTimeBestEffortUSOrNull { static constexpr auto name = "parseDateTimeBestEffortUSOrNull"; };
 struct NameParseDateTime32BestEffort { static constexpr auto name = "parseDateTime32BestEffort"; };
 struct NameParseDateTime32BestEffortOrZero { static constexpr auto name = "parseDateTime32BestEffortOrZero"; };
 struct NameParseDateTime32BestEffortOrNull { static constexpr auto name = "parseDateTime32BestEffortOrNull"; };
@@ -2016,13 +2024,18 @@ struct NameParseDateTime64BestEffortOrNull { static constexpr auto name = "parse
 using FunctionParseDateTimeBestEffort = FunctionConvertFromString<
     DataTypeDateTime, NameParseDateTimeBestEffort, ConvertFromStringExceptionMode::Throw, ConvertFromStringParsingMode::BestEffort>;
-using FunctionParseDateTimeBestEffortUS = FunctionConvertFromString<
-    DataTypeDateTime, NameParseDateTimeBestEffortUS, ConvertFromStringExceptionMode::Throw, ConvertFromStringParsingMode::BestEffortUS>;
 using FunctionParseDateTimeBestEffortOrZero = FunctionConvertFromString<
     DataTypeDateTime, NameParseDateTimeBestEffortOrZero, ConvertFromStringExceptionMode::Zero, ConvertFromStringParsingMode::BestEffort>;
 using FunctionParseDateTimeBestEffortOrNull = FunctionConvertFromString<
     DataTypeDateTime, NameParseDateTimeBestEffortOrNull, ConvertFromStringExceptionMode::Null, ConvertFromStringParsingMode::BestEffort>;
+using FunctionParseDateTimeBestEffortUS = FunctionConvertFromString<
+    DataTypeDateTime, NameParseDateTimeBestEffortUS, ConvertFromStringExceptionMode::Throw, ConvertFromStringParsingMode::BestEffortUS>;
+using FunctionParseDateTimeBestEffortUSOrZero = FunctionConvertFromString<
+    DataTypeDateTime, NameParseDateTimeBestEffortUSOrZero, ConvertFromStringExceptionMode::Zero, ConvertFromStringParsingMode::BestEffortUS>;
+using FunctionParseDateTimeBestEffortUSOrNull = FunctionConvertFromString<
+    DataTypeDateTime, NameParseDateTimeBestEffortUSOrNull, ConvertFromStringExceptionMode::Null, ConvertFromStringParsingMode::BestEffortUS>;
 using FunctionParseDateTime32BestEffort = FunctionConvertFromString<
     DataTypeDateTime, NameParseDateTime32BestEffort, ConvertFromStringExceptionMode::Throw, ConvertFromStringParsingMode::BestEffort>;
 using FunctionParseDateTime32BestEffortOrZero = FunctionConvertFromString<
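
Aside (not part of the diff): the US variants read ambiguous day/month order as month-first, and the OrZero/OrNull forms absorb parse failures instead of throwing, e.g.:

    SELECT parseDateTimeBestEffortUS('02/10/2021')        -- 2021-02-10 00:00:00 (month first)
    SELECT parseDateTimeBestEffortUSOrNull('not a date')  -- NULL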

View File

@@ -806,16 +806,7 @@ private:
             size_t size = vec_from.size();
             for (size_t i = 0; i < size; ++i)
             {
-                ToType h;
-                if constexpr (OverBigInt<FromType>)
-                {
-                    using NativeT = typename NativeType<FromType>::Type;
-                    std::string buffer = BigInt<NativeT>::serialize(vec_from[i]);
-                    h = Impl::apply(buffer.data(), buffer.size());
-                }
-                else
-                    h = Impl::apply(reinterpret_cast<const char *>(&vec_from[i]), sizeof(vec_from[i]));
+                ToType h = Impl::apply(reinterpret_cast<const char *>(&vec_from[i]), sizeof(vec_from[i]));

                 if constexpr (first)
                     vec_to[i] = h;
@@ -827,16 +818,7 @@ private:
         {
             auto value = col_from_const->template getValue<FromType>();

-            ToType h;
-            if constexpr (OverBigInt<FromType>)
-            {
-                using NativeT = typename NativeType<FromType>::Type;
-                std::string buffer = BigInt<NativeT>::serialize(value);
-                h = Impl::apply(buffer.data(), buffer.size());
-            }
-            else
-                h = Impl::apply(reinterpret_cast<const char *>(&value), sizeof(value));
+            ToType h = Impl::apply(reinterpret_cast<const char *>(&value), sizeof(value));

             size_t size = vec_to.size();
             if constexpr (first)

View File

@@ -0,0 +1,67 @@
#pragma once

#include <DataTypes/NumberTraits.h>
#include <Common/Exception.h>

#include <numeric>
#include <limits>
#include <type_traits>

#if !defined(ARCADIA_BUILD)
#    include "config_core.h"
#endif

namespace DB
{

namespace ErrorCodes
{
    extern const int NOT_IMPLEMENTED;
    extern const int DECIMAL_OVERFLOW;
}

template <class T>
inline constexpr bool is_gcd_lcm_implemented = !(is_big_int_v<T> || std::is_floating_point_v<T>);

template <typename A, typename B, typename Impl, typename Name>
struct GCDLCMImpl
{
    using ResultType = typename NumberTraits::ResultOfAdditionMultiplication<A, B>::Type;
    static const constexpr bool allow_fixed_string = false;

    template <typename Result = ResultType>
    static inline std::enable_if_t<!is_gcd_lcm_implemented<Result>, Result>
    apply(A, B)
    {
        throw Exception(ErrorCodes::NOT_IMPLEMENTED, "{} is not implemented for big integers and floats", Name::name);
    }

    template <typename Result = ResultType>
    static inline std::enable_if_t<is_gcd_lcm_implemented<Result>, Result>
    apply(A a, B b)
    {
        throwIfDivisionLeadsToFPE(typename NumberTraits::ToInteger<A>::Type(a), typename NumberTraits::ToInteger<B>::Type(b));
        throwIfDivisionLeadsToFPE(typename NumberTraits::ToInteger<B>::Type(b), typename NumberTraits::ToInteger<A>::Type(a));

        using Int = typename NumberTraits::ToInteger<Result>::Type;

        if constexpr (is_signed_v<Result>)
        {
            /// gcd() internally uses std::abs()
            Int a_s = static_cast<Int>(a);
            Int b_s = static_cast<Int>(b);
            Int min = std::numeric_limits<Int>::min();
            Int max = std::numeric_limits<Int>::max();

            if (unlikely((a_s == min || a_s == max) || (b_s == min || b_s == max)))
                throw Exception(ErrorCodes::DECIMAL_OVERFLOW, "Intermediate result overflow (signed a = {}, signed b = {}, min = {}, max = {})", a_s, b_s, min, max);
        }

        return Impl::applyImpl(a, b);
    }

#if USE_EMBEDDED_COMPILER
    static constexpr bool compilable = false; /// exceptions (and a non-trivial algorithm)
#endif
};

}
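
Aside (not part of the diff): the min/max guard exists because gcd() takes std::abs() of its arguments internally, and abs of the minimum value of a signed type overflows. Ordinary values behave as before, e.g.:

    SELECT gcd(12, 18)   -- 6
    SELECT lcm(4, 6)     -- 12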

View File

@@ -50,9 +50,9 @@ void writeSlice(const NumericArraySlice<T> & slice, NumericArraySink<U> & sink)
             throw Exception("No conversion between UInt128 and " + demangle(typeid(T).name()), ErrorCodes::NOT_IMPLEMENTED);
         }
         else if constexpr (IsDecimalNumber<T>)
-            dst = bigint_cast<NativeU>(src.value);
+            dst = static_cast<NativeU>(src.value);
         else
-            dst = bigint_cast<NativeU>(src);
+            dst = static_cast<NativeU>(src);
     }
     else
         dst = static_cast<NativeU>(src);

View File

@@ -20,6 +20,9 @@ template <typename, typename> struct NotEqualsOp;
 template <typename, typename> struct LessOrEqualsOp;
 template <typename, typename> struct GreaterOrEqualsOp;

+template <typename>
+struct SignImpl;
+
 template <template <typename, typename> typename Op1, template <typename, typename> typename Op2>
 struct IsSameOperation
 {
@@ -31,6 +34,7 @@ struct IsUnaryOperation
 {
     static constexpr bool abs = std::is_same_v<Op<Int8>, AbsImpl<Int8>>;
     static constexpr bool negate = std::is_same_v<Op<Int8>, NegateImpl<Int8>>;
+    static constexpr bool sign = std::is_same_v<Op<Int8>, SignImpl<Int8>>;
 };

 template <template <typename, typename> typename Op>

View File

@@ -10,6 +10,9 @@ void registerFunctionBase64Decode(FunctionFactory & factory)
 {
     tb64ini(0, 1);
     factory.registerFunction<FunctionBase64Conversion<Base64Decode>>();
+
+    /// MySQL compatibility alias.
+    factory.registerFunction<FunctionBase64Conversion<Base64Decode>>("FROM_BASE64", FunctionFactory::CaseInsensitive);
 }
 }

 #endif

View File

@@ -14,6 +14,9 @@ void registerFunctionBase64Encode(FunctionFactory & factory)
 {
     tb64ini(0, 1);
     factory.registerFunction<FunctionBase64Conversion<Base64Encode>>();
+
+    /// MySQL compatibility alias.
+    factory.registerFunction<FunctionBase64Conversion<Base64Encode>>("TO_BASE64", FunctionFactory::CaseInsensitive);
 }
 }

 #endif
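
Aside (not part of the diff): TO_BASE64 and FROM_BASE64 become case-insensitive aliases of base64Encode and base64Decode, e.g.:

    SELECT TO_BASE64('Hello')        -- 'SGVsbG8='
    SELECT from_base64('SGVsbG8=')   -- 'Hello'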

View File

@@ -21,7 +21,7 @@ struct BitAndImpl
     template <typename Result = ResultType>
     static inline Result apply(A a, B b)
     {
-        return bigint_cast<Result>(a) & bigint_cast<Result>(b);
+        return static_cast<Result>(a) & static_cast<Result>(b);
     }

 #if USE_EMBEDDED_COMPILER

View File

@@ -20,7 +20,7 @@ struct BitOrImpl
     template <typename Result = ResultType>
     static inline Result apply(A a, B b)
     {
-        return bigint_cast<Result>(a) | bigint_cast<Result>(b);
+        return static_cast<Result>(a) | static_cast<Result>(b);
     }

 #if USE_EMBEDDED_COMPILER

View File

@@ -24,7 +24,7 @@ struct BitShiftLeftImpl
         if constexpr (is_big_int_v<B>)
             throw Exception("BitShiftLeft is not implemented for big integers as second argument", ErrorCodes::NOT_IMPLEMENTED);
         else if constexpr (is_big_int_v<A>)
-            return bigint_cast<Result>(a) << bigint_cast<UInt32>(b);
+            return static_cast<Result>(a) << static_cast<UInt32>(b);
         else
             return static_cast<Result>(a) << static_cast<Result>(b);
     }

Some files were not shown because too many files have changed in this diff.