mirror of https://github.com/ClickHouse/ClickHouse.git
synced 2024-11-27 10:02:01 +00:00

Merge branch 'master' into tuple-of-intervals
This commit is contained in commit 2b286222f7.

.github/workflows/debug.yml (vendored, 2 changes)
@@ -2,7 +2,7 @@
 name: Debug

 'on':
-[push, pull_request, release, workflow_dispatch]
+[push, pull_request, release, workflow_dispatch, workflow_call]

 jobs:
 DebugInfo:
.github/workflows/nightly.yml (vendored, 3 changes)
@@ -10,6 +10,9 @@ env:
 workflow_dispatch:

 jobs:
+  Debug:
+    # The task for having a preserved ENV and event.json for later investigation
+    uses: ./.github/workflows/debug.yml
   DockerHubPushAarch64:
     runs-on: [self-hosted, style-checker-aarch64]
     steps:
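Taken together, these two workflow edits wire up GitHub Actions' reusable-workflow mechanism: `debug.yml` gains the `workflow_call` trigger, which is what allows `nightly.yml` to invoke it as a job via `uses:`. A minimal sketch of the pattern, assuming nothing beyond the names that appear in the diff:

``` yaml
# Callee (debug.yml) must list workflow_call among its triggers:
'on': [workflow_dispatch, workflow_call]

# Caller (nightly.yml) can then run it as an ordinary job:
jobs:
  Debug:
    uses: ./.github/workflows/debug.yml
```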
@@ -1,8 +1,10 @@
 #if defined(OS_LINUX)
 #   include <sys/syscall.h>
 #endif
+#include <cstdlib>
 #include <unistd.h>
 #include <base/safeExit.h>
+#include <base/defines.h> /// for THREAD_SANITIZER

 [[noreturn]] void safeExit(int code)
 {
@@ -8,6 +8,8 @@
 #include <link.h> // ElfW
 #include <errno.h>

+#include "syscall.h"
+
 #define ARRAY_SIZE(a) sizeof((a))/sizeof((a[0]))

 /// Suppress TSan since it is possible for this code to be called from multiple threads,
@@ -39,7 +41,9 @@ ssize_t __retry_read(int fd, void * buf, size_t count)
 {
     for (;;)
     {
-        ssize_t ret = read(fd, buf, count);
+        // We cannot use the read syscall as it will be intercepted by sanitizers, which aren't
+        // initialized yet. Emit the syscall directly.
+        ssize_t ret = __syscall_ret(__syscall(SYS_read, fd, buf, count));
         if (ret == -1)
         {
             if (errno == EINTR)
@@ -90,6 +94,11 @@ static unsigned long NO_SANITIZE_THREAD __auxv_init_procfs(unsigned long type)
     _Static_assert(sizeof(aux) < 4096, "Unexpected sizeof(aux)");
     while (__retry_read(fd, &aux, sizeof(aux)) == sizeof(aux))
     {
+#if defined(__has_feature)
+#if __has_feature(memory_sanitizer)
+        __msan_unpoison(&aux, sizeof(aux));
+#endif
+#endif
         if (aux.a_type == AT_NULL)
         {
             break;
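For readers unfamiliar with the pattern above: the point of `__syscall(SYS_read, ...)` is to bypass the libc `read()` wrapper, which sanitizer runtimes intercept before they are initialized. A rough, self-contained sketch of the same idea using the public `syscall(2)` interface (the project's internal `__syscall`/`__syscall_ret` helpers are not shown here; this is an illustration, not the committed code):

``` c
#include <sys/syscall.h>
#include <unistd.h>
#include <errno.h>

/* Read that is not routed through the (possibly intercepted) libc read(). */
static ssize_t raw_read(int fd, void * buf, size_t count)
{
    for (;;)
    {
        ssize_t ret = syscall(SYS_read, fd, buf, count);
        if (ret == -1 && errno == EINTR)
            continue; /* retry on interruption, like __retry_read above */
        return ret;
    }
}
```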
contrib/NuRaft (vendored, 2 changes)
@@ -1 +1 @@
-Subproject commit 1be805e7cb2494aa8170015493474379b0362dfc
+Subproject commit e4e746a24eb56861a86f3672771e3308d8c40722
@@ -1,7 +1,7 @@
 # docker build -t clickhouse/style-test .
 FROM ubuntu:20.04
-ARG ACT_VERSION=0.2.25
-ARG ACTIONLINT_VERSION=1.6.8
+ARG ACT_VERSION=0.2.33
+ARG ACTIONLINT_VERSION=1.6.22

 # ARG for quick switch to a given ubuntu mirror
 ARG apt_archive="http://archive.ubuntu.com"
@@ -86,7 +86,7 @@ node1 :) SELECT materialize(hostName()) AS host, groupArray(n) FROM r.d GROUP BY

 ``` text
 ┌─hosts─┬─groupArray(n)─┐
-│ node1 │ [1,3,5,7,9]   │
+│ node3 │ [1,3,5,7,9]   │
 │ node2 │ [0,2,4,6,8]   │
 └───────┴───────────────┘
 ```
@@ -68,36 +68,57 @@ In the results of `SELECT` query, the values of `AggregateFunction` type have im

 ## Example of an Aggregated Materialized View {#example-of-an-aggregated-materialized-view}

-`AggregatingMergeTree` materialized view that watches the `test.visits` table:
+We will create the table `test.visits` that contains the raw data:

 ``` sql
-CREATE MATERIALIZED VIEW test.basic
-ENGINE = AggregatingMergeTree() PARTITION BY toYYYYMM(StartDate) ORDER BY (CounterID, StartDate)
+CREATE TABLE test.visits
+(
+    StartDate DateTime64 NOT NULL,
+    CounterID UInt64,
+    Sign Nullable(Int32),
+    UserID Nullable(Int32)
+) ENGINE = MergeTree ORDER BY (StartDate, CounterID);
+```
+
+An `AggregatingMergeTree` materialized view that watches the `test.visits` table and uses the `AggregateFunction` type:
+
+``` sql
+CREATE MATERIALIZED VIEW test.mv_visits
+(
+    StartDate DateTime64 NOT NULL,
+    CounterID UInt64,
+    Visits AggregateFunction(sum, Nullable(Int32)),
+    Users AggregateFunction(uniq, Nullable(Int32))
+)
+ENGINE = AggregatingMergeTree() ORDER BY (StartDate, CounterID)
 AS SELECT
-    CounterID,
     StartDate,
-    sumState(Sign) AS Visits,
+    CounterID,
+    sumState(Sign) AS Visits,
     uniqState(UserID) AS Users
 FROM test.visits
-GROUP BY CounterID, StartDate;
+GROUP BY StartDate, CounterID;
 ```

 Inserting data into the `test.visits` table.

 ``` sql
-INSERT INTO test.visits ...
+INSERT INTO test.visits (StartDate, CounterID, Sign, UserID)
+    VALUES (1667446031, 1, 3, 4);
+INSERT INTO test.visits (StartDate, CounterID, Sign, UserID)
+    VALUES (1667446031, 1, 6, 3);
 ```

-The data are inserted in both the table and view `test.basic` that will perform the aggregation.
+The data are inserted in both the table and the materialized view `test.mv_visits`.

-To get the aggregated data, we need to execute a query such as `SELECT ... GROUP BY ...` from the view `test.basic`:
+To get the aggregated data, we need to execute a query such as `SELECT ... GROUP BY ...` from the materialized view `test.mv_visits`:

 ``` sql
 SELECT
     StartDate,
     sumMerge(Visits) AS Visits,
     uniqMerge(Users) AS Users
-FROM test.basic
+FROM test.mv_visits
 GROUP BY StartDate
 ORDER BY StartDate;
 ```
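A hedged aside on the `-State`/`-Merge` pairing used in the new example: `sumState` stores an intermediate aggregation state in the view, and `sumMerge` folds those states back into a final value at query time. A quick, self-contained way to see the pairing (this query is an illustration and is not part of the documentation diff; `finalizeAggregation` is the standard ClickHouse function for finishing a state in place):

``` sql
SELECT finalizeAggregation(sumState(number)) AS total
FROM numbers(10);   -- expected to return 45
```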
@@ -1,9 +1,5 @@
----
-slug: /en/operations/troubleshooting
-sidebar_position: 46
-sidebar_label: Troubleshooting
-title: Troubleshooting
----
+[//]: # (This file is included in FAQ > Troubleshooting)

 - [Installation](#troubleshooting-installation-errors)
 - [Connecting to the server](#troubleshooting-accepts-no-connections)
|
|||||||
|
|
||||||
ClickHouse Keeper also provides 4lw commands which are almost the same with Zookeeper. Each command is composed of four letters such as `mntr`, `stat` etc. There are some more interesting commands: `stat` gives some general information about the server and connected clients, while `srvr` and `cons` give extended details on server and connections respectively.
|
ClickHouse Keeper also provides 4lw commands which are almost the same with Zookeeper. Each command is composed of four letters such as `mntr`, `stat` etc. There are some more interesting commands: `stat` gives some general information about the server and connected clients, while `srvr` and `cons` give extended details on server and connections respectively.
|
||||||
|
|
||||||
The 4lw commands has a white list configuration `four_letter_word_white_list` which has default value `conf,cons,crst,envi,ruok,srst,srvr,stat,wchc,wchs,dirs,mntr,isro`.
|
The 4lw commands has a white list configuration `four_letter_word_white_list` which has default value `conf,cons,crst,envi,ruok,srst,srvr,stat,wchs,dirs,mntr,isro,rcvr,apiv,csnp,lgif`.
|
||||||
|
|
||||||
You can issue the commands to ClickHouse Keeper via telnet or nc, at the client port.
|
You can issue the commands to ClickHouse Keeper via telnet or nc, at the client port.
|
||||||
|
|
||||||
@ -309,6 +309,25 @@ Sessions with Ephemerals (1):
|
|||||||
/clickhouse/task_queue/ddl
|
/clickhouse/task_queue/ddl
|
||||||
```
|
```
|
||||||
|
|
||||||
|
- `csnp`: Schedule a snapshot creation task. Return the last committed log index of the scheduled snapshot if success or `Failed to schedule snapshot creation task.` if failed. Note that `lgif` command can help you determine whether the snapshot is done.
|
||||||
|
|
||||||
|
```
|
||||||
|
100
|
||||||
|
```
|
||||||
|
|
||||||
|
- `lgif`: Keeper log information. `first_log_idx` : my first log index in log store; `first_log_term` : my first log term; `last_log_idx` : my last log index in log store; `last_log_term` : my last log term; `last_committed_log_idx` : my last committed log index in state machine; `leader_committed_log_idx` : leader's committed log index from my perspective; `target_committed_log_idx` : target log index should be committed to; `last_snapshot_idx` : the largest committed log index in last snapshot.
|
||||||
|
|
||||||
|
```
|
||||||
|
first_log_idx 1
|
||||||
|
first_log_term 1
|
||||||
|
last_log_idx 101
|
||||||
|
last_log_term 1
|
||||||
|
last_committed_log_idx 100
|
||||||
|
leader_committed_log_idx 101
|
||||||
|
target_committed_log_idx 101
|
||||||
|
last_snapshot_idx 50
|
||||||
|
```
|
||||||
|
|
||||||
## Migration from ZooKeeper {#migration-from-zookeeper}
|
## Migration from ZooKeeper {#migration-from-zookeeper}
|
||||||
|
|
||||||
Seamlessly migration from ZooKeeper to ClickHouse Keeper is impossible you have to stop your ZooKeeper cluster, convert data and start ClickHouse Keeper. `clickhouse-keeper-converter` tool allows converting ZooKeeper logs and snapshots to ClickHouse Keeper snapshot. It works only with ZooKeeper > 3.4. Steps for migration:
|
Seamlessly migration from ZooKeeper to ClickHouse Keeper is impossible you have to stop your ZooKeeper cluster, convert data and start ClickHouse Keeper. `clickhouse-keeper-converter` tool allows converting ZooKeeper logs and snapshots to ClickHouse Keeper snapshot. It works only with ZooKeeper > 3.4. Steps for migration:
|
||||||
|
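Circling back to the two commands documented above: like the other 4lw commands, `csnp` and `lgif` are issued at the Keeper client port via `nc` or telnet. A hedged usage sketch (host and port are assumptions, not taken from this diff):

``` bash
echo csnp | nc localhost 9181   # schedules a snapshot, prints the scheduled log index
echo lgif | nc localhost 9181   # prints the Raft log information shown above
```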
@@ -24,6 +24,7 @@ Columns:
     - `DOUBLE_SHA1_PASSWORD`
     - `LDAP`
     - `KERBEROS`
+    - `SSL_CERTIFICATE`
 - `profiles` ([Array](../../sql-reference/data-types/array.md)([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md))) — The list of profiles set for all roles and/or users.
 - `roles` ([Array](../../sql-reference/data-types/array.md)([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md))) — The list of roles to which the profile is applied.
 - `settings` ([Array](../../sql-reference/data-types/array.md)([Tuple](../../sql-reference/data-types/tuple.md)([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md), [String](../../sql-reference/data-types/string.md)))) — Settings that were changed when the client logged in/out.
@@ -12,7 +12,7 @@ Columns:

 - `storage` ([String](../../sql-reference/data-types/string.md)) — Path to the storage of users. Configured in the `access_control_path` parameter.

-- `auth_type` ([Enum8](../../sql-reference/data-types/enum.md)('no_password' = 0,'plaintext_password' = 1, 'sha256_password' = 2, 'double_sha1_password' = 3)) — Shows the authentication type. There are multiple ways of user identification: with no password, with plain text password, with [SHA256](https://ru.wikipedia.org/wiki/SHA-2)-encoded password or with [double SHA-1](https://ru.wikipedia.org/wiki/SHA-1)-encoded password.
+- `auth_type` ([Enum8](../../sql-reference/data-types/enum.md)('no_password' = 0,'plaintext_password' = 1, 'sha256_password' = 2, 'double_sha1_password' = 3, 'ldap' = 4, 'kerberos' = 5, 'ssl_certificate' = 6)) — Shows the authentication type. There are multiple ways of user identification: with no password, with plain text password, with [SHA256](https://ru.wikipedia.org/wiki/SHA-2)-encoded password or with [double SHA-1](https://ru.wikipedia.org/wiki/SHA-1)-encoded password.

 - `auth_params` ([String](../../sql-reference/data-types/string.md)) — Authentication parameters in the JSON format depending on the `auth_type`.
@@ -109,56 +109,38 @@ In the report you can find:

 `clickhouse-benchmark` can compare performances for two running ClickHouse servers.

-To use the comparison mode, specify endpoints of both servers by two pairs of `--host`, `--port` keys. Keys matched together by position in arguments list, the first `--host` is matched with the first `--port` and so on. `clickhouse-benchmark` establishes connections to both servers, then sends queries. Each query addressed to a randomly selected server. The results are shown for each server separately.
+To use the comparison mode, specify the endpoints of both servers by two pairs of `--host`, `--port` keys. Keys are matched together by position in the argument list: the first `--host` is matched with the first `--port`, and so on. `clickhouse-benchmark` establishes connections to both servers, then sends queries. Each query is addressed to a randomly selected server. The results are shown in a table.

 ## Example {#clickhouse-benchmark-example}

 ``` bash
-$ echo "SELECT * FROM system.numbers LIMIT 10000000 OFFSET 10000000" | clickhouse-benchmark -i 10
+$ echo "SELECT * FROM system.numbers LIMIT 10000000 OFFSET 10000000" | clickhouse-benchmark --host=localhost --port=9001 --host=localhost --port=9000 -i 10
 ```

 ``` text
 Loaded 1 queries.

-Queries executed: 6.
+Queries executed: 5.

-localhost:9000, queries 6, QPS: 6.153, RPS: 123398340.957, MiB/s: 941.455, result RPS: 61532982.200, result MiB/s: 469.459.
+localhost:9001, queries 2, QPS: 3.764, RPS: 75446929.370, MiB/s: 575.614, result RPS: 37639659.982, result MiB/s: 287.168.
+localhost:9000, queries 3, QPS: 3.815, RPS: 76466659.385, MiB/s: 583.394, result RPS: 38148392.297, result MiB/s: 291.049.

-0.000%      0.159 sec.
-10.000%     0.159 sec.
-20.000%     0.159 sec.
-30.000%     0.160 sec.
-40.000%     0.160 sec.
-50.000%     0.162 sec.
-60.000%     0.164 sec.
-70.000%     0.165 sec.
-80.000%     0.166 sec.
-90.000%     0.166 sec.
-95.000%     0.167 sec.
-99.000%     0.167 sec.
-99.900%     0.167 sec.
-99.990%     0.167 sec.
+0.000%      0.258 sec.  0.250 sec.
+10.000%     0.258 sec.  0.250 sec.
+20.000%     0.258 sec.  0.250 sec.
+30.000%     0.258 sec.  0.267 sec.
+40.000%     0.258 sec.  0.267 sec.
+50.000%     0.273 sec.  0.267 sec.
+60.000%     0.273 sec.  0.267 sec.
+70.000%     0.273 sec.  0.267 sec.
+80.000%     0.273 sec.  0.269 sec.
+90.000%     0.273 sec.  0.269 sec.
+95.000%     0.273 sec.  0.269 sec.
+99.000%     0.273 sec.  0.269 sec.
+99.900%     0.273 sec.  0.269 sec.
+99.990%     0.273 sec.  0.269 sec.
+
+No difference proven at 99.5% confidence

-Queries executed: 10.
-
-localhost:9000, queries 10, QPS: 6.082, RPS: 121959604.568, MiB/s: 930.478, result RPS: 60815551.642, result MiB/s: 463.986.
-
-0.000%      0.159 sec.
-10.000%     0.159 sec.
-20.000%     0.160 sec.
-30.000%     0.163 sec.
-40.000%     0.164 sec.
-50.000%     0.165 sec.
-60.000%     0.166 sec.
-70.000%     0.166 sec.
-80.000%     0.167 sec.
-90.000%     0.167 sec.
-95.000%     0.170 sec.
-99.000%     0.172 sec.
-99.900%     0.172 sec.
-99.990%     0.172 sec.
 ```

 [Original article](https://clickhouse.com/docs/en/operations/utilities/clickhouse-benchmark.md) <!--hide-->
@@ -593,6 +593,27 @@ LIMIT 10
 └────────────────┴─────────┘
 ```

+## formatReadableDecimalSize(x)
+
+Accepts the size (number of bytes). Returns a rounded size with a suffix (KB, MB, etc.) as a string.
+
+Example:
+
+``` sql
+SELECT
+    arrayJoin([1, 1024, 1024*1024, 192851925]) AS filesize_bytes,
+    formatReadableDecimalSize(filesize_bytes) AS filesize
+```
+
+``` text
+┌─filesize_bytes─┬─filesize───┐
+│              1 │ 1.00 B     │
+│           1024 │ 1.02 KB    │
+│        1048576 │ 1.05 MB    │
+│      192851925 │ 192.85 MB  │
+└────────────────┴────────────┘
+```
+
 ## formatReadableSize(x)

 Accepts the size (number of bytes). Returns a rounded size with a suffix (KiB, MiB, etc.) as a string.
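Since the new `formatReadableDecimalSize` sits next to the existing `formatReadableSize`, the practical difference is the unit base; a small comparison sketch (not part of the documentation diff):

``` sql
SELECT
    formatReadableSize(1000000)        AS binary_units,   -- 1024-based: KiB, MiB, ...
    formatReadableDecimalSize(1000000) AS decimal_units;  -- 1000-based: KB, MB, ...
```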
@@ -1150,3 +1150,13 @@ A text with tags .
 The content within <b>CDATA</b>
 Do Nothing for 2 Minutes 2:00
 ```
+
+## ascii(s) {#ascii}
+
+Returns the ASCII code point of the first character of `s`. The result type is Int32.
+
+If `s` is empty, the result is 0. If the first character is not an ASCII character or not part of the Latin-1 Supplement range of UTF-16, the result is undefined.
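A minimal check of the new function as described (illustrative only, not an example from the diff):

``` sql
SELECT ascii('x');   -- 120, the ASCII code point of 'x'
SELECT ascii('');    -- 0 for an empty string, per the description above
```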
@@ -12,7 +12,7 @@ Syntax:

 ``` sql
 ALTER USER [IF EXISTS] name1 [ON CLUSTER cluster_name1] [RENAME TO new_name1]
         [, name2 [ON CLUSTER cluster_name2] [RENAME TO new_name2] ...]
-    [NOT IDENTIFIED | IDENTIFIED {[WITH {no_password | plaintext_password | sha256_password | sha256_hash | double_sha1_password | double_sha1_hash}] BY {'password' | 'hash'}} | {WITH ldap SERVER 'server_name'} | {WITH kerberos [REALM 'realm']}]
+    [NOT IDENTIFIED | IDENTIFIED {[WITH {no_password | plaintext_password | sha256_password | sha256_hash | double_sha1_password | double_sha1_hash}] BY {'password' | 'hash'}} | {WITH ldap SERVER 'server_name'} | {WITH kerberos [REALM 'realm']} | {WITH ssl_certificate CN 'common_name'}]
     [[ADD | DROP] HOST {LOCAL | NAME 'name' | REGEXP 'name_regexp' | IP 'address' | LIKE 'pattern'} [,...] | ANY | NONE]
     [DEFAULT ROLE role [,...] | ALL | ALL EXCEPT role [,...] ]
     [GRANTEES {user | role | ANY | NONE} [,...] [EXCEPT {user | role} [,...]]]
@@ -8,7 +8,7 @@ title: "CHECK TABLE Statement"
 Checks if the data in the table is corrupted.

 ``` sql
-CHECK TABLE [db.]name
+CHECK TABLE [db.]name [PARTITION partition_expr]
 ```

 The `CHECK TABLE` query compares actual file sizes with the expected values which are stored on the server. If the file sizes do not match the stored values, it means the data is corrupted. This can be caused, for example, by a system crash during query execution.
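A hedged illustration of the extended syntax; the table name and partition value below are made up, and the accepted `partition_expr` forms are assumed to follow the usual partition expressions:

``` sql
CHECK TABLE visits;                    -- whole table, as before
CHECK TABLE visits PARTITION 202210;   -- only the given partition
```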
@@ -12,7 +12,7 @@ Syntax:

 ``` sql
 CREATE USER [IF NOT EXISTS | OR REPLACE] name1 [ON CLUSTER cluster_name1]
         [, name2 [ON CLUSTER cluster_name2] ...]
-    [NOT IDENTIFIED | IDENTIFIED {[WITH {no_password | plaintext_password | sha256_password | sha256_hash | double_sha1_password | double_sha1_hash}] BY {'password' | 'hash'}} | {WITH ldap SERVER 'server_name'} | {WITH kerberos [REALM 'realm']}]
+    [NOT IDENTIFIED | IDENTIFIED {[WITH {no_password | plaintext_password | sha256_password | sha256_hash | double_sha1_password | double_sha1_hash}] BY {'password' | 'hash'}} | {WITH ldap SERVER 'server_name'} | {WITH kerberos [REALM 'realm']} | {WITH ssl_certificate CN 'common_name'}]
     [HOST {LOCAL | NAME 'name' | REGEXP 'name_regexp' | IP 'address' | LIKE 'pattern'} [,...] | ANY | NONE]
     [DEFAULT ROLE role [,...]]
     [DEFAULT DATABASE database | NONE]
@@ -34,6 +34,7 @@ There are multiple ways of user identification:
 - `IDENTIFIED WITH double_sha1_hash BY 'hash'`
 - `IDENTIFIED WITH ldap SERVER 'server_name'`
 - `IDENTIFIED WITH kerberos` or `IDENTIFIED WITH kerberos REALM 'realm'`
+- `IDENTIFIED WITH ssl_certificate CN 'mysite.com:user'`

 For identification with sha256_hash using `SALT`, the hash must be calculated from the concatenation of 'password' and 'salt'.
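Putting the new identification clause to use (the user name is an assumption; the CN value mirrors the example added to the list above):

``` sql
CREATE USER cert_user IDENTIFIED WITH ssl_certificate CN 'mysite.com:user';
```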
@@ -281,8 +281,8 @@ After running this statement the `[db.]replicated_merge_tree_family_table_name`

 ### RESTART REPLICA

-Provides possibility to reinitialize Zookeeper sessions state for `ReplicatedMergeTree` table, will compare current state with Zookeeper as source of true and add tasks to Zookeeper queue if needed.
-Initialization replication queue based on ZooKeeper date happens in the same way as `ATTACH TABLE` statement. For a short time the table will be unavailable for any operations.
+Provides possibility to reinitialize Zookeeper session's state for `ReplicatedMergeTree` table, will compare current state with Zookeeper as source of truth and add tasks to Zookeeper queue if needed.
+Initialization of replication queue based on ZooKeeper data happens in the same way as for `ATTACH TABLE` statement. For a short time, the table will be unavailable for any operations.

 ``` sql
 SYSTEM RESTART REPLICA [db.]replicated_merge_tree_family_table_name
@@ -1088,7 +1088,8 @@ void Client::processConfig()
     }
     else
     {
-        need_render_progress = config().getBool("progress", false);
+        std::string progress = config().getString("progress", "tty");
+        need_render_progress = (Poco::icompare(progress, "off") && Poco::icompare(progress, "no") && Poco::icompare(progress, "false") && Poco::icompare(progress, "0"));
         echo_queries = config().getBool("echo", false);
         ignore_error = config().getBool("ignore-error", false);

@@ -489,7 +489,8 @@ void LocalServer::processConfig()
     }
     else
    {
-        need_render_progress = config().getBool("progress", false);
+        std::string progress = config().getString("progress", "tty");
+        need_render_progress = (Poco::icompare(progress, "off") && Poco::icompare(progress, "no") && Poco::icompare(progress, "false") && Poco::icompare(progress, "0"));
         echo_queries = config().hasOption("echo") || config().hasOption("verbose");
         ignore_error = config().getBool("ignore-error", false);
         is_multiquery = true;
@@ -65,10 +65,12 @@
 #include <Interpreters/ReplaceQueryParameterVisitor.h>
 #include <Interpreters/ProfileEventsExt.h>
 #include <IO/WriteBufferFromOStream.h>
+#include <IO/WriteBufferFromFileDescriptor.h>
 #include <IO/CompressionMethod.h>
 #include <Client/InternalTextLogs.h>
 #include <IO/ForkWriteBuffer.h>
 #include <Parsers/Kusto/ParserKQLStatement.h>
+#include <boost/algorithm/string/case_conv.hpp>


 namespace fs = std::filesystem;
@@ -103,6 +105,7 @@ namespace ErrorCodes
     extern const int CANNOT_SET_SIGNAL_HANDLER;
     extern const int UNRECOGNIZED_ARGUMENTS;
     extern const int LOGICAL_ERROR;
+    extern const int CANNOT_OPEN_FILE;
 }

 }
@@ -116,6 +119,25 @@ namespace ProfileEvents
 namespace DB
 {

+std::istream& operator>> (std::istream & in, ProgressOption & progress)
+{
+    std::string token;
+    in >> token;
+
+    boost::to_upper(token);
+
+    if (token == "OFF" || token == "FALSE" || token == "0" || token == "NO")
+        progress = ProgressOption::OFF;
+    else if (token == "TTY" || token == "ON" || token == "TRUE" || token == "1" || token == "YES")
+        progress = ProgressOption::TTY;
+    else if (token == "ERR")
+        progress = ProgressOption::ERR;
+    else
+        throw boost::program_options::validation_error(boost::program_options::validation_error::invalid_option_value);
+
+    return in;
+}
+
 static ClientInfo::QueryKind parseQueryKind(const String & query_kind)
 {
     if (query_kind == "initial_query")
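For readability, this is what the custom `operator>>` accepts on the command line once the `--progress` option (declared later in this diff) is parsed; the binary name below is only an example of a client that embeds `ClientBase`:

``` bash
clickhouse-client --progress=tty   # default: render the progress bar on /dev/tty
clickhouse-client --progress=err   # render the progress bar on stderr
clickhouse-client --progress=off   # disable progress rendering (also: no, false, 0)
```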
@@ -413,8 +435,8 @@ void ClientBase::onData(Block & block, ASTPtr parsed_query)
         return;

     /// If results are written INTO OUTFILE, we can avoid clearing progress to avoid flicker.
-    if (need_render_progress && (stdout_is_a_tty || is_interactive) && (!select_into_file || select_into_file_and_stdout))
-        progress_indication.clearProgressOutput();
+    if (need_render_progress && tty_buf && (!select_into_file || select_into_file_and_stdout))
+        progress_indication.clearProgressOutput(*tty_buf);

     try
     {
@@ -431,11 +453,11 @@ void ClientBase::onData(Block & block, ASTPtr parsed_query)
         output_format->flush();

         /// Restore progress bar after data block.
-        if (need_render_progress && (stdout_is_a_tty || is_interactive))
+        if (need_render_progress && tty_buf)
         {
             if (select_into_file && !select_into_file_and_stdout)
                 std::cerr << "\r";
-            progress_indication.writeProgress();
+            progress_indication.writeProgress(*tty_buf);
         }
     }

@@ -443,7 +465,8 @@ void ClientBase::onLogData(Block & block)
 void ClientBase::onLogData(Block & block)
 {
     initLogsOutputStream();
-    progress_indication.clearProgressOutput();
+    if (need_render_progress && tty_buf)
+        progress_indication.clearProgressOutput(*tty_buf);
     logs_out_stream->writeLogs(block);
     logs_out_stream->flush();
 }
@@ -639,6 +662,58 @@ void ClientBase::initLogsOutputStream()
     }
 }

+void ClientBase::initTtyBuffer(bool to_err)
+{
+    if (!tty_buf)
+    {
+        static constexpr auto tty_file_name = "/dev/tty";
+
+        /// Output all progress bar commands to terminal at once to avoid flicker.
+        /// This size is usually greater than the window size.
+        static constexpr size_t buf_size = 1024;
+
+        if (!to_err)
+        {
+            std::error_code ec;
+            std::filesystem::file_status tty = std::filesystem::status(tty_file_name, ec);
+
+            if (!ec && exists(tty) && is_character_file(tty)
+                && (tty.permissions() & std::filesystem::perms::others_write) != std::filesystem::perms::none)
+            {
+                try
+                {
+                    tty_buf = std::make_unique<WriteBufferFromFile>(tty_file_name, buf_size);
+
+                    /// It is possible that the terminal file has writeable permissions
+                    /// but we cannot write anything there. Check it with invisible character.
+                    tty_buf->write('\0');
+                    tty_buf->next();
+
+                    return;
+                }
+                catch (const Exception & e)
+                {
+                    if (tty_buf)
+                        tty_buf.reset();
+
+                    if (e.code() != ErrorCodes::CANNOT_OPEN_FILE)
+                        throw;
+
+                    /// It is normal if file exists, indicated as writeable but still cannot be opened.
+                    /// Fallback to other options.
+                }
+            }
+        }
+
+        if (stderr_is_a_tty)
+        {
+            tty_buf = std::make_unique<WriteBufferFromFileDescriptor>(STDERR_FILENO, buf_size);
+        }
+        else
+            need_render_progress = false;
+    }
+}
+
 void ClientBase::updateSuggest(const ASTPtr & ast)
 {
     std::vector<std::string> new_words;
@@ -937,14 +1012,15 @@ void ClientBase::onProgress(const Progress & value)
     if (output_format)
         output_format->onProgress(value);

-    if (need_render_progress)
-        progress_indication.writeProgress();
+    if (need_render_progress && tty_buf)
+        progress_indication.writeProgress(*tty_buf);
 }


 void ClientBase::onEndOfStream()
 {
-    progress_indication.clearProgressOutput();
+    if (need_render_progress && tty_buf)
+        progress_indication.clearProgressOutput(*tty_buf);

     if (output_format)
         output_format->finalize();
@@ -952,10 +1028,7 @@ void ClientBase::onEndOfStream()
     resetOutput();

     if (is_interactive && !written_first_block)
-    {
-        progress_indication.clearProgressOutput();
         std::cout << "Ok." << std::endl;
-    }
 }

@@ -998,15 +1071,16 @@ void ClientBase::onProfileEvents(Block & block)
     }
     progress_indication.updateThreadEventData(thread_times);

-    if (need_render_progress)
-        progress_indication.writeProgress();
+    if (need_render_progress && tty_buf)
+        progress_indication.writeProgress(*tty_buf);

     if (profile_events.print)
     {
         if (profile_events.watch.elapsedMilliseconds() >= profile_events.delay_ms)
         {
             initLogsOutputStream();
-            progress_indication.clearProgressOutput();
+            if (need_render_progress && tty_buf)
+                progress_indication.clearProgressOutput(*tty_buf);
             logs_out_stream->writeProfileEvents(block);
             logs_out_stream->flush();

@@ -1180,7 +1254,8 @@ void ClientBase::sendData(Block & sample, const ColumnsDescription & columns_des
         progress_indication.updateProgress(Progress(file_progress));

         /// Set callback to be called on file progress.
-        progress_indication.setFileProgressCallback(global_context, true);
+        if (tty_buf)
+            progress_indication.setFileProgressCallback(global_context, *tty_buf);
     }

     /// If data fetched from file (maybe compressed file)
@@ -1432,12 +1507,12 @@ bool ClientBase::receiveEndOfQuery()
 void ClientBase::cancelQuery()
 {
     connection->sendCancel();
+    if (need_render_progress && tty_buf)
+        progress_indication.clearProgressOutput(*tty_buf);
+
     if (is_interactive)
-    {
-        progress_indication.clearProgressOutput();
         std::cout << "Cancelling query." << std::endl;
-    }
+
     cancelled = true;
 }

@@ -1557,7 +1632,8 @@ void ClientBase::processParsedSingleQuery(const String & full_query, const Strin
     if (profile_events.last_block)
     {
         initLogsOutputStream();
-        progress_indication.clearProgressOutput();
+        if (need_render_progress && tty_buf)
+            progress_indication.clearProgressOutput(*tty_buf);
         logs_out_stream->writeProfileEvents(profile_events.last_block);
         logs_out_stream->flush();

@@ -2248,7 +2324,7 @@ void ClientBase::init(int argc, char ** argv)
         ("stage", po::value<std::string>()->default_value("complete"), "Request query processing up to specified stage: complete,fetch_columns,with_mergeable_state,with_mergeable_state_after_aggregation,with_mergeable_state_after_aggregation_and_limit")
         ("query_kind", po::value<std::string>()->default_value("initial_query"), "One of initial_query/secondary_query/no_query")
         ("query_id", po::value<std::string>(), "query_id")
-        ("progress", "print progress of queries execution")
+        ("progress", po::value<ProgressOption>()->implicit_value(ProgressOption::TTY, "tty")->default_value(ProgressOption::TTY, "tty"), "Print progress of queries execution - to TTY (default): tty|on|1|true|yes; to STDERR: err; OFF: off|0|false|no")

         ("disable_suggestion,A", "Disable loading suggestion data. Note that suggestion data is loaded asynchronously through a second connection to ClickHouse server. Also it is reasonable to disable suggestion if you want to paste a query with TAB characters. Shorthand option -A is for those who get used to mysql client.")
         ("time,t", "print query execution time to stderr in non-interactive mode (for benchmarks)")
@@ -2303,6 +2379,11 @@ void ClientBase::init(int argc, char ** argv)
     parseAndCheckOptions(options_description, options, common_arguments);
     po::notify(options);

+    if (options["progress"].as<ProgressOption>() == ProgressOption::OFF)
+        need_render_progress = false;
+    else
+        initTtyBuffer(options["progress"].as<ProgressOption>() == ProgressOption::ERR);
+
     if (options.count("version") || options.count("V"))
     {
         showClientVersion();
@@ -2353,7 +2434,20 @@ void ClientBase::init(int argc, char ** argv)
     if (options.count("profile-events-delay-ms"))
         config().setUInt64("profile-events-delay-ms", options["profile-events-delay-ms"].as<UInt64>());
     if (options.count("progress"))
-        config().setBool("progress", true);
+    {
+        switch (options["progress"].as<ProgressOption>())
+        {
+            case OFF:
+                config().setString("progress", "off");
+                break;
+            case TTY:
+                config().setString("progress", "tty");
+                break;
+            case ERR:
+                config().setString("progress", "err");
+                break;
+        }
+    }
     if (options.count("echo"))
         config().setBool("echo", true);
     if (options.count("disable_suggestion"))
@@ -15,6 +15,7 @@
 #include <Storages/StorageFile.h>
 #include <Storages/SelectQueryInfo.h>

+
 namespace po = boost::program_options;


@@ -35,9 +36,18 @@ enum MultiQueryProcessingStage
     PARSING_FAILED,
 };

+enum ProgressOption
+{
+    OFF,
+    TTY,
+    ERR,
+};
+std::istream& operator>> (std::istream & in, ProgressOption & progress);
+
 void interruptSignalHandler(int signum);

 class InternalTextLogs;
+class WriteBufferFromFileDescriptor;

 class ClientBase : public Poco::Util::Application, public IHints<2, ClientBase>
 {
@@ -143,6 +153,7 @@ private:

     void initOutputFormat(const Block & block, ASTPtr parsed_query);
     void initLogsOutputStream();
+    void initTtyBuffer(bool to_err = false);

     String prompt() const;

@@ -218,6 +229,10 @@ protected:
     String server_logs_file;
     std::unique_ptr<InternalTextLogs> logs_out_stream;

+    /// /dev/tty if accessible or std::cerr - for progress bar.
+    /// We prefer to output progress bar directly to tty to allow user to redirect stdout and stderr and still get the progress indication.
+    std::unique_ptr<WriteBufferFromFileDescriptor> tty_buf;
+
     String home_path;
     String history_file; /// Path to a file containing command history.

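The comment on `tty_buf` above states the motivation: because the progress bar is written straight to `/dev/tty`, both standard streams can be redirected without losing the indicator. A hedged usage sketch (file names and the query are illustrative):

``` bash
clickhouse-client --progress --query "SELECT * FROM system.numbers LIMIT 100000000" > rows.tsv 2> errors.log
# the progress bar still renders on the controlling terminal while stdout/stderr go to files
```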
@@ -2,6 +2,7 @@
 #include <algorithm>
 #include <cstddef>
 #include <numeric>
+#include <filesystem>
 #include <cmath>
 #include <IO/WriteBufferFromFileDescriptor.h>
 #include <base/types.h>
@@ -11,6 +12,9 @@
 #include "IO/WriteBufferFromString.h"
 #include <Databases/DatabaseMemory.h>

+/// http://en.wikipedia.org/wiki/ANSI_escape_code
+#define CLEAR_TO_END_OF_LINE "\033[K"
+

 namespace
 {
@@ -44,15 +48,6 @@ bool ProgressIndication::updateProgress(const Progress & value)
     return progress.incrementPiecewiseAtomically(value);
 }

-void ProgressIndication::clearProgressOutput()
-{
-    if (written_progress_chars)
-    {
-        written_progress_chars = 0;
-        std::cerr << "\r" CLEAR_TO_END_OF_LINE;
-    }
-}
-
 void ProgressIndication::resetProgress()
 {
     watch.restart();
@@ -67,15 +62,12 @@ void ProgressIndication::resetProgress()
     }
 }

-void ProgressIndication::setFileProgressCallback(ContextMutablePtr context, bool write_progress_on_update_)
+void ProgressIndication::setFileProgressCallback(ContextMutablePtr context, WriteBufferFromFileDescriptor & message)
 {
-    write_progress_on_update = write_progress_on_update_;
     context->setFileProgressCallback([&](const FileProgress & file_progress)
     {
         progress.incrementPiecewiseAtomically(Progress(file_progress));
+        writeProgress(message);
-
-        if (write_progress_on_update)
-            writeProgress();
     });
 }

@@ -142,13 +134,10 @@ void ProgressIndication::writeFinalProgress()
         std::cout << ". ";
 }

-void ProgressIndication::writeProgress()
+void ProgressIndication::writeProgress(WriteBufferFromFileDescriptor & message)
 {
     std::lock_guard lock(progress_mutex);

-    /// Output all progress bar commands to stderr at once to avoid flicker.
-    WriteBufferFromFileDescriptor message(STDERR_FILENO, 1024);
-
     static size_t increment = 0;
     static const char * indicators[8] = {
         "\033[1;30m→\033[0m",
@@ -307,4 +296,14 @@ void ProgressIndication::writeProgress()
     message.next();
 }

+void ProgressIndication::clearProgressOutput(WriteBufferFromFileDescriptor & message)
+{
+    if (written_progress_chars)
+    {
+        written_progress_chars = 0;
+        message << "\r" CLEAR_TO_END_OF_LINE;
+        message.next();
+    }
+}
+
 }
@@ -9,12 +9,12 @@
 #include <Common/Stopwatch.h>
 #include <Common/EventRateMeter.h>

-/// http://en.wikipedia.org/wiki/ANSI_escape_code
-#define CLEAR_TO_END_OF_LINE "\033[K"

 namespace DB
 {

+class WriteBufferFromFileDescriptor;
+
 struct ThreadEventData
 {
     UInt64 time() const noexcept { return user_ms + system_ms; }
@@ -30,14 +30,13 @@ using HostToThreadTimesMap = std::unordered_map<String, ThreadIdToTimeMap>;
 class ProgressIndication
 {
 public:
-    /// Write progress to stderr.
-    void writeProgress();
+    /// Write progress bar.
+    void writeProgress(WriteBufferFromFileDescriptor & message);
+    void clearProgressOutput(WriteBufferFromFileDescriptor & message);
+
+    /// Write summary.
     void writeFinalProgress();

-    /// Clear stderr output.
-    void clearProgressOutput();
-
     /// Reset progress values.
     void resetProgress();

@@ -52,7 +51,7 @@ public:
     /// In some cases there is a need to update progress value, when there is no access to progress_inidcation object.
     /// In this case it is added via context.
     /// `write_progress_on_update` is needed to write progress for loading files data via pipe in non-interactive mode.
-    void setFileProgressCallback(ContextMutablePtr context, bool write_progress_on_update = false);
+    void setFileProgressCallback(ContextMutablePtr context, WriteBufferFromFileDescriptor & message);

     /// How much seconds passed since query execution start.
     double elapsedSeconds() const { return getElapsedNanoseconds() / 1e9; }
@@ -36,7 +36,7 @@ void CoordinationSettings::loadFromConfig(const String & config_elem, const Poco
 }


-const String KeeperConfigurationAndSettings::DEFAULT_FOUR_LETTER_WORD_CMD = "conf,cons,crst,envi,ruok,srst,srvr,stat,wchs,dirs,mntr,isro,rcvr,apiv";
+const String KeeperConfigurationAndSettings::DEFAULT_FOUR_LETTER_WORD_CMD = "conf,cons,crst,envi,ruok,srst,srvr,stat,wchs,dirs,mntr,isro,rcvr,apiv,csnp,lgif";

 KeeperConfigurationAndSettings::KeeperConfigurationAndSettings()
     : server_id(NOT_EXIST)
@@ -136,6 +136,12 @@ void FourLetterCommandFactory::registerCommands(KeeperDispatcher & keeper_dispat
     FourLetterCommandPtr api_version_command = std::make_shared<ApiVersionCommand>(keeper_dispatcher);
     factory.registerCommand(api_version_command);

+    FourLetterCommandPtr create_snapshot_command = std::make_shared<CreateSnapshotCommand>(keeper_dispatcher);
+    factory.registerCommand(create_snapshot_command);
+
+    FourLetterCommandPtr log_info_command = std::make_shared<LogInfoCommand>(keeper_dispatcher);
+    factory.registerCommand(log_info_command);
+
     factory.initializeAllowList(keeper_dispatcher);
     factory.setInitialize(true);
 }
@@ -472,4 +478,33 @@ String ApiVersionCommand::run()
     return toString(static_cast<uint8_t>(Coordination::current_keeper_api_version));
 }

+String CreateSnapshotCommand::run()
+{
+    auto log_index = keeper_dispatcher.createSnapshot();
+    return log_index > 0 ? std::to_string(log_index) : "Failed to schedule snapshot creation task.";
+}
+
+String LogInfoCommand::run()
+{
+    KeeperLogInfo log_info = keeper_dispatcher.getKeeperLogInfo();
+    StringBuffer ret;
+
+    auto append = [&ret] (String key, uint64_t value) -> void
+    {
+        writeText(key, ret);
+        writeText('\t', ret);
+        writeText(std::to_string(value), ret);
+        writeText('\n', ret);
+    };
+    append("first_log_idx", log_info.first_log_idx);
+    append("first_log_term", log_info.first_log_term);
+    append("last_log_idx", log_info.last_log_idx);
+    append("last_log_term", log_info.last_log_term);
+    append("last_committed_log_idx", log_info.last_committed_log_idx);
+    append("leader_committed_log_idx", log_info.leader_committed_log_idx);
+    append("target_committed_log_idx", log_info.target_committed_log_idx);
+    append("last_snapshot_idx", log_info.last_snapshot_idx);
+    return ret.str();
+}
+
 }
@@ -17,6 +17,7 @@ using FourLetterCommandPtr = std::shared_ptr<DB::IFourLetterCommand>;
 /// Just like zookeeper Four Letter Words commands, CH Keeper responds to a small set of commands.
 /// Each command is composed of four letters, these commands are useful to monitor and issue system problems.
 /// The feature is based on Zookeeper 3.5.9, details is in https://zookeeper.apache.org/doc/r3.5.9/zookeeperAdmin.html#sc_zkCommands.
+/// Also we add some additional commands such as csnp, lgif etc.
 struct IFourLetterCommand
 {
 public:
@@ -327,4 +328,40 @@ struct ApiVersionCommand : public IFourLetterCommand
     String run() override;
     ~ApiVersionCommand() override = default;
 };
+
+/// Create snapshot manually
+struct CreateSnapshotCommand : public IFourLetterCommand
+{
+    explicit CreateSnapshotCommand(KeeperDispatcher & keeper_dispatcher_)
+        : IFourLetterCommand(keeper_dispatcher_)
+    {
+    }
+
+    String name() override { return "csnp"; }
+    String run() override;
+    ~CreateSnapshotCommand() override = default;
+};
+
+/** Raft log information:
+  *     first_log_idx 1
+  *     first_log_term 1
+  *     last_log_idx 101
+  *     last_log_term 1
+  *     last_committed_idx 100
+  *     leader_committed_log_idx 101
+  *     target_committed_log_idx 101
+  *     last_snapshot_idx 50
+  */
+struct LogInfoCommand : public IFourLetterCommand
+{
+    explicit LogInfoCommand(KeeperDispatcher & keeper_dispatcher_)
+        : IFourLetterCommand(keeper_dispatcher_)
+    {
+    }
+
+    String name() override { return "lgif"; }
+    String run() override;
+    ~LogInfoCommand() override = default;
+};
+
 }
@@ -47,4 +47,32 @@ struct Keeper4LWInfo
     }
 };

+/// Keeper log information for 4lw commands
+struct KeeperLogInfo
+{
+    /// My first log index in log store.
+    uint64_t first_log_idx;
+
+    /// My first log term.
+    uint64_t first_log_term;
+
+    /// My last log index in log store.
+    uint64_t last_log_idx;
+
+    /// My last log term.
+    uint64_t last_log_term;
+
+    /// My last committed log index in state machine.
+    uint64_t last_committed_log_idx;
+
+    /// Leader's committed log index from my perspective.
+    uint64_t leader_committed_log_idx;
+
+    /// Target log index should be committed to.
+    uint64_t target_committed_log_idx;
+
+    /// The largest committed log index in last snapshot.
+    uint64_t last_snapshot_idx;
+};
+
 }
@@ -203,6 +203,18 @@ public:
     {
         keeper_stats.reset();
     }
+
+    /// Create snapshot manually, return the last committed log index in the snapshot
+    uint64_t createSnapshot()
+    {
+        return server->createSnapshot();
+    }
+
+    /// Get Raft information
+    KeeperLogInfo getKeeperLogInfo()
+    {
+        return server->getKeeperLogInfo();
+    }
 };

 }
@ -907,4 +907,29 @@ Keeper4LWInfo KeeperServer::getPartiallyFilled4LWInfo() const
|
|||||||
return result;
|
return result;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
uint64_t KeeperServer::createSnapshot()
|
||||||
|
{
|
||||||
|
uint64_t log_idx = raft_instance->create_snapshot();
|
||||||
|
if (log_idx != 0)
|
||||||
|
LOG_INFO(log, "Snapshot creation scheduled with last committed log index {}.", log_idx);
|
||||||
|
else
|
||||||
|
LOG_WARNING(log, "Failed to schedule snapshot creation task.");
|
||||||
|
return log_idx;
|
||||||
|
}
|
||||||
|
|
||||||
|
KeeperLogInfo KeeperServer::getKeeperLogInfo()
|
||||||
|
{
|
||||||
|
KeeperLogInfo log_info;
|
||||||
|
auto log_store = state_manager->load_log_store();
|
||||||
|
log_info.first_log_idx = log_store->start_index();
|
||||||
|
log_info.first_log_term = log_store->term_at(log_info.first_log_idx);
|
||||||
|
log_info.last_log_idx = raft_instance->get_last_log_idx();
|
||||||
|
log_info.last_log_term = raft_instance->get_last_log_term();
|
||||||
|
log_info.last_committed_log_idx = raft_instance->get_committed_log_idx();
|
||||||
|
log_info.leader_committed_log_idx = raft_instance->get_leader_committed_log_idx();
|
||||||
|
log_info.target_committed_log_idx = raft_instance->get_target_committed_log_idx();
|
||||||
|
log_info.last_snapshot_idx = raft_instance->get_last_snapshot_idx();
|
||||||
|
return log_info;
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
@ -131,6 +131,10 @@ public:
|
|||||||
/// Wait configuration update for action. Used by followers.
|
/// Wait configuration update for action. Used by followers.
|
||||||
/// Return true if update was successfully received.
|
/// Return true if update was successfully received.
|
||||||
bool waitConfigurationUpdate(const ConfigUpdateAction & task);
|
bool waitConfigurationUpdate(const ConfigUpdateAction & task);
|
||||||
|
|
||||||
|
uint64_t createSnapshot();
|
||||||
|
|
||||||
|
KeeperLogInfo getKeeperLogInfo();
|
||||||
};
|
};
|
||||||
|
|
||||||
}
|
}
|
||||||
|
@ -377,6 +377,9 @@ void KeeperStorage::UncommittedState::commit(int64_t commit_zxid)
|
|||||||
{
|
{
|
||||||
assert(deltas.empty() || deltas.front().zxid >= commit_zxid);
|
assert(deltas.empty() || deltas.front().zxid >= commit_zxid);
|
||||||
|
|
||||||
|
// collect nodes that have no further modification in the current transaction
|
||||||
|
std::unordered_set<std::string> modified_nodes;
|
||||||
|
|
||||||
while (!deltas.empty() && deltas.front().zxid == commit_zxid)
|
while (!deltas.empty() && deltas.front().zxid == commit_zxid)
|
||||||
{
|
{
|
||||||
if (std::holds_alternative<SubDeltaEnd>(deltas.front().operation))
|
if (std::holds_alternative<SubDeltaEnd>(deltas.front().operation))
|
||||||
@ -393,7 +396,17 @@ void KeeperStorage::UncommittedState::commit(int64_t commit_zxid)
|
|||||||
assert(path_deltas.front() == &front_delta);
|
assert(path_deltas.front() == &front_delta);
|
||||||
path_deltas.pop_front();
|
path_deltas.pop_front();
|
||||||
if (path_deltas.empty())
|
if (path_deltas.empty())
|
||||||
|
{
|
||||||
deltas_for_path.erase(front_delta.path);
|
deltas_for_path.erase(front_delta.path);
|
||||||
|
|
||||||
|
// no more deltas for path -> no modification
|
||||||
|
modified_nodes.insert(std::move(front_delta.path));
|
||||||
|
}
|
||||||
|
else if (path_deltas.front()->zxid > commit_zxid)
|
||||||
|
{
|
||||||
|
// next delta has a zxid from a different transaction -> no modification in this transaction
|
||||||
|
modified_nodes.insert(std::move(front_delta.path));
|
||||||
|
}
|
||||||
}
|
}
|
||||||
else if (auto * add_auth = std::get_if<AddAuthDelta>(&front_delta.operation))
|
else if (auto * add_auth = std::get_if<AddAuthDelta>(&front_delta.operation))
|
||||||
{
|
{
|
||||||
@ -409,9 +422,12 @@ void KeeperStorage::UncommittedState::commit(int64_t commit_zxid)
|
|||||||
}
|
}
|
||||||
|
|
||||||
// delete all cached nodes that were not modified after the commit_zxid
|
// delete all cached nodes that were not modified after the commit_zxid
|
||||||
// the commit can end on SubDeltaEnd so we don't want to clear cached nodes too soon
|
// we only need to check the nodes that were modified in this transaction
|
||||||
if (deltas.empty() || deltas.front().zxid > commit_zxid)
|
for (const auto & node : modified_nodes)
|
||||||
std::erase_if(nodes, [commit_zxid](const auto & node) { return node.second.zxid == commit_zxid; });
|
{
|
||||||
|
if (nodes[node].zxid == commit_zxid)
|
||||||
|
nodes.erase(node);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void KeeperStorage::UncommittedState::rollback(int64_t rollback_zxid)
|
void KeeperStorage::UncommittedState::rollback(int64_t rollback_zxid)
|
||||||
|
@ -443,6 +443,11 @@ ASTPtr DatabasePostgreSQL::getColumnDeclaration(const DataTypePtr & data_type) c
|
|||||||
if (which.isArray())
|
if (which.isArray())
|
||||||
return makeASTFunction("Array", getColumnDeclaration(typeid_cast<const DataTypeArray *>(data_type.get())->getNestedType()));
|
return makeASTFunction("Array", getColumnDeclaration(typeid_cast<const DataTypeArray *>(data_type.get())->getNestedType()));
|
||||||
|
|
||||||
|
if (which.isDateTime64())
|
||||||
|
{
|
||||||
|
return makeASTFunction("DateTime64", std::make_shared<ASTLiteral>(static_cast<UInt32>(6)));
|
||||||
|
}
|
||||||
|
|
||||||
return std::make_shared<ASTIdentifier>(data_type->getName());
|
return std::make_shared<ASTIdentifier>(data_type->getName());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
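A sketch of how the new mapping surfaces to a user, assuming a PostgreSQL table test (u timestamp) reachable through a named collection called postgres1 (both the table and the collection name are illustrative, taken from the integration test further down in this diff):

    # Hypothetical setup: PostgreSQL table `test (u timestamp)` and a `postgres1` named collection.
    clickhouse-client --query "CREATE DATABASE pg ENGINE = PostgreSQL(postgres1)"
    clickhouse-client --query "SHOW CREATE TABLE pg.test"
    # The timestamp column is now declared as DateTime64(6) rather than DateTime.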
@ -11,6 +11,7 @@ enum class DataSourceType
|
|||||||
Local,
|
Local,
|
||||||
RAM,
|
RAM,
|
||||||
S3,
|
S3,
|
||||||
|
S3_Plain,
|
||||||
HDFS,
|
HDFS,
|
||||||
WebServer,
|
WebServer,
|
||||||
AzureBlobStorage,
|
AzureBlobStorage,
|
||||||
@ -26,6 +27,8 @@ inline String toString(DataSourceType data_source_type)
|
|||||||
return "memory";
|
return "memory";
|
||||||
case DataSourceType::S3:
|
case DataSourceType::S3:
|
||||||
return "s3";
|
return "s3";
|
||||||
|
case DataSourceType::S3_Plain:
|
||||||
|
return "s3_plain";
|
||||||
case DataSourceType::HDFS:
|
case DataSourceType::HDFS:
|
||||||
return "hdfs";
|
return "hdfs";
|
||||||
case DataSourceType::WebServer:
|
case DataSourceType::WebServer:
|
||||||
|
@ -213,7 +213,9 @@ public:
|
|||||||
template <class ...Args>
|
template <class ...Args>
|
||||||
S3PlainObjectStorage(Args && ...args)
|
S3PlainObjectStorage(Args && ...args)
|
||||||
: S3ObjectStorage("S3PlainObjectStorage", std::forward<Args>(args)...)
|
: S3ObjectStorage("S3PlainObjectStorage", std::forward<Args>(args)...)
|
||||||
{}
|
{
|
||||||
|
data_source_description.type = DataSourceType::S3_Plain;
|
||||||
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
}
|
}
|
||||||
|
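With a disk of type s3_plain configured, the new data source type becomes visible through system.disks; a minimal check, mirroring the integration test later in this diff (the disk itself has to be declared in the storage configuration, which is assumed here):

    # Assumes a storage configuration declaring a disk with <type>s3_plain</type>.
    clickhouse-client --query "SELECT name, type FROM system.disks WHERE type = 's3_plain'"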
86
src/Functions/ascii.cpp
Normal file
86
src/Functions/ascii.cpp
Normal file
@ -0,0 +1,86 @@
|
|||||||
|
#include <DataTypes/DataTypeString.h>
|
||||||
|
#include <Functions/FunctionFactory.h>
|
||||||
|
#include <Functions/FunctionStringOrArrayToT.h>
|
||||||
|
|
||||||
|
namespace DB
|
||||||
|
{
|
||||||
|
|
||||||
|
namespace ErrorCodes
|
||||||
|
{
|
||||||
|
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
|
||||||
|
extern const int NOT_IMPLEMENTED;
|
||||||
|
}
|
||||||
|
|
||||||
|
struct AsciiName
|
||||||
|
{
|
||||||
|
static constexpr auto name = "ascii";
|
||||||
|
};
|
||||||
|
|
||||||
|
|
||||||
|
struct AsciiImpl
|
||||||
|
{
|
||||||
|
static constexpr auto is_fixed_to_constant = false;
|
||||||
|
using ReturnType = Int32;
|
||||||
|
|
||||||
|
|
||||||
|
static void vector(const ColumnString::Chars & data, const ColumnString::Offsets & offsets, PaddedPODArray<ReturnType> & res)
|
||||||
|
{
|
||||||
|
size_t size = offsets.size();
|
||||||
|
|
||||||
|
ColumnString::Offset prev_offset = 0;
|
||||||
|
for (size_t i = 0; i < size; ++i)
|
||||||
|
{
|
||||||
|
res[i] = doAscii(data, prev_offset, offsets[i] - prev_offset - 1);
|
||||||
|
prev_offset = offsets[i];
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
[[noreturn]] static void vectorFixedToConstant(const ColumnString::Chars & /*data*/, size_t /*n*/, Int32 & /*res*/)
|
||||||
|
{
|
||||||
|
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "vectorFixedToConstant not implemented for function {}", AsciiName::name);
|
||||||
|
}
|
||||||
|
|
||||||
|
static void vectorFixedToVector(const ColumnString::Chars & data, size_t n, PaddedPODArray<ReturnType> & res)
|
||||||
|
{
|
||||||
|
size_t size = data.size() / n;
|
||||||
|
|
||||||
|
for (size_t i = 0; i < size; ++i)
|
||||||
|
{
|
||||||
|
res[i] = doAscii(data, i * n, n);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
[[noreturn]] static void array(const ColumnString::Offsets & /*offsets*/, PaddedPODArray<ReturnType> & /*res*/)
|
||||||
|
{
|
||||||
|
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Cannot apply function {} to Array argument", AsciiName::name);
|
||||||
|
}
|
||||||
|
|
||||||
|
[[noreturn]] static void uuid(const ColumnUUID::Container & /*offsets*/, size_t /*n*/, PaddedPODArray<ReturnType> & /*res*/)
|
||||||
|
{
|
||||||
|
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Cannot apply function {} to UUID argument", AsciiName::name);
|
||||||
|
}
|
||||||
|
|
||||||
|
private:
|
||||||
|
static Int32 doAscii(const ColumnString::Chars & buf, size_t offset, size_t size)
|
||||||
|
{
|
||||||
|
return size ? static_cast<ReturnType>(buf[offset]) : 0;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
using FunctionAscii = FunctionStringOrArrayToT<AsciiImpl, AsciiName, AsciiImpl::ReturnType>;
|
||||||
|
|
||||||
|
REGISTER_FUNCTION(Ascii)
|
||||||
|
{
|
||||||
|
factory.registerFunction<FunctionAscii>(
|
||||||
|
{
|
||||||
|
R"(
|
||||||
|
Returns the ASCII code point of the first character of str. The result type is Int32.
|
||||||
|
|
||||||
|
If str is empty, the result is 0. If the first character is not an ASCII character or not part of the Latin-1 Supplement range of UTF-16, the result is undefined.
|
||||||
|
)",
|
||||||
|
Documentation::Examples{{"ascii", "SELECT ascii('234')"}},
|
||||||
|
Documentation::Categories{"String"}
|
||||||
|
}, FunctionFactory::CaseInsensitive);
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
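A quick usage sketch of the new function (clickhouse-local is used here only for illustration):

    clickhouse-local --query "SELECT ascii('234'), ascii(''), ascii('a')"
    # 50 (the code point of '2'), 0 for the empty string, 97 for 'a'.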
35
src/Functions/formatReadableDecimalSize.cpp
Normal file
35
src/Functions/formatReadableDecimalSize.cpp
Normal file
@ -0,0 +1,35 @@
|
|||||||
|
#include <Functions/FunctionFactory.h>
|
||||||
|
#include <Functions/formatReadable.h>
|
||||||
|
|
||||||
|
|
||||||
|
namespace DB
|
||||||
|
{
|
||||||
|
|
||||||
|
namespace
|
||||||
|
{
|
||||||
|
struct Impl
|
||||||
|
{
|
||||||
|
static constexpr auto name = "formatReadableDecimalSize";
|
||||||
|
|
||||||
|
static void format(double value, DB::WriteBuffer & out)
|
||||||
|
{
|
||||||
|
formatReadableSizeWithDecimalSuffix(value, out);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
REGISTER_FUNCTION(FormatReadableDecimalSize)
|
||||||
|
{
|
||||||
|
factory.registerFunction<FunctionFormatReadable<Impl>>(
|
||||||
|
{
|
||||||
|
R"(
|
||||||
|
Accepts the size (number of bytes). Returns a rounded size with a suffix (KB, MB, etc.) as a string.
|
||||||
|
)",
|
||||||
|
Documentation::Examples{
|
||||||
|
{"formatReadableDecimalSize", "SELECT formatReadableDecimalSize(1000)"}},
|
||||||
|
Documentation::Categories{"OtherFunctions"}
|
||||||
|
},
|
||||||
|
FunctionFactory::CaseSensitive);
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
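A usage sketch contrasting the new function with the existing binary-suffix one (clickhouse-local only for illustration; output shown is the expected form):

    clickhouse-local --query "SELECT formatReadableDecimalSize(1000), formatReadableSize(1000)"
    # 1.00 KB (decimal, powers of 1000) vs 1000.00 B (binary, powers of 1024 with KiB/MiB suffixes)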
@ -86,6 +86,7 @@ NamesAndTypesList SessionLogElement::getNamesAndTypes()
|
|||||||
AUTH_TYPE_NAME_AND_VALUE(AuthType::DOUBLE_SHA1_PASSWORD),
|
AUTH_TYPE_NAME_AND_VALUE(AuthType::DOUBLE_SHA1_PASSWORD),
|
||||||
AUTH_TYPE_NAME_AND_VALUE(AuthType::LDAP),
|
AUTH_TYPE_NAME_AND_VALUE(AuthType::LDAP),
|
||||||
AUTH_TYPE_NAME_AND_VALUE(AuthType::KERBEROS),
|
AUTH_TYPE_NAME_AND_VALUE(AuthType::KERBEROS),
|
||||||
|
AUTH_TYPE_NAME_AND_VALUE(AuthType::SSL_CERTIFICATE),
|
||||||
});
|
});
|
||||||
#undef AUTH_TYPE_NAME_AND_VALUE
|
#undef AUTH_TYPE_NAME_AND_VALUE
|
||||||
static_assert(static_cast<int>(AuthenticationType::MAX) == 7);
|
static_assert(static_cast<int>(AuthenticationType::MAX) == 7);
|
||||||
|
@ -1197,6 +1197,9 @@ public:
|
|||||||
if (!mergeElement())
|
if (!mergeElement())
|
||||||
return false;
|
return false;
|
||||||
|
|
||||||
|
if (elements.size() != 2)
|
||||||
|
return false;
|
||||||
|
|
||||||
elements = {makeASTFunction("CAST", elements[0], elements[1])};
|
elements = {makeASTFunction("CAST", elements[0], elements[1])};
|
||||||
finished = true;
|
finished = true;
|
||||||
return true;
|
return true;
|
||||||
@ -1406,7 +1409,7 @@ public:
|
|||||||
protected:
|
protected:
|
||||||
bool getResultImpl(ASTPtr & node) override
|
bool getResultImpl(ASTPtr & node) override
|
||||||
{
|
{
|
||||||
if (state == 2)
|
if (state == 2 && elements.size() == 2)
|
||||||
std::swap(elements[1], elements[0]);
|
std::swap(elements[1], elements[0]);
|
||||||
|
|
||||||
node = makeASTFunction("position", std::move(elements));
|
node = makeASTFunction("position", std::move(elements));
|
||||||
|
@ -5426,6 +5426,7 @@ static void selectBestProjection(
|
|||||||
|
|
||||||
auto projection_result_ptr = reader.estimateNumMarksToRead(
|
auto projection_result_ptr = reader.estimateNumMarksToRead(
|
||||||
projection_parts,
|
projection_parts,
|
||||||
|
candidate.prewhere_info,
|
||||||
candidate.required_columns,
|
candidate.required_columns,
|
||||||
storage_snapshot->metadata,
|
storage_snapshot->metadata,
|
||||||
candidate.desc->metadata,
|
candidate.desc->metadata,
|
||||||
@ -5449,6 +5450,7 @@ static void selectBestProjection(
|
|||||||
{
|
{
|
||||||
auto normal_result_ptr = reader.estimateNumMarksToRead(
|
auto normal_result_ptr = reader.estimateNumMarksToRead(
|
||||||
normal_parts,
|
normal_parts,
|
||||||
|
query_info.prewhere_info,
|
||||||
required_columns,
|
required_columns,
|
||||||
storage_snapshot->metadata,
|
storage_snapshot->metadata,
|
||||||
storage_snapshot->metadata,
|
storage_snapshot->metadata,
|
||||||
@ -5783,7 +5785,6 @@ std::optional<ProjectionCandidate> MergeTreeData::getQueryProcessingStageWithAgg
|
|||||||
const auto & analysis_result = select.getAnalysisResult();
|
const auto & analysis_result = select.getAnalysisResult();
|
||||||
|
|
||||||
query_info.prepared_sets = select.getQueryAnalyzer()->getPreparedSets();
|
query_info.prepared_sets = select.getQueryAnalyzer()->getPreparedSets();
|
||||||
query_info.prewhere_info = analysis_result.prewhere_info;
|
|
||||||
|
|
||||||
const auto & before_where = analysis_result.before_where;
|
const auto & before_where = analysis_result.before_where;
|
||||||
const auto & where_column_name = analysis_result.where_column_name;
|
const auto & where_column_name = analysis_result.where_column_name;
|
||||||
@ -6060,6 +6061,7 @@ std::optional<ProjectionCandidate> MergeTreeData::getQueryProcessingStageWithAgg
|
|||||||
{
|
{
|
||||||
auto normal_result_ptr = reader.estimateNumMarksToRead(
|
auto normal_result_ptr = reader.estimateNumMarksToRead(
|
||||||
normal_parts,
|
normal_parts,
|
||||||
|
query_info.prewhere_info,
|
||||||
analysis_result.required_columns,
|
analysis_result.required_columns,
|
||||||
metadata_snapshot,
|
metadata_snapshot,
|
||||||
metadata_snapshot,
|
metadata_snapshot,
|
||||||
@ -6092,6 +6094,7 @@ std::optional<ProjectionCandidate> MergeTreeData::getQueryProcessingStageWithAgg
|
|||||||
{
|
{
|
||||||
query_info.merge_tree_select_result_ptr = reader.estimateNumMarksToRead(
|
query_info.merge_tree_select_result_ptr = reader.estimateNumMarksToRead(
|
||||||
parts,
|
parts,
|
||||||
|
query_info.prewhere_info,
|
||||||
analysis_result.required_columns,
|
analysis_result.required_columns,
|
||||||
metadata_snapshot,
|
metadata_snapshot,
|
||||||
metadata_snapshot,
|
metadata_snapshot,
|
||||||
@ -6173,8 +6176,6 @@ std::optional<ProjectionCandidate> MergeTreeData::getQueryProcessingStageWithAgg
|
|||||||
selected_candidate->aggregate_descriptions = select.getQueryAnalyzer()->aggregates();
|
selected_candidate->aggregate_descriptions = select.getQueryAnalyzer()->aggregates();
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Just in case, reset prewhere info calculated from projection.
|
|
||||||
query_info.prewhere_info.reset();
|
|
||||||
return *selected_candidate;
|
return *selected_candidate;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -214,6 +214,14 @@ SelectPartsDecision MergeTreeDataMergerMutator::selectPartsToMerge(
|
|||||||
/// Previous part only in boundaries of partition frame
|
/// Previous part only in boundaries of partition frame
|
||||||
const MergeTreeData::DataPartPtr * prev_part = nullptr;
|
const MergeTreeData::DataPartPtr * prev_part = nullptr;
|
||||||
|
|
||||||
|
/// collect min_age for each partition while iterating parts
|
||||||
|
struct PartitionInfo
|
||||||
|
{
|
||||||
|
time_t min_age{std::numeric_limits<time_t>::max()};
|
||||||
|
};
|
||||||
|
|
||||||
|
std::unordered_map<std::string, PartitionInfo> partitions_info;
|
||||||
|
|
||||||
size_t parts_selected_precondition = 0;
|
size_t parts_selected_precondition = 0;
|
||||||
for (const MergeTreeData::DataPartPtr & part : data_parts)
|
for (const MergeTreeData::DataPartPtr & part : data_parts)
|
||||||
{
|
{
|
||||||
@ -277,6 +285,9 @@ SelectPartsDecision MergeTreeDataMergerMutator::selectPartsToMerge(
|
|||||||
part_info.compression_codec_desc = part->default_codec->getFullCodecDesc();
|
part_info.compression_codec_desc = part->default_codec->getFullCodecDesc();
|
||||||
part_info.shall_participate_in_merges = has_volumes_with_disabled_merges ? part->shallParticipateInMerges(storage_policy) : true;
|
part_info.shall_participate_in_merges = has_volumes_with_disabled_merges ? part->shallParticipateInMerges(storage_policy) : true;
|
||||||
|
|
||||||
|
auto & partition_info = partitions_info[partition_id];
|
||||||
|
partition_info.min_age = std::min(partition_info.min_age, part_info.age);
|
||||||
|
|
||||||
++parts_selected_precondition;
|
++parts_selected_precondition;
|
||||||
|
|
||||||
parts_ranges.back().emplace_back(part_info);
|
parts_ranges.back().emplace_back(part_info);
|
||||||
@ -333,7 +344,8 @@ SelectPartsDecision MergeTreeDataMergerMutator::selectPartsToMerge(
|
|||||||
SimpleMergeSelector::Settings merge_settings;
|
SimpleMergeSelector::Settings merge_settings;
|
||||||
/// Override value from table settings
|
/// Override value from table settings
|
||||||
merge_settings.max_parts_to_merge_at_once = data_settings->max_parts_to_merge_at_once;
|
merge_settings.max_parts_to_merge_at_once = data_settings->max_parts_to_merge_at_once;
|
||||||
merge_settings.min_age_to_force_merge = data_settings->min_age_to_force_merge_seconds;
|
if (!data_settings->min_age_to_force_merge_on_partition_only)
|
||||||
|
merge_settings.min_age_to_force_merge = data_settings->min_age_to_force_merge_seconds;
|
||||||
|
|
||||||
if (aggressive)
|
if (aggressive)
|
||||||
merge_settings.base = 1;
|
merge_settings.base = 1;
|
||||||
@ -347,6 +359,20 @@ SelectPartsDecision MergeTreeDataMergerMutator::selectPartsToMerge(
|
|||||||
|
|
||||||
if (parts_to_merge.empty())
|
if (parts_to_merge.empty())
|
||||||
{
|
{
|
||||||
|
if (data_settings->min_age_to_force_merge_on_partition_only && data_settings->min_age_to_force_merge_seconds)
|
||||||
|
{
|
||||||
|
auto best_partition_it = std::max_element(
|
||||||
|
partitions_info.begin(),
|
||||||
|
partitions_info.end(),
|
||||||
|
[](const auto & e1, const auto & e2) { return e1.second.min_age < e2.second.min_age; });
|
||||||
|
|
||||||
|
assert(best_partition_it != partitions_info.end());
|
||||||
|
|
||||||
|
if (static_cast<size_t>(best_partition_it->second.min_age) >= data_settings->min_age_to_force_merge_seconds)
|
||||||
|
return selectAllPartsToMergeWithinPartition(
|
||||||
|
future_part, can_merge_callback, best_partition_it->first, true, metadata_snapshot, txn, out_disable_reason);
|
||||||
|
}
|
||||||
|
|
||||||
if (out_disable_reason)
|
if (out_disable_reason)
|
||||||
*out_disable_reason = "There is no need to merge parts according to merge selector algorithm";
|
*out_disable_reason = "There is no need to merge parts according to merge selector algorithm";
|
||||||
return SelectPartsDecision::CANNOT_SELECT;
|
return SelectPartsDecision::CANNOT_SELECT;
|
||||||
|
@ -1294,6 +1294,7 @@ static void selectColumnNames(
|
|||||||
|
|
||||||
MergeTreeDataSelectAnalysisResultPtr MergeTreeDataSelectExecutor::estimateNumMarksToRead(
|
MergeTreeDataSelectAnalysisResultPtr MergeTreeDataSelectExecutor::estimateNumMarksToRead(
|
||||||
MergeTreeData::DataPartsVector parts,
|
MergeTreeData::DataPartsVector parts,
|
||||||
|
const PrewhereInfoPtr & prewhere_info,
|
||||||
const Names & column_names_to_return,
|
const Names & column_names_to_return,
|
||||||
const StorageMetadataPtr & metadata_snapshot_base,
|
const StorageMetadataPtr & metadata_snapshot_base,
|
||||||
const StorageMetadataPtr & metadata_snapshot,
|
const StorageMetadataPtr & metadata_snapshot,
|
||||||
@ -1318,7 +1319,7 @@ MergeTreeDataSelectAnalysisResultPtr MergeTreeDataSelectExecutor::estimateNumMar
|
|||||||
|
|
||||||
return ReadFromMergeTree::selectRangesToRead(
|
return ReadFromMergeTree::selectRangesToRead(
|
||||||
std::move(parts),
|
std::move(parts),
|
||||||
query_info.prewhere_info,
|
prewhere_info,
|
||||||
added_filter_nodes,
|
added_filter_nodes,
|
||||||
metadata_snapshot_base,
|
metadata_snapshot_base,
|
||||||
metadata_snapshot,
|
metadata_snapshot,
|
||||||
|
@ -56,6 +56,7 @@ public:
|
|||||||
/// This method is used to select best projection for table.
|
/// This method is used to select best projection for table.
|
||||||
MergeTreeDataSelectAnalysisResultPtr estimateNumMarksToRead(
|
MergeTreeDataSelectAnalysisResultPtr estimateNumMarksToRead(
|
||||||
MergeTreeData::DataPartsVector parts,
|
MergeTreeData::DataPartsVector parts,
|
||||||
|
const PrewhereInfoPtr & prewhere_info,
|
||||||
const Names & column_names,
|
const Names & column_names,
|
||||||
const StorageMetadataPtr & metadata_snapshot_base,
|
const StorageMetadataPtr & metadata_snapshot_base,
|
||||||
const StorageMetadataPtr & metadata_snapshot,
|
const StorageMetadataPtr & metadata_snapshot,
|
||||||
|
@ -63,6 +63,7 @@ struct Settings;
|
|||||||
M(UInt64, merge_tree_clear_old_parts_interval_seconds, 1, "The period of executing the clear old parts operation in background.", 0) \
|
M(UInt64, merge_tree_clear_old_parts_interval_seconds, 1, "The period of executing the clear old parts operation in background.", 0) \
|
||||||
M(UInt64, merge_tree_clear_old_broken_detached_parts_ttl_timeout_seconds, 1ULL * 3600 * 24 * 30, "Remove old broken detached parts in the background if they remained untouched for a period of time specified by this setting.", 0) \
|
M(UInt64, merge_tree_clear_old_broken_detached_parts_ttl_timeout_seconds, 1ULL * 3600 * 24 * 30, "Remove old broken detached parts in the background if they remained untouched for a period of time specified by this setting.", 0) \
|
||||||
M(UInt64, min_age_to_force_merge_seconds, 0, "If all parts in a certain range are older than this value, range will be always eligible for merging. Set to 0 to disable.", 0) \
|
M(UInt64, min_age_to_force_merge_seconds, 0, "If all parts in a certain range are older than this value, range will be always eligible for merging. Set to 0 to disable.", 0) \
|
||||||
|
M(Bool, min_age_to_force_merge_on_partition_only, false, "Whether min_age_to_force_merge_seconds should be applied only to the entire partition and not to a subset.", false) \
|
||||||
M(UInt64, merge_tree_enable_clear_old_broken_detached, false, "Enable clearing old broken detached parts operation in background.", 0) \
|
M(UInt64, merge_tree_enable_clear_old_broken_detached, false, "Enable clearing old broken detached parts operation in background.", 0) \
|
||||||
M(Bool, remove_rolled_back_parts_immediately, 1, "Setting for an incomplete experimental feature.", 0) \
|
M(Bool, remove_rolled_back_parts_immediately, 1, "Setting for an incomplete experimental feature.", 0) \
|
||||||
\
|
\
|
||||||
|
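A sketch of the new setting in use, mirroring the stateless test added later in this diff; the table name, partitioning scheme, and threshold are illustrative:

    # Force-merge a partition only once every part in it is older than the threshold.
    clickhouse-client --query "
        CREATE TABLE t_force_merge (d Date, i Int64) ENGINE = MergeTree
        PARTITION BY toYYYYMM(d) ORDER BY i
        SETTINGS min_age_to_force_merge_seconds = 30,
                 min_age_to_force_merge_on_partition_only = true"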
@ -87,7 +87,7 @@ class PRInfo:
|
|||||||
self.body = ""
|
self.body = ""
|
||||||
self.diff_urls = []
|
self.diff_urls = []
|
||||||
self.release_pr = 0
|
self.release_pr = 0
|
||||||
ref = github_event.get("ref", "refs/head/master")
|
ref = github_event.get("ref", "refs/heads/master")
|
||||||
if ref and ref.startswith("refs/heads/"):
|
if ref and ref.startswith("refs/heads/"):
|
||||||
ref = ref[11:]
|
ref = ref[11:]
|
||||||
|
|
||||||
|
@ -447,6 +447,7 @@
|
|||||||
"FORMAT"
|
"FORMAT"
|
||||||
"formatDateTime"
|
"formatDateTime"
|
||||||
"formatReadableQuantity"
|
"formatReadableQuantity"
|
||||||
|
"formatReadableDecimalSize"
|
||||||
"formatReadableSize"
|
"formatReadableSize"
|
||||||
"formatReadableTimeDelta"
|
"formatReadableTimeDelta"
|
||||||
"formatRow"
|
"formatRow"
|
||||||
|
@ -399,6 +399,7 @@
|
|||||||
"demangle"
|
"demangle"
|
||||||
"toNullable"
|
"toNullable"
|
||||||
"concat"
|
"concat"
|
||||||
|
"formatReadableDecimalSize"
|
||||||
"formatReadableSize"
|
"formatReadableSize"
|
||||||
"shardCount"
|
"shardCount"
|
||||||
"fromModifiedJulianDayOrNull"
|
"fromModifiedJulianDayOrNull"
|
||||||
|
@ -50,6 +50,24 @@ def check_backup_and_restore(storage_policy, backup_destination, size=1000):
|
|||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def check_system_tables():
|
||||||
|
disks = [
|
||||||
|
tuple(disk.split("\t"))
|
||||||
|
for disk in node.query("SELECT name, type FROM system.disks").split("\n")
|
||||||
|
if disk
|
||||||
|
]
|
||||||
|
expected_disks = (
|
||||||
|
("default", "local"),
|
||||||
|
("disk_s3", "s3"),
|
||||||
|
("disk_s3_other_bucket", "s3"),
|
||||||
|
("disk_s3_plain", "s3_plain"),
|
||||||
|
)
|
||||||
|
assert len(expected_disks) == len(disks)
|
||||||
|
for expected_disk in expected_disks:
|
||||||
|
if expected_disk not in disks:
|
||||||
|
raise AssertionError(f"Missed {expected_disk} in {disks}")
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.parametrize(
|
@pytest.mark.parametrize(
|
||||||
"storage_policy, to_disk",
|
"storage_policy, to_disk",
|
||||||
[
|
[
|
||||||
@ -93,6 +111,7 @@ def test_backup_to_s3():
|
|||||||
f"S3('http://minio1:9001/root/data/backups/{backup_name}', 'minio', 'minio123')"
|
f"S3('http://minio1:9001/root/data/backups/{backup_name}', 'minio', 'minio123')"
|
||||||
)
|
)
|
||||||
check_backup_and_restore(storage_policy, backup_destination)
|
check_backup_and_restore(storage_policy, backup_destination)
|
||||||
|
check_system_tables()
|
||||||
|
|
||||||
|
|
||||||
def test_backup_to_s3_named_collection():
|
def test_backup_to_s3_named_collection():
|
||||||
|
@ -596,3 +596,48 @@ def test_cmd_wchp(started_cluster):
|
|||||||
assert "/test_4lw_normal_node_1" in list_data
|
assert "/test_4lw_normal_node_1" in list_data
|
||||||
finally:
|
finally:
|
||||||
destroy_zk_client(zk)
|
destroy_zk_client(zk)
|
||||||
|
|
||||||
|
|
||||||
|
def test_cmd_csnp(started_cluster):
|
||||||
|
zk = None
|
||||||
|
try:
|
||||||
|
wait_nodes()
|
||||||
|
zk = get_fake_zk(node1.name, timeout=30.0)
|
||||||
|
data = keeper_utils.send_4lw_cmd(cluster, node1, cmd="csnp")
|
||||||
|
try:
|
||||||
|
int(data)
|
||||||
|
assert True
|
||||||
|
except ValueError:
|
||||||
|
assert False
|
||||||
|
finally:
|
||||||
|
destroy_zk_client(zk)
|
||||||
|
|
||||||
|
|
||||||
|
def test_cmd_lgif(started_cluster):
|
||||||
|
zk = None
|
||||||
|
try:
|
||||||
|
wait_nodes()
|
||||||
|
clear_znodes()
|
||||||
|
|
||||||
|
zk = get_fake_zk(node1.name, timeout=30.0)
|
||||||
|
do_some_action(zk, create_cnt=100)
|
||||||
|
|
||||||
|
data = keeper_utils.send_4lw_cmd(cluster, node1, cmd="lgif")
|
||||||
|
print(data)
|
||||||
|
reader = csv.reader(data.split("\n"), delimiter="\t")
|
||||||
|
result = {}
|
||||||
|
|
||||||
|
for row in reader:
|
||||||
|
if len(row) != 0:
|
||||||
|
result[row[0]] = row[1]
|
||||||
|
|
||||||
|
assert int(result["first_log_idx"]) == 1
|
||||||
|
assert int(result["first_log_term"]) == 1
|
||||||
|
assert int(result["last_log_idx"]) >= 1
|
||||||
|
assert int(result["last_log_term"]) == 1
|
||||||
|
assert int(result["last_committed_log_idx"]) >= 1
|
||||||
|
assert int(result["leader_committed_log_idx"]) >= 1
|
||||||
|
assert int(result["target_committed_log_idx"]) >= 1
|
||||||
|
assert int(result["last_snapshot_idx"]) >= 1
|
||||||
|
finally:
|
||||||
|
destroy_zk_client(zk)
|
||||||
|
@ -1,8 +0,0 @@
|
|||||||
<clickhouse>
|
|
||||||
<zookeeper>
|
|
||||||
<node index="1">
|
|
||||||
<host>zoo1</host>
|
|
||||||
<port>2181</port>
|
|
||||||
</node>
|
|
||||||
</zookeeper>
|
|
||||||
</clickhouse>
|
|
@ -1,88 +0,0 @@
|
|||||||
import pytest
|
|
||||||
import time
|
|
||||||
from helpers.client import QueryRuntimeException
|
|
||||||
from helpers.cluster import ClickHouseCluster
|
|
||||||
from helpers.test_tools import TSV
|
|
||||||
|
|
||||||
cluster = ClickHouseCluster(__file__)
|
|
||||||
node = cluster.add_instance(
|
|
||||||
"node",
|
|
||||||
main_configs=["configs/zookeeper_config.xml"],
|
|
||||||
with_zookeeper=True,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
@pytest.fixture(scope="module")
|
|
||||||
def start_cluster():
|
|
||||||
try:
|
|
||||||
cluster.start()
|
|
||||||
|
|
||||||
yield cluster
|
|
||||||
finally:
|
|
||||||
cluster.shutdown()
|
|
||||||
|
|
||||||
|
|
||||||
def get_part_number(table_name):
|
|
||||||
return TSV(
|
|
||||||
node.query(
|
|
||||||
f"SELECT count(*) FROM system.parts where table='{table_name}' and active=1"
|
|
||||||
)
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
def check_expected_part_number(seconds, table_name, expected):
|
|
||||||
ok = False
|
|
||||||
for i in range(int(seconds) * 2):
|
|
||||||
result = get_part_number(table_name)
|
|
||||||
if result == expected:
|
|
||||||
ok = True
|
|
||||||
break
|
|
||||||
else:
|
|
||||||
time.sleep(1)
|
|
||||||
assert ok
|
|
||||||
|
|
||||||
|
|
||||||
def test_without_force_merge_old_parts(start_cluster):
|
|
||||||
node.query(
|
|
||||||
"CREATE TABLE test_without_merge (i Int64) ENGINE = MergeTree ORDER BY i;"
|
|
||||||
)
|
|
||||||
node.query("INSERT INTO test_without_merge SELECT 1")
|
|
||||||
node.query("INSERT INTO test_without_merge SELECT 2")
|
|
||||||
node.query("INSERT INTO test_without_merge SELECT 3")
|
|
||||||
|
|
||||||
expected = TSV("""3\n""")
|
|
||||||
# verify that the parts don't get merged
|
|
||||||
for i in range(10):
|
|
||||||
if get_part_number("test_without_merge") != expected:
|
|
||||||
assert False
|
|
||||||
time.sleep(1)
|
|
||||||
|
|
||||||
node.query("DROP TABLE test_without_merge;")
|
|
||||||
|
|
||||||
|
|
||||||
def test_force_merge_old_parts(start_cluster):
|
|
||||||
node.query(
|
|
||||||
"CREATE TABLE test_with_merge (i Int64) ENGINE = MergeTree ORDER BY i SETTINGS min_age_to_force_merge_seconds=5;"
|
|
||||||
)
|
|
||||||
node.query("INSERT INTO test_with_merge SELECT 1")
|
|
||||||
node.query("INSERT INTO test_with_merge SELECT 2")
|
|
||||||
node.query("INSERT INTO test_with_merge SELECT 3")
|
|
||||||
|
|
||||||
expected = TSV("""1\n""")
|
|
||||||
check_expected_part_number(10, "test_with_merge", expected)
|
|
||||||
|
|
||||||
node.query("DROP TABLE test_with_merge;")
|
|
||||||
|
|
||||||
|
|
||||||
def test_force_merge_old_parts_replicated_merge_tree(start_cluster):
|
|
||||||
node.query(
|
|
||||||
"CREATE TABLE test_replicated (i Int64) ENGINE = ReplicatedMergeTree('/clickhouse/testing/test', 'node') ORDER BY i SETTINGS min_age_to_force_merge_seconds=5;"
|
|
||||||
)
|
|
||||||
node.query("INSERT INTO test_replicated SELECT 1")
|
|
||||||
node.query("INSERT INTO test_replicated SELECT 2")
|
|
||||||
node.query("INSERT INTO test_replicated SELECT 3")
|
|
||||||
|
|
||||||
expected = TSV("""1\n""")
|
|
||||||
check_expected_part_number(10, "test_replicated", expected)
|
|
||||||
|
|
||||||
node.query("DROP TABLE test_replicated;")
|
|
@ -693,6 +693,19 @@ def test_auto_close_connection(started_cluster):
|
|||||||
assert count == 2
|
assert count == 2
|
||||||
|
|
||||||
|
|
||||||
|
def test_datetime(started_cluster):
|
||||||
|
cursor = started_cluster.postgres_conn.cursor()
|
||||||
|
cursor.execute("drop table if exists test")
|
||||||
|
cursor.execute("create table test (u timestamp)")
|
||||||
|
|
||||||
|
node1.query("drop database if exists pg")
|
||||||
|
node1.query("create database pg engine = PostgreSQL(postgres1)")
|
||||||
|
assert "DateTime64(6)" in node1.query("show create table pg.test")
|
||||||
|
node1.query("detach table pg.test")
|
||||||
|
node1.query("attach table pg.test")
|
||||||
|
assert "DateTime64(6)" in node1.query("show create table pg.test")
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
cluster.start()
|
cluster.start()
|
||||||
input("Cluster created, press any key to destroy...")
|
input("Cluster created, press any key to destroy...")
|
||||||
|
@ -0,0 +1,70 @@
|
|||||||
|
1.00 B 1.00 B 1.00 B
|
||||||
|
2.72 B 2.00 B 2.00 B
|
||||||
|
7.39 B 7.00 B 7.00 B
|
||||||
|
20.09 B 20.00 B 20.00 B
|
||||||
|
54.60 B 54.00 B 54.00 B
|
||||||
|
148.41 B 148.00 B 148.00 B
|
||||||
|
403.43 B 403.00 B 403.00 B
|
||||||
|
1.10 KB 1.10 KB 1.10 KB
|
||||||
|
2.98 KB 2.98 KB 2.98 KB
|
||||||
|
8.10 KB 8.10 KB 8.10 KB
|
||||||
|
22.03 KB 22.03 KB 22.03 KB
|
||||||
|
59.87 KB 59.87 KB 59.87 KB
|
||||||
|
162.75 KB 162.75 KB 162.75 KB
|
||||||
|
442.41 KB 442.41 KB 442.41 KB
|
||||||
|
1.20 MB 1.20 MB 1.20 MB
|
||||||
|
3.27 MB 3.27 MB 3.27 MB
|
||||||
|
8.89 MB 8.89 MB 8.89 MB
|
||||||
|
24.15 MB 24.15 MB 24.15 MB
|
||||||
|
65.66 MB 65.66 MB 65.66 MB
|
||||||
|
178.48 MB 178.48 MB 178.48 MB
|
||||||
|
485.17 MB 485.17 MB 485.17 MB
|
||||||
|
1.32 GB 1.32 GB 1.32 GB
|
||||||
|
3.58 GB 3.58 GB 2.15 GB
|
||||||
|
9.74 GB 9.74 GB 2.15 GB
|
||||||
|
26.49 GB 26.49 GB 2.15 GB
|
||||||
|
72.00 GB 72.00 GB 2.15 GB
|
||||||
|
195.73 GB 195.73 GB 2.15 GB
|
||||||
|
532.05 GB 532.05 GB 2.15 GB
|
||||||
|
1.45 TB 1.45 TB 2.15 GB
|
||||||
|
3.93 TB 3.93 TB 2.15 GB
|
||||||
|
10.69 TB 10.69 TB 2.15 GB
|
||||||
|
29.05 TB 29.05 TB 2.15 GB
|
||||||
|
78.96 TB 78.96 TB 2.15 GB
|
||||||
|
214.64 TB 214.64 TB 2.15 GB
|
||||||
|
583.46 TB 583.46 TB 2.15 GB
|
||||||
|
1.59 PB 1.59 PB 2.15 GB
|
||||||
|
4.31 PB 4.31 PB 2.15 GB
|
||||||
|
11.72 PB 11.72 PB 2.15 GB
|
||||||
|
31.86 PB 31.86 PB 2.15 GB
|
||||||
|
86.59 PB 86.59 PB 2.15 GB
|
||||||
|
235.39 PB 235.39 PB 2.15 GB
|
||||||
|
639.84 PB 639.84 PB 2.15 GB
|
||||||
|
1.74 EB 1.74 EB 2.15 GB
|
||||||
|
4.73 EB 4.73 EB 2.15 GB
|
||||||
|
12.85 EB 12.85 EB 2.15 GB
|
||||||
|
34.93 EB 18.45 EB 2.15 GB
|
||||||
|
94.96 EB 18.45 EB 2.15 GB
|
||||||
|
258.13 EB 18.45 EB 2.15 GB
|
||||||
|
701.67 EB 18.45 EB 2.15 GB
|
||||||
|
1.91 ZB 18.45 EB 2.15 GB
|
||||||
|
5.18 ZB 18.45 EB 2.15 GB
|
||||||
|
14.09 ZB 18.45 EB 2.15 GB
|
||||||
|
38.31 ZB 18.45 EB 2.15 GB
|
||||||
|
104.14 ZB 18.45 EB 2.15 GB
|
||||||
|
283.08 ZB 18.45 EB 2.15 GB
|
||||||
|
769.48 ZB 18.45 EB 2.15 GB
|
||||||
|
2.09 YB 18.45 EB 2.15 GB
|
||||||
|
5.69 YB 18.45 EB 2.15 GB
|
||||||
|
15.46 YB 18.45 EB 2.15 GB
|
||||||
|
42.01 YB 18.45 EB 2.15 GB
|
||||||
|
114.20 YB 18.45 EB 2.15 GB
|
||||||
|
310.43 YB 18.45 EB 2.15 GB
|
||||||
|
843.84 YB 18.45 EB 2.15 GB
|
||||||
|
2293.78 YB 18.45 EB 2.15 GB
|
||||||
|
6235.15 YB 18.45 EB 2.15 GB
|
||||||
|
16948.89 YB 18.45 EB 2.15 GB
|
||||||
|
46071.87 YB 18.45 EB 2.15 GB
|
||||||
|
125236.32 YB 18.45 EB 2.15 GB
|
||||||
|
340427.60 YB 18.45 EB 2.15 GB
|
||||||
|
925378.17 YB 18.45 EB 2.15 GB
|
@ -0,0 +1,4 @@
|
|||||||
|
WITH round(exp(number), 6) AS x, x > 0xFFFFFFFFFFFFFFFF ? 0xFFFFFFFFFFFFFFFF : toUInt64(x) AS y, x > 0x7FFFFFFF ? 0x7FFFFFFF : toInt32(x) AS z
|
||||||
|
SELECT formatReadableDecimalSize(x), formatReadableDecimalSize(y), formatReadableDecimalSize(z)
|
||||||
|
FROM system.numbers
|
||||||
|
LIMIT 70;
|
@ -1,2 +1,3 @@
|
|||||||
1 1 1
|
1 1 1
|
||||||
2 2 2
|
2 2 2
|
||||||
|
1
|
||||||
|
@ -9,3 +9,13 @@ set allow_experimental_projection_optimization = 1, max_rows_to_read = 3;
|
|||||||
select * from t where i < 5 and j in (1, 2);
|
select * from t where i < 5 and j in (1, 2);
|
||||||
|
|
||||||
drop table t;
|
drop table t;
|
||||||
|
|
||||||
|
drop table if exists test;
|
||||||
|
|
||||||
|
create table test (name String, time Int64) engine MergeTree order by time;
|
||||||
|
|
||||||
|
insert into test values ('hello world', 1662336000241);
|
||||||
|
|
||||||
|
select count() from (select fromUnixTimestamp64Milli(time, 'UTC') time_fmt, name from test where time_fmt > '2022-09-05 00:00:00');
|
||||||
|
|
||||||
|
drop table test;
|
||||||
|
@ -0,0 +1,32 @@
|
|||||||
|
#!/usr/bin/expect -f
|
||||||
|
# Tags: long
|
||||||
|
|
||||||
|
# This is a regression test for concurrent access in ProgressIndication,
|
||||||
|
# so it is important to read enough rows here (10e6).
|
||||||
|
#
|
||||||
|
# Initially it was 100e6, but under the thread fuzzer 10 minutes may not always be enough,
|
||||||
|
# but CI should still catch possible issues even with fewer rows.
|
||||||
|
|
||||||
|
set basedir [file dirname $argv0]
|
||||||
|
set basename [file tail $argv0]
|
||||||
|
exp_internal -f $env(CLICKHOUSE_TMP)/$basename.debuglog 0
|
||||||
|
|
||||||
|
log_user 0
|
||||||
|
set timeout 60
|
||||||
|
match_max 100000
|
||||||
|
set stty_init "rows 25 cols 120"
|
||||||
|
|
||||||
|
expect_after {
|
||||||
|
eof { exp_continue }
|
||||||
|
timeout { exit 1 }
|
||||||
|
}
|
||||||
|
|
||||||
|
spawn bash
|
||||||
|
send "source $basedir/../shell_config.sh\r"
|
||||||
|
|
||||||
|
send "yes | head -n10000000 | \$CLICKHOUSE_CLIENT --query \"insert into function null('foo String') format TSV\" >/dev/null\r"
|
||||||
|
expect "Progress: "
|
||||||
|
send "\3"
|
||||||
|
|
||||||
|
send "exit\r"
|
||||||
|
expect eof
|
@ -1,2 +0,0 @@
|
|||||||
0
|
|
||||||
--progress produce some rows
|
|
@ -1,19 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
# Tags: long
|
|
||||||
|
|
||||||
# This is the regression for the concurrent access in ProgressIndication,
|
|
||||||
# so it is important to read enough rows here (10e6).
|
|
||||||
#
|
|
||||||
# Initially there was 100e6, but under thread fuzzer 10min may be not enough sometimes,
|
|
||||||
# but I believe that CI will catch possible issues even with less rows anyway.
|
|
||||||
|
|
||||||
CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
|
|
||||||
# shellcheck source=../shell_config.sh
|
|
||||||
. "$CUR_DIR"/../shell_config.sh
|
|
||||||
|
|
||||||
tmp_file_progress="$(mktemp "$CUR_DIR/$CLICKHOUSE_TEST_UNIQUE_NAME.XXXXXX.progress")"
|
|
||||||
trap 'rm $tmp_file_progress' EXIT
|
|
||||||
|
|
||||||
yes | head -n10000000 | $CLICKHOUSE_CLIENT -q "insert into function null('foo String') format TSV" --progress 2> "$tmp_file_progress"
|
|
||||||
echo $?
|
|
||||||
test -s "$tmp_file_progress" && echo "--progress produce some rows" || echo "FAIL: no rows with --progress"
|
|
@ -0,0 +1,32 @@
|
|||||||
|
#!/usr/bin/expect -f
|
||||||
|
# Tags: long
|
||||||
|
|
||||||
|
# This is a regression test for concurrent access in ProgressIndication,
|
||||||
|
# so it is important to read enough rows here (10e6).
|
||||||
|
#
|
||||||
|
# Initially it was 100e6, but under the thread fuzzer 10 minutes may not always be enough,
|
||||||
|
# but CI should still catch possible issues even with fewer rows.
|
||||||
|
|
||||||
|
set basedir [file dirname $argv0]
|
||||||
|
set basename [file tail $argv0]
|
||||||
|
exp_internal -f $env(CLICKHOUSE_TMP)/$basename.debuglog 0
|
||||||
|
|
||||||
|
log_user 0
|
||||||
|
set timeout 60
|
||||||
|
match_max 100000
|
||||||
|
set stty_init "rows 25 cols 120"
|
||||||
|
|
||||||
|
expect_after {
|
||||||
|
eof { exp_continue }
|
||||||
|
timeout { exit 1 }
|
||||||
|
}
|
||||||
|
|
||||||
|
spawn bash
|
||||||
|
send "source $basedir/../shell_config.sh\r"
|
||||||
|
|
||||||
|
send "yes | head -n10000000 | \$CLICKHOUSE_LOCAL --query \"insert into function null('foo String') format TSV\" >/dev/null\r"
|
||||||
|
expect "Progress: "
|
||||||
|
send "\3"
|
||||||
|
|
||||||
|
send "exit\r"
|
||||||
|
expect eof
|
@ -1,2 +0,0 @@
|
|||||||
0
|
|
||||||
--progress produce some rows
|
|
@ -1,19 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
# Tags: long
|
|
||||||
|
|
||||||
# This is the regression for the concurrent access in ProgressIndication,
|
|
||||||
# so it is important to read enough rows here (10e6).
|
|
||||||
#
|
|
||||||
# Initially there was 100e6, but under thread fuzzer 10min may be not enough sometimes,
|
|
||||||
# but I believe that CI will catch possible issues even with less rows anyway.
|
|
||||||
|
|
||||||
CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
|
|
||||||
# shellcheck source=../shell_config.sh
|
|
||||||
. "$CUR_DIR"/../shell_config.sh
|
|
||||||
|
|
||||||
tmp_file_progress="$(mktemp "$CUR_DIR/$CLICKHOUSE_TEST_UNIQUE_NAME.XXXXXX.progress")"
|
|
||||||
trap 'rm $tmp_file_progress' EXIT
|
|
||||||
|
|
||||||
yes | head -n10000000 | $CLICKHOUSE_LOCAL -q "insert into function null('foo String') format TSV" --progress 2> "$tmp_file_progress"
|
|
||||||
echo $?
|
|
||||||
test -s "$tmp_file_progress" && echo "--progress produce some rows" || echo "FAIL: no rows with --progress"
|
|
14
tests/queries/0_stateless/02353_ascii.reference
Normal file
14
tests/queries/0_stateless/02353_ascii.reference
Normal file
@ -0,0 +1,14 @@
|
|||||||
|
50
|
||||||
|
0
|
||||||
|
50
|
||||||
|
0
|
||||||
|
48
|
||||||
|
49
|
||||||
|
50
|
||||||
|
51
|
||||||
|
52
|
||||||
|
53
|
||||||
|
54
|
||||||
|
55
|
||||||
|
56
|
||||||
|
57
|
5
tests/queries/0_stateless/02353_ascii.sql
Normal file
5
tests/queries/0_stateless/02353_ascii.sql
Normal file
@ -0,0 +1,5 @@
|
|||||||
|
SELECT ascii('234');
|
||||||
|
SELECT ascii('');
|
||||||
|
SELECT ascii(materialize('234'));
|
||||||
|
SELECT ascii(materialize(''));
|
||||||
|
SELECT ascii(toString(number) || 'abc') from numbers(10);
|
55
tests/queries/0_stateless/02456_progress_tty.expect
Executable file
55
tests/queries/0_stateless/02456_progress_tty.expect
Executable file
@ -0,0 +1,55 @@
|
|||||||
|
#!/usr/bin/expect -f
|
||||||
|
|
||||||
|
set basedir [file dirname $argv0]
|
||||||
|
set basename [file tail $argv0]
|
||||||
|
exp_internal -f $env(CLICKHOUSE_TMP)/$basename.debuglog 0
|
||||||
|
|
||||||
|
log_user 0
|
||||||
|
set timeout 60
|
||||||
|
match_max 100000
|
||||||
|
set stty_init "rows 25 cols 120"
|
||||||
|
|
||||||
|
expect_after {
|
||||||
|
eof { exp_continue }
|
||||||
|
timeout { exit 1 }
|
||||||
|
}
|
||||||
|
|
||||||
|
spawn bash
|
||||||
|
send "source $basedir/../shell_config.sh\r"
|
||||||
|
|
||||||
|
# Progress is displayed by default
|
||||||
|
send "\$CLICKHOUSE_LOCAL --query 'SELECT sum(sleep(1) = 0) FROM numbers(3) SETTINGS max_block_size = 1' >/dev/null\r"
|
||||||
|
expect "Progress: "
|
||||||
|
expect "█"
|
||||||
|
send "\3"
|
||||||
|
|
||||||
|
# It is true even if we redirect both stdout and stderr to /dev/null
|
||||||
|
send "\$CLICKHOUSE_LOCAL --query 'SELECT sum(sleep(1) = 0) FROM numbers(3) SETTINGS max_block_size = 1' >/dev/null 2>&1\r"
|
||||||
|
expect "Progress: "
|
||||||
|
expect "█"
|
||||||
|
send "\3"
|
||||||
|
|
||||||
|
# The option --progress has implicit value of true
|
||||||
|
send "\$CLICKHOUSE_LOCAL --progress --query 'SELECT sum(sleep(1) = 0) FROM numbers(3) SETTINGS max_block_size = 1' >/dev/null 2>&1\r"
|
||||||
|
expect "Progress: "
|
||||||
|
expect "█"
|
||||||
|
send "\3"
|
||||||
|
|
||||||
|
# But we can set it to false
|
||||||
|
send "\$CLICKHOUSE_LOCAL --progress false --query 'SELECT sleep(1), \$\$Hello\$\$ FROM numbers(3) SETTINGS max_block_size = 1' 2>/dev/null\r"
|
||||||
|
expect -exact "0\tHello\r\n"
|
||||||
|
send "\3"
|
||||||
|
|
||||||
|
# As well as to 0 for the same effect
|
||||||
|
send "\$CLICKHOUSE_LOCAL --progress 0 --query 'SELECT sleep(1), \$\$Hello\$\$ FROM numbers(3) SETTINGS max_block_size = 1' 2>/dev/null\r"
|
||||||
|
expect -exact "0\tHello\r\n"
|
||||||
|
send "\3"
|
||||||
|
|
||||||
|
# If we set it to 1, the progress will be displayed as well
|
||||||
|
send "\$CLICKHOUSE_LOCAL --progress 1 --query 'SELECT sum(sleep(1) = 0) FROM numbers(3) SETTINGS max_block_size = 1' >/dev/null 2>&1\r"
|
||||||
|
expect "Progress: "
|
||||||
|
expect "█"
|
||||||
|
send "\3"
|
||||||
|
|
||||||
|
send "exit\r"
|
||||||
|
expect eof
|
12
tests/queries/0_stateless/02473_optimize_old_parts.reference
Normal file
12
tests/queries/0_stateless/02473_optimize_old_parts.reference
Normal file
@ -0,0 +1,12 @@
|
|||||||
|
Without merge
|
||||||
|
3
|
||||||
|
With merge any part range
|
||||||
|
1
|
||||||
|
With merge partition only
|
||||||
|
1
|
||||||
|
With merge replicated any part range
|
||||||
|
1
|
||||||
|
With merge replicated partition only
|
||||||
|
1
|
||||||
|
With merge partition only and new parts
|
||||||
|
3
|
87
tests/queries/0_stateless/02473_optimize_old_parts.sql
Normal file
87
tests/queries/0_stateless/02473_optimize_old_parts.sql
Normal file
@ -0,0 +1,87 @@
|
|||||||
|
-- Tags: long
|
||||||
|
|
||||||
|
DROP TABLE IF EXISTS test_without_merge;
|
||||||
|
DROP TABLE IF EXISTS test_with_merge;
|
||||||
|
DROP TABLE IF EXISTS test_replicated;
|
||||||
|
|
||||||
|
SELECT 'Without merge';
|
||||||
|
|
||||||
|
CREATE TABLE test_without_merge (i Int64) ENGINE = MergeTree ORDER BY i;
|
||||||
|
INSERT INTO test_without_merge SELECT 1;
|
||||||
|
INSERT INTO test_without_merge SELECT 2;
|
||||||
|
INSERT INTO test_without_merge SELECT 3;
|
||||||
|
|
||||||
|
SELECT sleepEachRow(1) FROM numbers(9) FORMAT Null;
|
||||||
|
SELECT count(*) FROM system.parts WHERE database = currentDatabase() AND table='test_without_merge' AND active;
|
||||||
|
|
||||||
|
DROP TABLE test_without_merge;
|
||||||
|
|
||||||
|
SELECT 'With merge any part range';
|
||||||
|
|
||||||
|
CREATE TABLE test_with_merge (i Int64) ENGINE = MergeTree ORDER BY i
|
||||||
|
SETTINGS min_age_to_force_merge_seconds=3, min_age_to_force_merge_on_partition_only=false;
|
||||||
|
INSERT INTO test_with_merge SELECT 1;
|
||||||
|
INSERT INTO test_with_merge SELECT 2;
|
||||||
|
INSERT INTO test_with_merge SELECT 3;
|
||||||
|
|
||||||
|
SELECT sleepEachRow(1) FROM numbers(9) FORMAT Null;
|
||||||
|
SELECT count(*) FROM system.parts WHERE database = currentDatabase() AND table='test_with_merge' AND active;
|
||||||
|
|
||||||
|
DROP TABLE test_with_merge;
|
||||||
|
|
||||||
|
SELECT 'With merge partition only';
|
||||||
|
|
||||||
|
CREATE TABLE test_with_merge (i Int64) ENGINE = MergeTree ORDER BY i
|
||||||
|
SETTINGS min_age_to_force_merge_seconds=3, min_age_to_force_merge_on_partition_only=true;
|
||||||
|
INSERT INTO test_with_merge SELECT 1;
|
||||||
|
INSERT INTO test_with_merge SELECT 2;
|
||||||
|
INSERT INTO test_with_merge SELECT 3;
|
||||||
|
|
||||||
|
SELECT sleepEachRow(1) FROM numbers(9) FORMAT Null;
|
||||||
|
SELECT count(*) FROM system.parts WHERE database = currentDatabase() AND table='test_with_merge' AND active;
|
||||||
|
|
||||||
|
DROP TABLE test_with_merge;
|
||||||
|
|
||||||
|
SELECT 'With merge replicated any part range';
|
||||||
|
|
||||||
|
CREATE TABLE test_replicated (i Int64) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test02473', 'node') ORDER BY i
|
||||||
|
SETTINGS min_age_to_force_merge_seconds=3, min_age_to_force_merge_on_partition_only=false;
|
||||||
|
INSERT INTO test_replicated SELECT 1;
|
||||||
|
INSERT INTO test_replicated SELECT 2;
|
||||||
|
INSERT INTO test_replicated SELECT 3;
|
||||||
|
|
||||||
|
SELECT sleepEachRow(1) FROM numbers(9) FORMAT Null;
|
||||||
|
SELECT count(*) FROM system.parts WHERE database = currentDatabase() AND table='test_replicated' AND active;
|
||||||
|
|
||||||
|
DROP TABLE test_replicated;
|
||||||
|
|
||||||
|
SELECT 'With merge replicated partition only';
|
||||||
|
|
||||||
|
CREATE TABLE test_replicated (i Int64) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test02473_partition_only', 'node') ORDER BY i
|
||||||
|
SETTINGS min_age_to_force_merge_seconds=3, min_age_to_force_merge_on_partition_only=true;
|
||||||
|
INSERT INTO test_replicated SELECT 1;
|
||||||
|
INSERT INTO test_replicated SELECT 2;
|
||||||
|
INSERT INTO test_replicated SELECT 3;
|
||||||
|
|
||||||
|
SELECT sleepEachRow(1) FROM numbers(9) FORMAT Null;
|
||||||
|
SELECT count(*) FROM system.parts WHERE database = currentDatabase() AND table='test_replicated' AND active;
|
||||||
|
|
||||||
|
DROP TABLE test_replicated;
|
||||||
|
|
||||||
|
SELECT 'With merge partition only and new parts';
|
||||||
|
|
||||||
|
CREATE TABLE test_with_merge (i Int64) ENGINE = MergeTree ORDER BY i
|
||||||
|
SETTINGS min_age_to_force_merge_seconds=3, min_age_to_force_merge_on_partition_only=true;
|
||||||
|
SYSTEM STOP MERGES test_with_merge;
|
||||||
|
-- These two parts will have min_age=6 at the time of merge
|
||||||
|
INSERT INTO test_with_merge SELECT 1;
|
||||||
|
INSERT INTO test_with_merge SELECT 2;
|
||||||
|
SELECT sleepEachRow(1) FROM numbers(9) FORMAT Null;
|
||||||
|
-- This part will have min_age=0 at the time of merge
|
||||||
|
-- and so, nothing will be merged.
|
||||||
|
INSERT INTO test_with_merge SELECT 3;
|
||||||
|
SYSTEM START MERGES test_with_merge;
|
||||||
|
|
||||||
|
SELECT count(*) FROM system.parts WHERE database = currentDatabase() AND table='test_with_merge' AND active;
|
||||||
|
|
||||||
|
DROP TABLE test_with_merge;
|
1
tests/queries/0_stateless/02476_fix_cast_parser_bug.sql
Normal file
1
tests/queries/0_stateless/02476_fix_cast_parser_bug.sql
Normal file
@ -0,0 +1 @@
|
|||||||
|
SELECT CAST(a, b -> c) ++; -- { clientError SYNTAX_ERROR }
|
@ -20,17 +20,13 @@ add_subdirectory (report)
|
|||||||
# Not used in package
|
# Not used in package
|
||||||
if (NOT DEFINED ENABLE_UTILS OR ENABLE_UTILS)
|
if (NOT DEFINED ENABLE_UTILS OR ENABLE_UTILS)
|
||||||
add_subdirectory (compressor)
|
add_subdirectory (compressor)
|
||||||
add_subdirectory (iotest)
|
|
||||||
add_subdirectory (corrector_utf8)
|
add_subdirectory (corrector_utf8)
|
||||||
add_subdirectory (zookeeper-cli)
|
add_subdirectory (zookeeper-cli)
|
||||||
add_subdirectory (zookeeper-dump-tree)
|
add_subdirectory (zookeeper-dump-tree)
|
||||||
add_subdirectory (zookeeper-remove-by-list)
|
add_subdirectory (zookeeper-remove-by-list)
|
||||||
add_subdirectory (zookeeper-create-entry-to-download-part)
|
|
||||||
add_subdirectory (zookeeper-adjust-block-numbers-to-parts)
|
|
||||||
add_subdirectory (wikistat-loader)
|
add_subdirectory (wikistat-loader)
|
||||||
add_subdirectory (check-marks)
|
add_subdirectory (check-marks)
|
||||||
add_subdirectory (checksum-for-compressed-block)
|
add_subdirectory (checksum-for-compressed-block)
|
||||||
add_subdirectory (db-generator)
|
|
||||||
add_subdirectory (wal-dump)
|
add_subdirectory (wal-dump)
|
||||||
add_subdirectory (check-mysql-binlog)
|
add_subdirectory (check-mysql-binlog)
|
||||||
add_subdirectory (keeper-bench)
|
add_subdirectory (keeper-bench)
|
||||||
|
@ -1,3 +1,7 @@
|
|||||||
|
## This parser is unsupported
|
||||||
|
|
||||||
|
We keep it in this repository for your curiosity, but it is not the parser used by ClickHouse.
|
||||||
|
|
||||||
## How to generate source code files from grammar
|
## How to generate source code files from grammar
|
||||||
|
|
||||||
Grammar is located inside `ClickHouseLexer.g4` and `ClickHouseParser.g4` files.
|
Grammar is located inside `ClickHouseLexer.g4` and `ClickHouseParser.g4` files.
|
||||||
|
@ -1,2 +0,0 @@
|
|||||||
clickhouse_add_executable (query_db_generator query_db_generator.cpp)
|
|
||||||
target_link_libraries(query_db_generator PRIVATE clickhouse_parsers boost::program_options)
|
|
@ -1,35 +0,0 @@
|
|||||||
# Clickhouse query analysis
|
|
||||||
|
|
||||||
Here we will consider only `SELECT` queries, i.e. those queries that get data from the table.
|
|
||||||
The built-in Clickhouse parser accepts a string as input, which is a query. Among 14 main clauses of `SELECT` statement: `WITH`, `SELECT`, `TABLES`, `PREWHERE`, `WHERE`, `GROUP_BY`, `HAVING`, `ORDER_BY`, `LIMIT_BY_OFFSET`, `LIMIT_BY_LENGTH`, `LIMIT_BY`, `LIMIT_OFFSET`, `LIMIT_LENGTH`, `SETTINGS`, we will analyze the `SELECT`, `TABLES`, `WHERE`, `GROUP_BY`, `HAVING`, `ORDER_BY` clauses because the most of data is there. We need this data to analyze the structure and to identify values. The parser issues a tree structure after parsing a query, where each node is a specific query execution operation, a function over values, a constant, a designation, etc. Nodes also have subtrees where their arguments or suboperations are located. We will try to reveal the data we need by avoiding this tree.
|
|
||||||
|
|
||||||
## Scheme analysis
|
|
||||||
|
|
||||||
It is necessary to determine possible tables by a query. Having a query string, you can understand which parts of it represent the names of the tables, so you can determine their number in our database.
|
|
||||||
In the Clickhouse parser, `TABLES` (Figure 1) is a query subtree responsible for tables where we get data. It contains the main table where the columns come from, as well as the `JOIN` operations that are performed in the query. Avoiding all nodes in the subtree, we use the names of the tables and databases where they are located, as well as their alias, i.e. the shortened names chosen by the query author. We may need these names to determine the ownership of the column in the future.
|
|
||||||
Thus, we get a set of databases for the query, as well as tables and their aliases, with the help of them a query is made.
|
|
||||||
|
|
||||||
Then we need to define the set of columns that are in the query and the tables they can refer to. The set of columns in each table is already known during the query execution. Therefore, the program automatically links the column and table at runtime. However, in our case, it is impossible to unambiguously interpret the belonging of a column to a specific table, for example, in the following query `SELECT column1, column2, column3 FROM table1 JOIN table2 on table1.column2 = table2.column3`. In this case, we can say which table `column2` and `column3` belong to. However, `column1` can belong to either the first or the second table. We will refer undefined columns to the main table, on which a query is made, for unambiguous interpretation of such cases. For example, in this case, it will be `table1`.
|
|
||||||
All columns in the tree are in `IDENTIFIER` type nodes, which are in the `SELECT`, `TABLES`, `WHERE`, `GROUP_BY`, `HAVING`, `ORDER_BY` subtrees. We form a set of all tables recursively avoiding the subtrees, then we split the column into constituents such as the table (if it is explicitly specified with a dot) and the name. Then, since the table can be an alias, we replace the alias with the original table name. We now have a list of all the columns and tables they belong to. We define the main query table for non-table columns.
|
|
||||||
|
|
||||||
## Column analysis
|
|
||||||
|
|
||||||
Then we need to exactly define data types for columns that have a value in the query. An example is the boolean `WHERE` clause where we test boolean expressions in its attributes. If the query specifies `column > 5`, then we can conclude that this column contains a numeric value, or if the `LIKE` expression is applied to the attribute, then the attribute has a string type.
|
|
||||||
In this part, you need to learn how to extract such expressions from a query and match data types for columns, where it is possible. At the same time, it is clear that it is not always possible to make an unambiguous decision about the type of a particular attribute from the available values. For example, `column > 5` can mean many numeric types such as `UINT8`, `UINT32`, `INT32`, `INT64`, etc. It is necessary to determine the interpretation of certain values since searching through all possible values can be quite large and long.
|
|
||||||
It can take a long time to iterate over all possible values, so we use `INT64` and `FLOAT64` types for numeric values, `STRING` for strings, `DATE` and `DATETIME` for dates, and `ARRAY`.
We can determine column types from the boolean, arithmetic and other functions applied to column values in the query. Such functions appear in the `SELECT` and `WHERE` subtrees, and a function argument can be a constant, a column or another function (Figure 2). Two kinds of information help us infer the type of a column:

- The types of arguments the function accepts. For example, the `TOSTARTOFMINUTE` function (which rounds a time down to the start of its minute) accepts only `DATETIME`, so if the argument of this function is a column, that column has the `DATETIME` type.

- The types of the other arguments of the same function. For example, the `EQUALS` function implies that its arguments have equal types, so if it is applied to a constant and a column, we can take the type of the column to be the type of the constant.

Thus, for each function we record the possible argument types, the return type, and which arguments must have identical types. A recursive handler then walks the function expressions, narrows down the possible types of the columns used in them based on the other arguments, and returns the possible types of the function's result.
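The following is a minimal sketch of such a recursive handler over a toy expression tree; the signature table, the type names and the narrowing rule are assumptions made for the example, not the generator's real data structures:

```cpp
// Hypothetical sketch: narrow down possible column types from the argument
// types accepted by the functions they are passed to.
#include <map>
#include <memory>
#include <set>
#include <string>
#include <vector>

using TypeSet = std::set<std::string>;

struct Expr
{
    enum Kind { Column, Constant, Function } kind;
    std::string name;                            /// column name, constant type or function name
    std::vector<std::shared_ptr<Expr>> args;     /// arguments, for functions only
};

/// For every known function: the types its arguments may have and its result type.
struct Signature
{
    TypeSet argument_types;
    std::string result_type;
};

TypeSet infer(const Expr & e,
              const std::map<std::string, Signature> & signatures,
              std::map<std::string, TypeSet> & column_types)
{
    if (e.kind == Expr::Constant)
        return {e.name};                         /// a constant carries its own type
    if (e.kind == Expr::Column)
        return column_types[e.name];             /// whatever has been narrowed down so far

    const Signature & sig = signatures.at(e.name);
    for (const auto & arg : e.args)
    {
        if (arg->kind == Expr::Column)
        {
            TypeSet & current = column_types[arg->name];
            if (current.empty())
                current = sig.argument_types;    /// first constraint on this column
            else
            {
                TypeSet narrowed;                /// intersect with the new constraint
                for (const auto & t : current)
                    if (sig.argument_types.count(t))
                        narrowed.insert(t);
                current = narrowed;
            }
        }
        else
            infer(*arg, signatures, column_types);
    }
    return {sig.result_type};
}

int main()
{
    std::map<std::string, Signature> signatures{{"toStartOfMinute", {{"DATETIME"}, "DATETIME"}}};
    std::map<std::string, TypeSet> column_types;

    auto column = std::make_shared<Expr>(Expr{Expr::Column, "column1", {}});
    Expr call{Expr::Function, "toStartOfMinute", {column}};
    infer(call, signatures, column_types);       /// column1 can now only be DATETIME
}
```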
Now each column has a set of possible types, and we choose one specific type from this set so that the query can be interpreted unambiguously.

## Column values definition

At this stage we already have the structure of the database tables, and we need to fill the tables with values. We have to understand which columns depend on each other during execution (for example, a join is performed on two columns, which means they must share some values), and what values the columns must take to satisfy the various conditions in the query.

To achieve this, we search for all comparison operations in the query. If both arguments of an operation are columns, we consider those columns linked. If the arguments are a column and a value, we add that value to the set of possible values of the column, together with the same value perturbed by some noise: a random number for a numeric type, a random number of days for a date, and so on. Each comparison operation needs its own handler, which generates at least two values, one satisfying the condition and one violating it. For example, for the operation `column1 > 5`, `column1` must be assigned one value greater than 5 and one value less than or equal to 5; likewise, for `column2 LIKE 'some%string'`, `column2` must be assigned one value that matches the pattern and one that does not.
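Below is a simplified sketch of such a per-operation handler for numeric comparisons; the function and the noise distribution are illustrative assumptions, and real handlers would also cover strings, dates and `LIKE` patterns:

```cpp
// Hypothetical sketch: for "column <op> constant" produce one value that
// satisfies the condition and one that violates it, with some random noise.
#include <random>
#include <string>
#include <utility>

std::pair<double, double> valuesForComparison(const std::string & op, double constant, std::mt19937 & rng)
{
    std::uniform_real_distribution<double> noise(1.0, 100.0);
    const double delta = noise(rng);

    if (op == ">")
        return {constant + delta, constant - delta};  /// e.g. column1 > 5 -> {5 + noise, 5 - noise}
    if (op == ">=")
        return {constant, constant - delta};
    if (op == "<")
        return {constant - delta, constant + delta};
    if (op == "<=")
        return {constant, constant + delta};
    if (op == "=")
        return {constant, constant + delta};
    return {constant + delta, constant};              /// "!="
}

int main()
{
    std::mt19937 rng(42);
    auto [satisfying, violating] = valuesForComparison(">", 5.0, rng);
    (void)satisfying;                                 /// strictly greater than 5
    (void)violating;                                  /// less than or equal to 5
}
```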
Now we have sets of linked columns and sets of values. The link relation between columns is symmetric by construction, but for a complete definition we also need transitivity: if `column1 = column2` and `column2 = column3`, then `column1 = column3`, and this does not follow automatically from the construction. Accordingly, we extend the connectivity to its transitive closure and merge the value sets of all columns in the same group. For columns that end up with no values at all, we generate random values.
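One way to obtain the transitive closure is a disjoint-set union over the linked columns, merging their value sets as groups are joined; this is only a sketch of the idea, with hypothetical names:

```cpp
// Hypothetical sketch: propagate "these columns must share values" transitively
// with a disjoint-set union, merging the value sets of linked columns.
#include <map>
#include <set>
#include <string>

struct ColumnGroups
{
    std::map<std::string, std::string> parent;
    std::map<std::string, std::set<double>> values;

    std::string find(const std::string & column)
    {
        if (parent.find(column) == parent.end())
            parent[column] = column;
        if (parent[column] != column)
            parent[column] = find(parent[column]);   /// path compression
        return parent[column];
    }

    /// Called for every column-to-column comparison found in the query.
    void link(const std::string & a, const std::string & b)
    {
        std::string root_a = find(a);
        std::string root_b = find(b);
        if (root_a == root_b)
            return;
        parent[root_b] = root_a;
        values[root_a].insert(values[root_b].begin(), values[root_b].end());  /// merge value sets
        values.erase(root_b);
    }

    void addValue(const std::string & column, double value) { values[find(column)].insert(value); }
};

int main()
{
    ColumnGroups groups;
    groups.addValue("column1", 7);
    groups.link("column1", "column2");
    groups.link("column2", "column3");   /// column1, column2, column3 now share one value set
}
```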
## Generation

We now have a complete view of the database schema as well as a set of values for each column. We generate the data for a specific table as the cartesian product of the value sets of its columns, which gives us, for each table, a set of rows composed of those values. Finally, we generate the queries that create the table and fill it with data: a `CREATE` query built from the table structure and column types, followed by an `INSERT` query over the generated set of values.
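Putting it together, a simplified sketch of the final step might look like this; the table, the columns, the types and the `ENGINE` are made up for the example, and the real generator derives them from the analysis described above:

```cpp
// Hypothetical sketch: take the per-column value sets of one table, build their
// cartesian product and print CREATE/INSERT statements.
#include <iostream>
#include <string>
#include <vector>

int main()
{
    std::string table = "table1";
    std::vector<std::string> columns{"column1", "column2"};
    std::vector<std::vector<std::string>> values{{"1", "6"}, {"'a'", "'b'"}};  /// per-column value sets

    /// CREATE query from the inferred schema (types fixed to the canonical set).
    std::cout << "CREATE TABLE " << table << " (column1 Int64, column2 String) ENGINE = Memory;\n";

    /// Cartesian product of the value sets -> one row per combination.
    std::vector<std::vector<std::string>> rows{{}};
    for (const auto & column_values : values)
    {
        std::vector<std::vector<std::string>> next;
        for (const auto & row : rows)
            for (const auto & value : column_values)
            {
                auto extended = row;
                extended.push_back(value);
                next.push_back(extended);
            }
        rows = std::move(next);
    }

    /// INSERT query that fills the table with the generated rows.
    std::cout << "INSERT INTO " << table << " VALUES";
    for (size_t i = 0; i < rows.size(); ++i)
    {
        std::cout << (i ? ", (" : " (");
        for (size_t j = 0; j < rows[i].size(); ++j)
            std::cout << (j ? ", " : "") << rows[i][j];
        std::cout << ")";
    }
    std::cout << ";\n";
}
```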
@ -1,9 +0,0 @@
clickhouse_add_executable (iotest iotest.cpp ${SRCS})
target_link_libraries (iotest PRIVATE clickhouse_common_io)

clickhouse_add_executable (iotest_nonblock iotest_nonblock.cpp ${SRCS})
target_link_libraries (iotest_nonblock PRIVATE clickhouse_common_io)

clickhouse_add_executable (iotest_aio iotest_aio.cpp ${SRCS})
target_link_libraries (iotest_aio PRIVATE clickhouse_common_io)
@ -1,197 +0,0 @@
|
|||||||
#include <IO/BufferWithOwnMemory.h>
|
|
||||||
#include <IO/ReadHelpers.h>
|
|
||||||
#include <pcg_random.hpp>
|
|
||||||
#include <Poco/Exception.h>
|
|
||||||
#include <Common/Exception.h>
|
|
||||||
#include <Common/Stopwatch.h>
|
|
||||||
#include <Common/ThreadPool.h>
|
|
||||||
#include <Common/randomSeed.h>
|
|
||||||
#include <base/getPageSize.h>
|
|
||||||
|
|
||||||
#include <cstdlib>
|
|
||||||
#include <iomanip>
|
|
||||||
#include <iostream>
|
|
||||||
#include <random>
|
|
||||||
#include <vector>
|
|
||||||
|
|
||||||
#include <fcntl.h>
|
|
||||||
#include <ctime>
|
|
||||||
#include <unistd.h>
|
|
||||||
|
|
||||||
|
|
||||||
namespace DB
|
|
||||||
{
|
|
||||||
namespace ErrorCodes
|
|
||||||
{
|
|
||||||
extern const int CANNOT_OPEN_FILE;
|
|
||||||
extern const int CANNOT_CLOSE_FILE;
|
|
||||||
extern const int CANNOT_READ_FROM_FILE_DESCRIPTOR;
|
|
||||||
extern const int CANNOT_WRITE_TO_FILE_DESCRIPTOR;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
enum Mode
|
|
||||||
{
|
|
||||||
MODE_NONE = 0,
|
|
||||||
MODE_READ = 1,
|
|
||||||
MODE_WRITE = 2,
|
|
||||||
MODE_ALIGNED = 4,
|
|
||||||
MODE_DIRECT = 8,
|
|
||||||
MODE_SYNC = 16,
|
|
||||||
};
|
|
||||||
|
|
||||||
|
|
||||||
void thread(int fd, int mode, size_t min_offset, size_t max_offset, size_t block_size, size_t count)
|
|
||||||
{
|
|
||||||
using namespace DB;
|
|
||||||
|
|
||||||
Memory<> direct_buf(block_size, ::getPageSize());
|
|
||||||
std::vector<char> simple_buf(block_size);
|
|
||||||
|
|
||||||
char * buf;
|
|
||||||
if ((mode & MODE_DIRECT))
|
|
||||||
buf = direct_buf.data();
|
|
||||||
else
|
|
||||||
buf = simple_buf.data();
|
|
||||||
|
|
||||||
pcg64 rng(randomSeed());
|
|
||||||
|
|
||||||
for (size_t i = 0; i < count; ++i)
|
|
||||||
{
|
|
||||||
uint64_t rand_result1 = rng();
|
|
||||||
uint64_t rand_result2 = rng();
|
|
||||||
uint64_t rand_result3 = rng();
|
|
||||||
|
|
||||||
size_t rand_result = rand_result1 ^ (rand_result2 << 22) ^ (rand_result3 << 43);
|
|
||||||
size_t offset;
|
|
||||||
if ((mode & MODE_DIRECT) || (mode & MODE_ALIGNED))
|
|
||||||
offset = min_offset + rand_result % ((max_offset - min_offset) / block_size) * block_size;
|
|
||||||
else
|
|
||||||
offset = min_offset + rand_result % (max_offset - min_offset - block_size + 1);
|
|
||||||
|
|
||||||
if (mode & MODE_READ)
|
|
||||||
{
|
|
||||||
if (static_cast<int>(block_size) != pread(fd, buf, block_size, offset))
|
|
||||||
throwFromErrno("Cannot read", ErrorCodes::CANNOT_READ_FROM_FILE_DESCRIPTOR);
|
|
||||||
}
|
|
||||||
else
|
|
||||||
{
|
|
||||||
if (static_cast<int>(block_size) != pwrite(fd, buf, block_size, offset))
|
|
||||||
throwFromErrno("Cannot write", ErrorCodes::CANNOT_WRITE_TO_FILE_DESCRIPTOR);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
int mainImpl(int argc, char ** argv)
|
|
||||||
{
|
|
||||||
using namespace DB;
|
|
||||||
|
|
||||||
const char * file_name = nullptr;
|
|
||||||
int mode = MODE_NONE;
|
|
||||||
UInt64 min_offset = 0;
|
|
||||||
UInt64 max_offset = 0;
|
|
||||||
UInt64 block_size = 0;
|
|
||||||
UInt64 threads = 0;
|
|
||||||
UInt64 count = 0;
|
|
||||||
|
|
||||||
if (argc != 8)
|
|
||||||
{
|
|
||||||
std::cerr << "Usage: " << argv[0] << " file_name (r|w)[a][d][s] min_offset max_offset block_size threads count" << std::endl <<
|
|
||||||
"a - aligned, d - direct, s - sync" << std::endl;
|
|
||||||
return 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
file_name = argv[1];
|
|
||||||
min_offset = parse<UInt64>(argv[3]);
|
|
||||||
max_offset = parse<UInt64>(argv[4]);
|
|
||||||
block_size = parse<UInt64>(argv[5]);
|
|
||||||
threads = parse<UInt64>(argv[6]);
|
|
||||||
count = parse<UInt64>(argv[7]);
|
|
||||||
|
|
||||||
for (int i = 0; argv[2][i]; ++i)
|
|
||||||
{
|
|
||||||
char c = argv[2][i];
|
|
||||||
switch (c)
|
|
||||||
{
|
|
||||||
case 'r':
|
|
||||||
mode |= MODE_READ;
|
|
||||||
break;
|
|
||||||
case 'w':
|
|
||||||
mode |= MODE_WRITE;
|
|
||||||
break;
|
|
||||||
case 'a':
|
|
||||||
mode |= MODE_ALIGNED;
|
|
||||||
break;
|
|
||||||
case 'd':
|
|
||||||
mode |= MODE_DIRECT;
|
|
||||||
break;
|
|
||||||
case 's':
|
|
||||||
mode |= MODE_SYNC;
|
|
||||||
break;
|
|
||||||
default:
|
|
||||||
throw Poco::Exception("Invalid mode");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
ThreadPool pool(threads);
|
|
||||||
|
|
||||||
#ifndef OS_DARWIN
|
|
||||||
int fd = open(file_name, ((mode & MODE_READ) ? O_RDONLY : O_WRONLY) | ((mode & MODE_DIRECT) ? O_DIRECT : 0) | ((mode & MODE_SYNC) ? O_SYNC : 0));
|
|
||||||
#else
|
|
||||||
int fd = open(file_name, ((mode & MODE_READ) ? O_RDONLY : O_WRONLY) | ((mode & MODE_SYNC) ? O_SYNC : 0));
|
|
||||||
#endif
|
|
||||||
if (-1 == fd)
|
|
||||||
throwFromErrno("Cannot open file", ErrorCodes::CANNOT_OPEN_FILE);
|
|
||||||
#ifdef OS_DARWIN
|
|
||||||
if (mode & MODE_DIRECT)
|
|
||||||
if (fcntl(fd, F_NOCACHE, 1) == -1)
|
|
||||||
throwFromErrno("Cannot open file", ErrorCodes::CANNOT_CLOSE_FILE);
|
|
||||||
#endif
|
|
||||||
Stopwatch watch;
|
|
||||||
|
|
||||||
for (size_t i = 0; i < threads; ++i)
|
|
||||||
pool.scheduleOrThrowOnError([=]{ thread(fd, mode, min_offset, max_offset, block_size, count); });
|
|
||||||
pool.wait();
|
|
||||||
|
|
||||||
#if defined(OS_DARWIN)
|
|
||||||
fsync(fd);
|
|
||||||
#else
|
|
||||||
fdatasync(fd);
|
|
||||||
#endif
|
|
||||||
|
|
||||||
watch.stop();
|
|
||||||
|
|
||||||
if (0 != close(fd))
|
|
||||||
throwFromErrno("Cannot close file", ErrorCodes::CANNOT_CLOSE_FILE);
|
|
||||||
|
|
||||||
std::cout << std::fixed << std::setprecision(2)
|
|
||||||
<< "Done " << count << " * " << threads << " ops";
|
|
||||||
if (mode & MODE_ALIGNED)
|
|
||||||
std::cout << " (aligned)";
|
|
||||||
if (mode & MODE_DIRECT)
|
|
||||||
std::cout << " (direct)";
|
|
||||||
if (mode & MODE_SYNC)
|
|
||||||
std::cout << " (sync)";
|
|
||||||
std::cout << " in " << watch.elapsedSeconds() << " sec."
|
|
||||||
<< ", " << count * threads / watch.elapsedSeconds() << " ops/sec."
|
|
||||||
<< ", " << count * threads * block_size / watch.elapsedSeconds() / 1000000 << " MB/sec."
|
|
||||||
<< std::endl;
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
int main(int argc, char ** argv)
|
|
||||||
{
|
|
||||||
try
|
|
||||||
{
|
|
||||||
return mainImpl(argc, argv);
|
|
||||||
}
|
|
||||||
catch (const Poco::Exception & e)
|
|
||||||
{
|
|
||||||
std::cerr << e.what() << ", " << e.message() << std::endl;
|
|
||||||
return 1;
|
|
||||||
}
|
|
||||||
}
|
|
@ -1,203 +0,0 @@
|
|||||||
#if !defined(OS_LINUX)
|
|
||||||
int main(int, char **) { return 0; }
|
|
||||||
#else
|
|
||||||
|
|
||||||
#include <fcntl.h>
|
|
||||||
#include <unistd.h>
|
|
||||||
#include <cstdlib>
|
|
||||||
#include <ctime>
|
|
||||||
#include <iostream>
|
|
||||||
#include <iomanip>
|
|
||||||
#include <vector>
|
|
||||||
#include <Poco/Exception.h>
|
|
||||||
#include <Common/Exception.h>
|
|
||||||
#include <Common/ThreadPool.h>
|
|
||||||
#include <Common/Stopwatch.h>
|
|
||||||
#include <Common/randomSeed.h>
|
|
||||||
#include <base/getPageSize.h>
|
|
||||||
#include <pcg_random.hpp>
|
|
||||||
#include <IO/BufferWithOwnMemory.h>
|
|
||||||
#include <IO/ReadHelpers.h>
|
|
||||||
#include <cstdio>
|
|
||||||
#include <sys/stat.h>
|
|
||||||
#include <sys/types.h>
|
|
||||||
#include <IO/AIO.h>
|
|
||||||
#include <malloc.h>
|
|
||||||
#include <sys/syscall.h>
|
|
||||||
|
|
||||||
|
|
||||||
namespace DB
|
|
||||||
{
|
|
||||||
namespace ErrorCodes
|
|
||||||
{
|
|
||||||
extern const int CANNOT_OPEN_FILE;
|
|
||||||
extern const int CANNOT_CLOSE_FILE;
|
|
||||||
extern const int CANNOT_IO_SUBMIT;
|
|
||||||
extern const int CANNOT_IO_GETEVENTS;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
enum Mode
|
|
||||||
{
|
|
||||||
MODE_READ = 1,
|
|
||||||
MODE_WRITE = 2,
|
|
||||||
};
|
|
||||||
|
|
||||||
|
|
||||||
void thread(int fd, int mode, size_t min_offset, size_t max_offset, size_t block_size, size_t buffers_count, size_t count)
|
|
||||||
{
|
|
||||||
using namespace DB;
|
|
||||||
|
|
||||||
AIOContext ctx;
|
|
||||||
|
|
||||||
std::vector<Memory<>> buffers(buffers_count);
|
|
||||||
for (size_t i = 0; i < buffers_count; ++i)
|
|
||||||
buffers[i] = Memory<>(block_size, ::getPageSize());
|
|
||||||
|
|
||||||
pcg64_fast rng(randomSeed());
|
|
||||||
|
|
||||||
size_t in_progress = 0;
|
|
||||||
size_t blocks_sent = 0;
|
|
||||||
std::vector<bool> buffer_used(buffers_count, false);
|
|
||||||
std::vector<iocb> iocbs(buffers_count);
|
|
||||||
std::vector<iocb*> query_cbs;
|
|
||||||
std::vector<io_event> events(buffers_count);
|
|
||||||
|
|
||||||
while (blocks_sent < count || in_progress > 0)
|
|
||||||
{
|
|
||||||
/// Prepare queries.
|
|
||||||
query_cbs.clear();
|
|
||||||
for (size_t i = 0; i < buffers_count; ++i)
|
|
||||||
{
|
|
||||||
if (blocks_sent >= count || in_progress >= buffers_count)
|
|
||||||
break;
|
|
||||||
|
|
||||||
if (buffer_used[i])
|
|
||||||
continue;
|
|
||||||
|
|
||||||
buffer_used[i] = true;
|
|
||||||
++blocks_sent;
|
|
||||||
++in_progress;
|
|
||||||
|
|
||||||
char * buf = buffers[i].data();
|
|
||||||
|
|
||||||
uint64_t rand_result1 = rng();
|
|
||||||
uint64_t rand_result2 = rng();
|
|
||||||
uint64_t rand_result3 = rng();
|
|
||||||
|
|
||||||
size_t rand_result = rand_result1 ^ (rand_result2 << 22) ^ (rand_result3 << 43);
|
|
||||||
size_t offset = min_offset + rand_result % ((max_offset - min_offset) / block_size) * block_size;
|
|
||||||
|
|
||||||
iocb & cb = iocbs[i];
|
|
||||||
memset(&cb, 0, sizeof(cb));
|
|
||||||
cb.aio_buf = reinterpret_cast<UInt64>(buf);
|
|
||||||
cb.aio_fildes = fd;
|
|
||||||
cb.aio_nbytes = block_size;
|
|
||||||
cb.aio_offset = offset;
|
|
||||||
cb.aio_data = static_cast<UInt64>(i);
|
|
||||||
|
|
||||||
if (mode == MODE_READ)
|
|
||||||
{
|
|
||||||
cb.aio_lio_opcode = IOCB_CMD_PREAD;
|
|
||||||
}
|
|
||||||
else
|
|
||||||
{
|
|
||||||
cb.aio_lio_opcode = IOCB_CMD_PWRITE;
|
|
||||||
}
|
|
||||||
|
|
||||||
query_cbs.push_back(&cb);
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Send queries.
|
|
||||||
if (io_submit(ctx.ctx, query_cbs.size(), query_cbs.data()) < 0)
|
|
||||||
throwFromErrno("io_submit failed", ErrorCodes::CANNOT_IO_SUBMIT);
|
|
||||||
|
|
||||||
/// Receive answers. If we have something else to send, then receive at least one answer (after that send them), otherwise wait all answers.
|
|
||||||
memset(events.data(), 0, buffers_count * sizeof(events[0]));
|
|
||||||
int evs = io_getevents(ctx.ctx, (blocks_sent < count ? 1 : in_progress), buffers_count, events.data(), nullptr);
|
|
||||||
if (evs < 0)
|
|
||||||
throwFromErrno("io_getevents failed", ErrorCodes::CANNOT_IO_GETEVENTS);
|
|
||||||
|
|
||||||
for (int i = 0; i < evs; ++i)
|
|
||||||
{
|
|
||||||
int b = static_cast<int>(events[i].data);
|
|
||||||
if (events[i].res != static_cast<int>(block_size))
|
|
||||||
throw Poco::Exception("read/write error");
|
|
||||||
--in_progress;
|
|
||||||
buffer_used[b] = false;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
int mainImpl(int argc, char ** argv)
|
|
||||||
{
|
|
||||||
using namespace DB;
|
|
||||||
|
|
||||||
const char * file_name = nullptr;
|
|
||||||
int mode = MODE_READ;
|
|
||||||
UInt64 min_offset = 0;
|
|
||||||
UInt64 max_offset = 0;
|
|
||||||
UInt64 block_size = 0;
|
|
||||||
UInt64 buffers_count = 0;
|
|
||||||
UInt64 threads_count = 0;
|
|
||||||
UInt64 count = 0;
|
|
||||||
|
|
||||||
if (argc != 9)
|
|
||||||
{
|
|
||||||
std::cerr << "Usage: " << argv[0] << " file_name r|w min_offset max_offset block_size threads buffers count" << std::endl;
|
|
||||||
return 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
file_name = argv[1];
|
|
||||||
if (argv[2][0] == 'w')
|
|
||||||
mode = MODE_WRITE;
|
|
||||||
min_offset = parse<UInt64>(argv[3]);
|
|
||||||
max_offset = parse<UInt64>(argv[4]);
|
|
||||||
block_size = parse<UInt64>(argv[5]);
|
|
||||||
threads_count = parse<UInt64>(argv[6]);
|
|
||||||
buffers_count = parse<UInt64>(argv[7]);
|
|
||||||
count = parse<UInt64>(argv[8]);
|
|
||||||
|
|
||||||
int fd = open(file_name, ((mode == MODE_READ) ? O_RDONLY : O_WRONLY) | O_DIRECT);
|
|
||||||
if (-1 == fd)
|
|
||||||
throwFromErrno("Cannot open file", ErrorCodes::CANNOT_OPEN_FILE);
|
|
||||||
|
|
||||||
ThreadPool pool(threads_count);
|
|
||||||
|
|
||||||
Stopwatch watch;
|
|
||||||
|
|
||||||
for (size_t i = 0; i < threads_count; ++i)
|
|
||||||
pool.scheduleOrThrowOnError([=]{ thread(fd, mode, min_offset, max_offset, block_size, buffers_count, count); });
|
|
||||||
pool.wait();
|
|
||||||
|
|
||||||
watch.stop();
|
|
||||||
|
|
||||||
if (0 != close(fd))
|
|
||||||
throwFromErrno("Cannot close file", ErrorCodes::CANNOT_CLOSE_FILE);
|
|
||||||
|
|
||||||
std::cout << std::fixed << std::setprecision(2)
|
|
||||||
<< "Done " << count << " * " << threads_count << " ops";
|
|
||||||
std::cout << " in " << watch.elapsedSeconds() << " sec."
|
|
||||||
<< ", " << count * threads_count / watch.elapsedSeconds() << " ops/sec."
|
|
||||||
<< ", " << count * threads_count * block_size / watch.elapsedSeconds() / 1000000 << " MB/sec."
|
|
||||||
<< std::endl;
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
int main(int argc, char ** argv)
|
|
||||||
{
|
|
||||||
try
|
|
||||||
{
|
|
||||||
return mainImpl(argc, argv);
|
|
||||||
}
|
|
||||||
catch (const Poco::Exception & e)
|
|
||||||
{
|
|
||||||
std::cerr << e.what() << ", " << e.message() << std::endl;
|
|
||||||
return 1;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
#endif
|
|
@ -1,177 +0,0 @@
|
|||||||
#include <IO/ReadHelpers.h>
|
|
||||||
#include <pcg_random.hpp>
|
|
||||||
#include <Poco/Exception.h>
|
|
||||||
#include <Common/Exception.h>
|
|
||||||
#include <Common/Stopwatch.h>
|
|
||||||
#include <Common/ThreadPool.h>
|
|
||||||
#include <Common/randomSeed.h>
|
|
||||||
|
|
||||||
#include <iomanip>
|
|
||||||
#include <iostream>
|
|
||||||
#include <random>
|
|
||||||
#include <vector>
|
|
||||||
|
|
||||||
#include <fcntl.h>
|
|
||||||
#include <poll.h>
|
|
||||||
#include <cstdlib>
|
|
||||||
#include <ctime>
|
|
||||||
#include <unistd.h>
|
|
||||||
|
|
||||||
#if defined (OS_LINUX)
|
|
||||||
# include <malloc.h>
|
|
||||||
#endif
|
|
||||||
|
|
||||||
|
|
||||||
namespace DB
|
|
||||||
{
|
|
||||||
namespace ErrorCodes
|
|
||||||
{
|
|
||||||
extern const int CANNOT_OPEN_FILE;
|
|
||||||
extern const int CANNOT_CLOSE_FILE;
|
|
||||||
extern const int CANNOT_READ_FROM_FILE_DESCRIPTOR;
|
|
||||||
extern const int CANNOT_WRITE_TO_FILE_DESCRIPTOR;
|
|
||||||
extern const int CANNOT_FSYNC;
|
|
||||||
extern const int SYSTEM_ERROR;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
enum Mode
|
|
||||||
{
|
|
||||||
MODE_READ,
|
|
||||||
MODE_WRITE,
|
|
||||||
};
|
|
||||||
|
|
||||||
|
|
||||||
int mainImpl(int argc, char ** argv)
|
|
||||||
{
|
|
||||||
using namespace DB;
|
|
||||||
|
|
||||||
const char * file_name = nullptr;
|
|
||||||
Mode mode = MODE_READ;
|
|
||||||
UInt64 min_offset = 0;
|
|
||||||
UInt64 max_offset = 0;
|
|
||||||
UInt64 block_size = 0;
|
|
||||||
UInt64 descriptors = 0;
|
|
||||||
UInt64 count = 0;
|
|
||||||
|
|
||||||
if (argc != 8)
|
|
||||||
{
|
|
||||||
std::cerr << "Usage: " << argv[0] << " file_name r|w min_offset max_offset block_size descriptors count" << std::endl;
|
|
||||||
return 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
file_name = argv[1];
|
|
||||||
min_offset = parse<UInt64>(argv[3]);
|
|
||||||
max_offset = parse<UInt64>(argv[4]);
|
|
||||||
block_size = parse<UInt64>(argv[5]);
|
|
||||||
descriptors = parse<UInt64>(argv[6]);
|
|
||||||
count = parse<UInt64>(argv[7]);
|
|
||||||
|
|
||||||
if (!strcmp(argv[2], "r"))
|
|
||||||
mode = MODE_READ;
|
|
||||||
else if (!strcmp(argv[2], "w"))
|
|
||||||
mode = MODE_WRITE;
|
|
||||||
else
|
|
||||||
throw Poco::Exception("Invalid mode");
|
|
||||||
|
|
||||||
std::vector<int> fds(descriptors);
|
|
||||||
for (size_t i = 0; i < descriptors; ++i)
|
|
||||||
{
|
|
||||||
fds[i] = open(file_name, O_SYNC | ((mode == MODE_READ) ? O_RDONLY : O_WRONLY));
|
|
||||||
if (-1 == fds[i])
|
|
||||||
throwFromErrno("Cannot open file", ErrorCodes::CANNOT_OPEN_FILE);
|
|
||||||
}
|
|
||||||
|
|
||||||
std::vector<char> buf(block_size);
|
|
||||||
|
|
||||||
pcg64 rng(randomSeed());
|
|
||||||
|
|
||||||
Stopwatch watch;
|
|
||||||
|
|
||||||
std::vector<pollfd> polls(descriptors);
|
|
||||||
|
|
||||||
for (size_t i = 0; i < descriptors; ++i)
|
|
||||||
{
|
|
||||||
polls[i].fd = fds[i];
|
|
||||||
polls[i].events = (mode == MODE_READ) ? POLLIN : POLLOUT;
|
|
||||||
polls[i].revents = 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
size_t ops = 0;
|
|
||||||
while (ops < count)
|
|
||||||
{
|
|
||||||
if (poll(polls.data(), static_cast<nfds_t>(descriptors), -1) <= 0)
|
|
||||||
throwFromErrno("poll failed", ErrorCodes::SYSTEM_ERROR);
|
|
||||||
for (size_t i = 0; i < descriptors; ++i)
|
|
||||||
{
|
|
||||||
if (!polls[i].revents)
|
|
||||||
continue;
|
|
||||||
|
|
||||||
if (polls[i].revents != polls[i].events)
|
|
||||||
throw Poco::Exception("revents indicates error");
|
|
||||||
polls[i].revents = 0;
|
|
||||||
++ops;
|
|
||||||
|
|
||||||
uint64_t rand_result1 = rng();
|
|
||||||
uint64_t rand_result2 = rng();
|
|
||||||
uint64_t rand_result3 = rng();
|
|
||||||
|
|
||||||
size_t rand_result = rand_result1 ^ (rand_result2 << 22) ^ (rand_result3 << 43);
|
|
||||||
size_t offset;
|
|
||||||
offset = min_offset + rand_result % ((max_offset - min_offset) / block_size) * block_size;
|
|
||||||
|
|
||||||
if (mode == MODE_READ)
|
|
||||||
{
|
|
||||||
if (static_cast<int>(block_size) != pread(fds[i], buf.data(), block_size, offset))
|
|
||||||
throwFromErrno("Cannot read", ErrorCodes::CANNOT_READ_FROM_FILE_DESCRIPTOR);
|
|
||||||
}
|
|
||||||
else
|
|
||||||
{
|
|
||||||
if (static_cast<int>(block_size) != pwrite(fds[i], buf.data(), block_size, offset))
|
|
||||||
throwFromErrno("Cannot write", ErrorCodes::CANNOT_WRITE_TO_FILE_DESCRIPTOR);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for (size_t i = 0; i < descriptors; ++i)
|
|
||||||
{
|
|
||||||
#if defined(OS_DARWIN)
|
|
||||||
if (fsync(fds[i]))
|
|
||||||
throwFromErrno("Cannot fsync", ErrorCodes::CANNOT_FSYNC);
|
|
||||||
#else
|
|
||||||
if (fdatasync(fds[i]))
|
|
||||||
throwFromErrno("Cannot fdatasync", ErrorCodes::CANNOT_FSYNC);
|
|
||||||
#endif
|
|
||||||
}
|
|
||||||
|
|
||||||
watch.stop();
|
|
||||||
|
|
||||||
for (size_t i = 0; i < descriptors; ++i)
|
|
||||||
{
|
|
||||||
if (0 != close(fds[i]))
|
|
||||||
throwFromErrno("Cannot close file", ErrorCodes::CANNOT_CLOSE_FILE);
|
|
||||||
}
|
|
||||||
|
|
||||||
std::cout << std::fixed << std::setprecision(2)
|
|
||||||
<< "Done " << count << " ops" << " in " << watch.elapsedSeconds() << " sec."
|
|
||||||
<< ", " << count / watch.elapsedSeconds() << " ops/sec."
|
|
||||||
<< ", " << count * block_size / watch.elapsedSeconds() / 1000000 << " MB/sec."
|
|
||||||
<< std::endl;
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
int main(int argc, char ** argv)
|
|
||||||
{
|
|
||||||
try
|
|
||||||
{
|
|
||||||
return mainImpl(argc, argv);
|
|
||||||
}
|
|
||||||
catch (const Poco::Exception & e)
|
|
||||||
{
|
|
||||||
std::cerr << e.what() << ", " << e.message() << std::endl;
|
|
||||||
return 1;
|
|
||||||
}
|
|
||||||
}
|
|
@ -1,3 +0,0 @@
clickhouse_add_executable (zookeeper-adjust-block-numbers-to-parts main.cpp ${SRCS})
target_compile_options(zookeeper-adjust-block-numbers-to-parts PRIVATE -Wno-format)
target_link_libraries (zookeeper-adjust-block-numbers-to-parts PRIVATE clickhouse_aggregate_functions dbms clickhouse_common_zookeeper boost::program_options)
@ -1,286 +0,0 @@
|
|||||||
#include <Storages/MergeTree/ReplicatedMergeTreeTableMetadata.h>
|
|
||||||
#include <Storages/MergeTree/MergeTreePartInfo.h>
|
|
||||||
#include <Common/ZooKeeper/ZooKeeper.h>
|
|
||||||
#include <boost/algorithm/string.hpp>
|
|
||||||
#include <boost/program_options.hpp>
|
|
||||||
#include <IO/ReadHelpers.h>
|
|
||||||
|
|
||||||
#include <unordered_map>
|
|
||||||
#include <cmath>
|
|
||||||
|
|
||||||
|
|
||||||
std::vector<std::string> getAllShards(zkutil::ZooKeeper & zk, const std::string & root)
|
|
||||||
{
|
|
||||||
return zk.getChildren(root);
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
std::vector<std::string> removeNotExistingShards(zkutil::ZooKeeper & zk, const std::string & root, const std::vector<std::string> & shards)
|
|
||||||
{
|
|
||||||
auto existing_shards = getAllShards(zk, root);
|
|
||||||
std::vector<std::string> filtered_shards;
|
|
||||||
filtered_shards.reserve(shards.size());
|
|
||||||
for (const auto & shard : shards)
|
|
||||||
if (std::find(existing_shards.begin(), existing_shards.end(), shard) == existing_shards.end())
|
|
||||||
std::cerr << "Shard " << shard << " not found." << std::endl;
|
|
||||||
else
|
|
||||||
filtered_shards.emplace_back(shard);
|
|
||||||
return filtered_shards;
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
std::vector<std::string> getAllTables(zkutil::ZooKeeper & zk, const std::string & root, const std::string & shard)
|
|
||||||
{
|
|
||||||
return zk.getChildren(root + "/" + shard);
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
std::vector<std::string> removeNotExistingTables(zkutil::ZooKeeper & zk, const std::string & root, const std::string & shard, const std::vector<std::string> & tables)
|
|
||||||
{
|
|
||||||
auto existing_tables = getAllTables(zk, root, shard);
|
|
||||||
std::vector<std::string> filtered_tables;
|
|
||||||
filtered_tables.reserve(tables.size());
|
|
||||||
for (const auto & table : tables)
|
|
||||||
if (std::find(existing_tables.begin(), existing_tables.end(), table) == existing_tables.end())
|
|
||||||
std::cerr << "\tTable " << table << " not found on shard " << shard << "." << std::endl;
|
|
||||||
else
|
|
||||||
filtered_tables.emplace_back(table);
|
|
||||||
return filtered_tables;
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
Int64 getMaxBlockNumberForPartition(zkutil::ZooKeeper & zk,
|
|
||||||
const std::string & replica_path,
|
|
||||||
const std::string & partition_name,
|
|
||||||
const DB::MergeTreeDataFormatVersion & format_version)
|
|
||||||
{
|
|
||||||
auto replicas_path = replica_path + "/replicas";
|
|
||||||
auto replica_hosts = zk.getChildren(replicas_path);
|
|
||||||
Int64 max_block_num = 0;
|
|
||||||
for (const auto & replica_host : replica_hosts)
|
|
||||||
{
|
|
||||||
auto parts = zk.getChildren(replicas_path + "/" + replica_host + "/parts");
|
|
||||||
for (const auto & part : parts)
|
|
||||||
{
|
|
||||||
try
|
|
||||||
{
|
|
||||||
auto info = DB::MergeTreePartInfo::fromPartName(part, format_version);
|
|
||||||
if (info.partition_id == partition_name)
|
|
||||||
max_block_num = std::max<Int64>(info.max_block, max_block_num);
|
|
||||||
}
|
|
||||||
catch (const DB::Exception & ex)
|
|
||||||
{
|
|
||||||
std::cerr << ex.displayText() << ", Part " << part << "skipped." << std::endl;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return max_block_num;
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
Int64 getCurrentBlockNumberForPartition(zkutil::ZooKeeper & zk, const std::string & part_path)
|
|
||||||
{
|
|
||||||
Coordination::Stat stat;
|
|
||||||
zk.get(part_path, &stat);
|
|
||||||
|
|
||||||
/// References:
|
|
||||||
/// https://stackoverflow.com/a/10347910
|
|
||||||
/// https://bowenli86.github.io/2016/07/07/distributed%20system/zookeeper/How-does-ZooKeeper-s-persistent-sequential-id-work/
|
|
||||||
return (stat.cversion + stat.numChildren) / 2;
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
std::unordered_map<std::string, Int64> getPartitionsNeedAdjustingBlockNumbers(
|
|
||||||
zkutil::ZooKeeper & zk, const std::string & root, const std::vector<std::string> & shards, const std::vector<std::string> & tables)
|
|
||||||
{
|
|
||||||
std::unordered_map<std::string, Int64> result;
|
|
||||||
|
|
||||||
std::vector<std::string> use_shards = shards.empty() ? getAllShards(zk, root) : removeNotExistingShards(zk, root, shards);
|
|
||||||
|
|
||||||
for (const auto & shard : use_shards)
|
|
||||||
{
|
|
||||||
std::cout << "Shard: " << shard << std::endl;
|
|
||||||
std::vector<std::string> use_tables = tables.empty() ? getAllTables(zk, root, shard) : removeNotExistingTables(zk, root, shard, tables);
|
|
||||||
|
|
||||||
for (const auto & table : use_tables)
|
|
||||||
{
|
|
||||||
std::cout << "\tTable: " << table << std::endl;
|
|
||||||
std::string table_path = root + "/" + shard + "/" + table;
|
|
||||||
std::string blocks_path = table_path + "/block_numbers";
|
|
||||||
|
|
||||||
std::vector<std::string> partitions;
|
|
||||||
DB::MergeTreeDataFormatVersion format_version;
|
|
||||||
try
|
|
||||||
{
|
|
||||||
format_version = DB::ReplicatedMergeTreeTableMetadata::parse(zk.get(table_path + "/metadata")).data_format_version;
|
|
||||||
partitions = zk.getChildren(blocks_path);
|
|
||||||
}
|
|
||||||
catch (const DB::Exception & ex)
|
|
||||||
{
|
|
||||||
std::cerr << ex.displayText() << ", table " << table << " skipped." << std::endl;
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
for (const auto & partition : partitions)
|
|
||||||
{
|
|
||||||
try
|
|
||||||
{
|
|
||||||
std::string part_path = blocks_path + "/" + partition;
|
|
||||||
Int64 partition_max_block = getMaxBlockNumberForPartition(zk, table_path, partition, format_version);
|
|
||||||
Int64 current_block_number = getCurrentBlockNumberForPartition(zk, part_path);
|
|
||||||
if (current_block_number < partition_max_block + 1)
|
|
||||||
{
|
|
||||||
std::cout << "\t\tPartition: " << partition << ": current block_number: " << current_block_number
|
|
||||||
<< ", max block number: " << partition_max_block << ". Adjusting is required." << std::endl;
|
|
||||||
result.emplace(part_path, partition_max_block);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
catch (const DB::Exception & ex)
|
|
||||||
{
|
|
||||||
std::cerr << ex.displayText() << ", partition " << partition << " skipped." << std::endl;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return result;
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
void setCurrentBlockNumber(zkutil::ZooKeeper & zk, const std::string & path, Int64 new_current_block_number)
|
|
||||||
{
|
|
||||||
Int64 current_block_number = getCurrentBlockNumberForPartition(zk, path);
|
|
||||||
|
|
||||||
auto create_ephemeral_nodes = [&](size_t count)
|
|
||||||
{
|
|
||||||
std::string block_prefix = path + "/block-";
|
|
||||||
Coordination::Requests requests;
|
|
||||||
requests.reserve(count);
|
|
||||||
for (size_t i = 0; i != count; ++i)
|
|
||||||
requests.emplace_back(zkutil::makeCreateRequest(block_prefix, "", zkutil::CreateMode::EphemeralSequential));
|
|
||||||
auto responses = zk.multi(requests);
|
|
||||||
|
|
||||||
std::vector<std::string> paths_created;
|
|
||||||
paths_created.reserve(responses.size());
|
|
||||||
for (const auto & response : responses)
|
|
||||||
{
|
|
||||||
const auto * create_response = dynamic_cast<Coordination::CreateResponse*>(response.get());
|
|
||||||
if (!create_response)
|
|
||||||
{
|
|
||||||
std::cerr << "\tCould not create ephemeral node " << block_prefix << std::endl;
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
paths_created.emplace_back(create_response->path_created);
|
|
||||||
}
|
|
||||||
|
|
||||||
std::sort(paths_created.begin(), paths_created.end());
|
|
||||||
for (const auto & path_created : paths_created)
|
|
||||||
{
|
|
||||||
Int64 number = DB::parse<Int64>(path_created.c_str() + block_prefix.size(), path_created.size() - block_prefix.size());
|
|
||||||
if (number != current_block_number)
|
|
||||||
{
|
|
||||||
char suffix[11] = "";
|
|
||||||
size_t size = snprintf(suffix, sizeof(suffix), "%010lld", current_block_number);
|
|
||||||
std::string expected_path = block_prefix + std::string(suffix, size);
|
|
||||||
std::cerr << "\t" << path_created << ": Ephemeral node has been created with an unexpected path (expected something like "
|
|
||||||
<< expected_path << ")." << std::endl;
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
std::cout << "\t" << path_created << std::endl;
|
|
||||||
++current_block_number;
|
|
||||||
}
|
|
||||||
|
|
||||||
return true;
|
|
||||||
};
|
|
||||||
|
|
||||||
if (current_block_number >= new_current_block_number)
|
|
||||||
return;
|
|
||||||
|
|
||||||
std::cout << "Creating ephemeral sequential nodes:" << std::endl;
|
|
||||||
create_ephemeral_nodes(1); /// Firstly try to create just a single node.
|
|
||||||
|
|
||||||
/// Create other nodes in batches of 50 nodes.
|
|
||||||
while (current_block_number + 50 <= new_current_block_number) // NOLINT: clang-tidy thinks that the loop is infinite
|
|
||||||
create_ephemeral_nodes(50);
|
|
||||||
|
|
||||||
create_ephemeral_nodes(new_current_block_number - current_block_number);
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
int main(int argc, char ** argv)
|
|
||||||
try
|
|
||||||
{
|
|
||||||
/// Parse the command line.
|
|
||||||
namespace po = boost::program_options;
|
|
||||||
po::options_description desc("Allowed options");
|
|
||||||
desc.add_options()
|
|
||||||
("help,h", "show help")
|
|
||||||
("zookeeper,z", po::value<std::string>(), "Addresses of ZooKeeper instances, comma-separated. Example: example01e.clickhouse.com:2181")
|
|
||||||
("path,p", po::value<std::string>(), "[optional] Path of replica queue to insert node (without trailing slash). By default it's /clickhouse/tables")
|
|
||||||
("shard,s", po::value<std::string>(), "[optional] Shards to process, comma-separated. If not specified then the utility will process all the shards.")
|
|
||||||
("table,t", po::value<std::string>(), "[optional] Tables to process, comma-separated. If not specified then the utility will process all the tables.")
|
|
||||||
("dry-run", "[optional] Specify if you want this utility just to analyze block numbers without any changes.");
|
|
||||||
|
|
||||||
po::variables_map options;
|
|
||||||
po::store(po::parse_command_line(argc, argv, desc), options);
|
|
||||||
|
|
||||||
auto show_usage = [&]
|
|
||||||
{
|
|
||||||
std::cout << "Usage: " << std::endl;
|
|
||||||
std::cout << " " << argv[0] << " [options]" << std::endl;
|
|
||||||
std::cout << desc << std::endl;
|
|
||||||
};
|
|
||||||
|
|
||||||
if (options.count("help") || (argc == 1))
|
|
||||||
{
|
|
||||||
std::cout << "This utility adjusts the /block_numbers zookeeper nodes to the correct block number in partition." << std::endl;
|
|
||||||
std::cout << "It might be useful when incorrect block numbers stored in zookeeper don't allow you to insert data into a table or drop/detach a partition." << std::endl;
|
|
||||||
show_usage();
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!options.count("zookeeper"))
|
|
||||||
{
|
|
||||||
std::cerr << "Option --zookeeper should be set." << std::endl;
|
|
||||||
show_usage();
|
|
||||||
return 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
std::string root = options.count("path") ? options.at("path").as<std::string>() : "/clickhouse/tables";
|
|
||||||
|
|
||||||
std::vector<std::string> shards, tables;
|
|
||||||
if (options.count("shard"))
|
|
||||||
boost::split(shards, options.at("shard").as<std::string>(), boost::algorithm::is_any_of(","));
|
|
||||||
if (options.count("table"))
|
|
||||||
boost::split(tables, options.at("table").as<std::string>(), boost::algorithm::is_any_of(","));
|
|
||||||
|
|
||||||
/// Check if the adjusting of the block numbers is required.
|
|
||||||
std::cout << "Checking if adjusting of the block numbers is required:" << std::endl;
|
|
||||||
zkutil::ZooKeeper zookeeper(options.at("zookeeper").as<std::string>());
|
|
||||||
auto part_paths_with_max_block_numbers = getPartitionsNeedAdjustingBlockNumbers(zookeeper, root, shards, tables);
|
|
||||||
|
|
||||||
if (part_paths_with_max_block_numbers.empty())
|
|
||||||
{
|
|
||||||
std::cout << "No adjusting required." << std::endl;
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
std::cout << "Required adjusting of " << part_paths_with_max_block_numbers.size() << " block numbers." << std::endl;
|
|
||||||
|
|
||||||
/// Adjust the block numbers.
|
|
||||||
if (options.count("dry-run"))
|
|
||||||
{
|
|
||||||
std::cout << "This is a dry-run, exiting." << std::endl;
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
std::cout << std::endl << "Adjusting the block numbers:" << std::endl;
|
|
||||||
for (const auto & [part_path, max_block_number] : part_paths_with_max_block_numbers)
|
|
||||||
setCurrentBlockNumber(zookeeper, part_path, max_block_number + 1);
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
catch (...)
|
|
||||||
{
|
|
||||||
std::cerr << DB::getCurrentExceptionMessage(true) << '\n';
|
|
||||||
throw;
|
|
||||||
}
|
|
@ -1,2 +0,0 @@
clickhouse_add_executable (zookeeper-create-entry-to-download-part main.cpp ${SRCS})
target_link_libraries (zookeeper-create-entry-to-download-part PRIVATE dbms clickhouse_common_zookeeper boost::program_options)
@ -1,47 +0,0 @@
|
|||||||
#include <list>
|
|
||||||
#include <Storages/MergeTree/ReplicatedMergeTreeLogEntry.h>
|
|
||||||
#include <Common/ZooKeeper/ZooKeeper.h>
|
|
||||||
#include <boost/program_options.hpp>
|
|
||||||
|
|
||||||
|
|
||||||
int main(int argc, char ** argv)
|
|
||||||
try
|
|
||||||
{
|
|
||||||
boost::program_options::options_description desc("Allowed options");
|
|
||||||
desc.add_options()
|
|
||||||
("help,h", "produce help message")
|
|
||||||
("address,a", boost::program_options::value<std::string>()->required(),
|
|
||||||
"addresses of ZooKeeper instances, comma separated. Example: example01e.clickhouse.com:2181")
|
|
||||||
("path,p", boost::program_options::value<std::string>()->required(), "path of replica queue to insert node (without trailing slash)")
|
|
||||||
("name,n", boost::program_options::value<std::string>()->required(), "name of part to download")
|
|
||||||
;
|
|
||||||
|
|
||||||
boost::program_options::variables_map options;
|
|
||||||
boost::program_options::store(boost::program_options::parse_command_line(argc, argv, desc), options);
|
|
||||||
|
|
||||||
if (options.count("help"))
|
|
||||||
{
|
|
||||||
std::cout << "Insert log entry to replication queue to download part from any replica." << std::endl;
|
|
||||||
std::cout << "Usage: " << argv[0] << " [options]" << std::endl;
|
|
||||||
std::cout << desc << std::endl;
|
|
||||||
return 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
std::string path = options.at("path").as<std::string>();
|
|
||||||
std::string name = options.at("name").as<std::string>();
|
|
||||||
|
|
||||||
zkutil::ZooKeeper zookeeper(options.at("address").as<std::string>());
|
|
||||||
|
|
||||||
DB::ReplicatedMergeTreeLogEntry entry;
|
|
||||||
entry.type = DB::ReplicatedMergeTreeLogEntry::MERGE_PARTS;
|
|
||||||
entry.source_parts = {name};
|
|
||||||
entry.new_part_name = name;
|
|
||||||
|
|
||||||
zookeeper.create(path + "/queue-", entry.toString(), zkutil::CreateMode::PersistentSequential);
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
catch (...)
|
|
||||||
{
|
|
||||||
std::cerr << DB::getCurrentExceptionMessage(true) << '\n';
|
|
||||||
throw;
|
|
||||||
}
|
|