Merge branch 'master' into podopt
Commit 25e607cce8

.exrc (new file, 1 line)
@@ -0,0 +1 @@
au BufRead,BufNewFile * set tabstop=4 softtabstop=0 expandtab shiftwidth=4 smarttab tags=tags,../tags

.github/workflows/tags_stable.yml (vendored, 2 lines changed)
@@ -51,7 +51,7 @@ jobs:
--gh-user-or-token="$GITHUB_TOKEN" --jobs=5 \
--output="/ClickHouse/docs/changelogs/${GITHUB_TAG}.md" "${GITHUB_TAG}"
git add "./docs/changelogs/${GITHUB_TAG}.md"
python ./utils/security-generator/generate_security.py > SECURITY.md
python3 ./utils/security-generator/generate_security.py > SECURITY.md
git diff HEAD
- name: Create Pull Request
  uses: peter-evans/create-pull-request@v3

.gitignore (vendored, 1 line changed)
@@ -17,6 +17,7 @@

# logs
*.log
*.debuglog
*.stderr
*.stdout

.vimrc (file deleted, 2 lines)
@@ -1,2 +0,0 @@
au BufRead,BufNewFile ./* set tabstop=4 softtabstop=0 expandtab shiftwidth=4 smarttab tags=tags,../tags

contrib/NuRaft (vendored, 2 lines changed)
@@ -1 +1 @@
Subproject commit e4e746a24eb56861a86f3672771e3308d8c40722
Subproject commit afc36dfa9b0beb45bc4cd935060631cc80ba04a5

@@ -295,6 +295,9 @@ if not args.use_existing_tables:
reportStageEnd("create")

# Let's sync the data to avoid writeback affecting performance
os.system("sync")

# By default, test all queries.
queries_to_run = range(0, len(test_queries))

@@ -17,7 +17,7 @@ RUN apt-get update && env DEBIAN_FRONTEND=noninteractive apt-get install --yes \
python3-pip \
shellcheck \
yamllint \
&& pip3 install black==22.8.0 boto3 codespell==2.2.1 dohq-artifactory PyGithub unidiff pylint==2.6.2 \
&& pip3 install black==22.8.0 boto3 codespell==2.2.1 dohq-artifactory mypy PyGithub unidiff pylint==2.6.2 \
&& apt-get clean \
&& rm -rf /root/.cache/pip

@@ -11,17 +11,19 @@ def process_result(result_folder):
    description = ""
    test_results = []
    checks = (
        ("header duplicates", "duplicate_output.txt"),
        ("shellcheck", "shellcheck_output.txt"),
        ("style", "style_output.txt"),
        ("black", "black_output.txt"),
        ("typos", "typos_output.txt"),
        ("whitespaces", "whitespaces_output.txt"),
        ("workflows", "workflows_output.txt"),
        ("doc typos", "doc_spell_output.txt"),
        "duplicate includes",
        "shellcheck",
        "style",
        "black",
        "mypy",
        "typos",
        "whitespaces",
        "workflows",
        "docs spelling",
    )

    for name, out_file in checks:
    for name in checks:
        out_file = name.replace(" ", "_") + "_output.txt"
        full_path = os.path.join(result_folder, out_file)
        if not os.path.exists(full_path):
            logging.info("No %s check log on path %s", name, full_path)

@@ -4,15 +4,17 @@
cd /ClickHouse/utils/check-style || echo -e "failure\tRepo not found" > /test_output/check_status.tsv
echo "Check duplicates" | ts
./check-duplicate-includes.sh |& tee /test_output/duplicate_output.txt
./check-duplicate-includes.sh |& tee /test_output/duplicate_includes_output.txt
echo "Check style" | ts
./check-style -n |& tee /test_output/style_output.txt
echo "Check python formatting with black" | ts
./check-black -n |& tee /test_output/black_output.txt
echo "Check python type hinting with mypy" | ts
./check-mypy -n |& tee /test_output/mypy_output.txt
echo "Check typos" | ts
./check-typos |& tee /test_output/typos_output.txt
echo "Check docs spelling" | ts
./check-doc-aspell |& tee /test_output/doc_spell_output.txt
./check-doc-aspell |& tee /test_output/docs_spelling_output.txt
echo "Check whitespaces" | ts
./check-whitespaces -n |& tee /test_output/whitespaces_output.txt
echo "Check workflows" | ts

docs/changelogs/v22.10.3.27-stable.md (new file, 32 lines)
@@ -0,0 +1,32 @@
---
sidebar_position: 1
sidebar_label: 2022
---

# 2022 Changelog

### ClickHouse release v22.10.3.27-stable (6d3b2985724) FIXME as compared to v22.10.2.11-stable (d2bfcaba002)

#### Improvement
* Backported in [#42842](https://github.com/ClickHouse/ClickHouse/issues/42842): Update tzdata to 2022f. Mexico will no longer observe DST except near the US border: https://www.timeanddate.com/news/time/mexico-abolishes-dst-2022.html. Chihuahua moves to year-round UTC-6 on 2022-10-30. Fiji no longer observes DST. See https://github.com/google/cctz/pull/235 and https://bugs.launchpad.net/ubuntu/+source/tzdata/+bug/1995209. [#42796](https://github.com/ClickHouse/ClickHouse/pull/42796) ([Alexey Milovidov](https://github.com/alexey-milovidov)).

#### Build/Testing/Packaging Improvement
* Backported in [#42959](https://github.com/ClickHouse/ClickHouse/issues/42959): Before the fix, the user-defined config was preserved by RPM in `$file.rpmsave`. The PR fixes it and won't replace the user's files from packages. [#42936](https://github.com/ClickHouse/ClickHouse/pull/42936) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Backported in [#43042](https://github.com/ClickHouse/ClickHouse/issues/43042): Add a CI step to mark commits as ready for release; soft-forbid launching a release script from branches but master. [#43017](https://github.com/ClickHouse/ClickHouse/pull/43017) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

#### Bug Fix (user-visible misbehavior in official stable or prestable release)
* Backported in [#42864](https://github.com/ClickHouse/ClickHouse/issues/42864): Fix lowerUTF8()/upperUTF8() in case of symbol was in between 16-byte boundary (very frequent case of you have strings > 16 bytes long). [#42812](https://github.com/ClickHouse/ClickHouse/pull/42812) ([Azat Khuzhin](https://github.com/azat)).
* Backported in [#43173](https://github.com/ClickHouse/ClickHouse/issues/43173): Fix rare possible hung on query cancellation. [#42874](https://github.com/ClickHouse/ClickHouse/pull/42874) ([Azat Khuzhin](https://github.com/azat)).
* Backported in [#43064](https://github.com/ClickHouse/ClickHouse/issues/43064): Fix rare NOT_FOUND_COLUMN_IN_BLOCK error when projection is possible to use but there is no projection available. This fixes [#42771](https://github.com/ClickHouse/ClickHouse/issues/42771) . The bug was introduced in https://github.com/ClickHouse/ClickHouse/pull/25563. [#42938](https://github.com/ClickHouse/ClickHouse/pull/42938) ([Amos Bird](https://github.com/amosbird)).
* Backported in [#43075](https://github.com/ClickHouse/ClickHouse/issues/43075): Fix lambda parsing. Closes [#41848](https://github.com/ClickHouse/ClickHouse/issues/41848). [#42979](https://github.com/ClickHouse/ClickHouse/pull/42979) ([Nikolay Degterinsky](https://github.com/evillique)).
* Backported in [#43444](https://github.com/ClickHouse/ClickHouse/issues/43444): - Fix several buffer over-reads. [#43159](https://github.com/ClickHouse/ClickHouse/pull/43159) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#43430](https://github.com/ClickHouse/ClickHouse/issues/43430): Fixed queries with `SAMPLE BY` with prewhere optimization on tables using `Merge` engine. [#43315](https://github.com/ClickHouse/ClickHouse/pull/43315) ([Antonio Andelic](https://github.com/antonio2368)).

#### NOT FOR CHANGELOG / INSIGNIFICANT
* Fix a bug in CAST function parser [#42980](https://github.com/ClickHouse/ClickHouse/pull/42980) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fix TSan errors (correctly ignore _exit interception) [#43009](https://github.com/ClickHouse/ClickHouse/pull/43009) ([Azat Khuzhin](https://github.com/azat)).
* Update SECURITY.md on new stable tags [#43365](https://github.com/ClickHouse/ClickHouse/pull/43365) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Use all parameters with prefixes from ssm [#43467](https://github.com/ClickHouse/ClickHouse/pull/43467) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

docs/changelogs/v22.10.4.23-stable.md (new file, 29 lines)
@@ -0,0 +1,29 @@
---
sidebar_position: 1
sidebar_label: 2022
---

# 2022 Changelog

### ClickHouse release v22.10.4.23-stable (352772987f4) FIXME as compared to v22.10.3.27-stable (6d3b2985724)

#### Backward Incompatible Change
* Backported in [#43487](https://github.com/ClickHouse/ClickHouse/issues/43487): Fixed backward incompatibility in (de)serialization of states of `min`, `max`, `any*`, `argMin`, `argMax` aggregate functions with `String` argument. The incompatibility was introduced in https://github.com/ClickHouse/ClickHouse/pull/41431 and affects 22.9, 22.10 and 22.11 branches (fixed since 22.9.6, 22.10.4 and 22.11.2 correspondingly). Some minor releases of 22.3, 22.7 and 22.8 branches are also affected: 22.3.13...22.3.14 (fixed since 22.3.15), 22.8.6...22.8.9 (fixed since 22.8.10), 22.7.6 and newer (will not be fixed in 22.7, we recommend to upgrade from 22.7.* to 22.8.10 or newer). This release note does not concern users that have never used affected versions. Incompatible versions append extra `'\0'` to strings when reading states of the aggregate functions mentioned above. For example, if an older version saved state of `anyState('foobar')` to `state_column` then incompatible version will print `'foobar\0'` on `anyMerge(state_column)`. Also incompatible versions write states of the aggregate functions without trailing `'\0'`. Newer versions (that have the fix) can correctly read data written by all versions including incompatible versions, except one corner case. If an incompatible version saved a state with a string that actually ends with null character, then newer version will trim trailing `'\0'` when reading state of affected aggregate function. For example, if an incompatible version saved state of `anyState('abrac\0dabra\0')` to `state_column` then incompatible versions will print `'abrac\0dabra'` on `anyMerge(state_column)`. The issue also affects distributed queries when an incompatible version works in a cluster together with older or newer versions. [#43038](https://github.com/ClickHouse/ClickHouse/pull/43038) ([Raúl Marín](https://github.com/Algunenano)).

#### Build/Testing/Packaging Improvement
* Backported in [#43053](https://github.com/ClickHouse/ClickHouse/issues/43053): Wait for all files are in sync before archiving them in integration tests. [#42891](https://github.com/ClickHouse/ClickHouse/pull/42891) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

#### Bug Fix (user-visible misbehavior in official stable or prestable release)
* Backported in [#43715](https://github.com/ClickHouse/ClickHouse/issues/43715): An issue with the following exception has been reported while trying to read a Parquet file from S3 into ClickHouse:. [#43297](https://github.com/ClickHouse/ClickHouse/pull/43297) ([Arthur Passos](https://github.com/arthurpassos)).
* Backported in [#43576](https://github.com/ClickHouse/ClickHouse/issues/43576): Fix possible `Cannot create non-empty column with type Nothing` in functions if/multiIf. Closes [#43356](https://github.com/ClickHouse/ClickHouse/issues/43356). [#43368](https://github.com/ClickHouse/ClickHouse/pull/43368) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#43506](https://github.com/ClickHouse/ClickHouse/issues/43506): Fix a bug when row level filter uses default value of column. [#43387](https://github.com/ClickHouse/ClickHouse/pull/43387) ([Alexander Gololobov](https://github.com/davenger)).
* Backported in [#43723](https://github.com/ClickHouse/ClickHouse/issues/43723): Fixed primary key analysis with conditions involving `toString(enum)`. [#43596](https://github.com/ClickHouse/ClickHouse/pull/43596) ([Nikita Taranov](https://github.com/nickitat)).

#### NOT FOR CHANGELOG / INSIGNIFICANT
* Temporarily disable `test_hive_query` [#43542](https://github.com/ClickHouse/ClickHouse/pull/43542) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Do not checkout submodules recursively [#43637](https://github.com/ClickHouse/ClickHouse/pull/43637) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Use docker images cache from merged PRs in master and release branches [#43664](https://github.com/ClickHouse/ClickHouse/pull/43664) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix pagination issue in GITHUB_JOB_ID() [#43681](https://github.com/ClickHouse/ClickHouse/pull/43681) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

docs/changelogs/v22.11.2.30-stable.md (new file, 33 lines)
@@ -0,0 +1,33 @@
---
sidebar_position: 1
sidebar_label: 2022
---

# 2022 Changelog

### ClickHouse release v22.11.2.30-stable (28f72d8ab09) FIXME as compared to v22.11.1.1360-stable (0d211ed1984)

#### Backward Incompatible Change
* Backported in [#43488](https://github.com/ClickHouse/ClickHouse/issues/43488): Fixed backward incompatibility in (de)serialization of states of `min`, `max`, `any*`, `argMin`, `argMax` aggregate functions with `String` argument. The incompatibility was introduced in https://github.com/ClickHouse/ClickHouse/pull/41431 and affects 22.9, 22.10 and 22.11 branches (fixed since 22.9.6, 22.10.4 and 22.11.2 correspondingly). Some minor releases of 22.3, 22.7 and 22.8 branches are also affected: 22.3.13...22.3.14 (fixed since 22.3.15), 22.8.6...22.8.9 (fixed since 22.8.10), 22.7.6 and newer (will not be fixed in 22.7, we recommend to upgrade from 22.7.* to 22.8.10 or newer). This release note does not concern users that have never used affected versions. Incompatible versions append extra `'\0'` to strings when reading states of the aggregate functions mentioned above. For example, if an older version saved state of `anyState('foobar')` to `state_column` then incompatible version will print `'foobar\0'` on `anyMerge(state_column)`. Also incompatible versions write states of the aggregate functions without trailing `'\0'`. Newer versions (that have the fix) can correctly read data written by all versions including incompatible versions, except one corner case. If an incompatible version saved a state with a string that actually ends with null character, then newer version will trim trailing `'\0'` when reading state of affected aggregate function. For example, if an incompatible version saved state of `anyState('abrac\0dabra\0')` to `state_column` then incompatible versions will print `'abrac\0dabra'` on `anyMerge(state_column)`. The issue also affects distributed queries when an incompatible version works in a cluster together with older or newer versions. [#43038](https://github.com/ClickHouse/ClickHouse/pull/43038) ([Raúl Marín](https://github.com/Algunenano)).

#### Improvement
* Backported in [#43511](https://github.com/ClickHouse/ClickHouse/issues/43511): Restrict default access to named collections for user defined in config. It must have explicit `show_named_collections=1` to be able to see them. [#43325](https://github.com/ClickHouse/ClickHouse/pull/43325) ([Kseniia Sumarokova](https://github.com/kssenii)).

#### Bug Fix (user-visible misbehavior in official stable or prestable release)
* Backported in [#43716](https://github.com/ClickHouse/ClickHouse/issues/43716): An issue with the following exception has been reported while trying to read a Parquet file from S3 into ClickHouse:. [#43297](https://github.com/ClickHouse/ClickHouse/pull/43297) ([Arthur Passos](https://github.com/arthurpassos)).
* Backported in [#43431](https://github.com/ClickHouse/ClickHouse/issues/43431): Fixed queries with `SAMPLE BY` with prewhere optimization on tables using `Merge` engine. [#43315](https://github.com/ClickHouse/ClickHouse/pull/43315) ([Antonio Andelic](https://github.com/antonio2368)).
* Backported in [#43577](https://github.com/ClickHouse/ClickHouse/issues/43577): Fix possible `Cannot create non-empty column with type Nothing` in functions if/multiIf. Closes [#43356](https://github.com/ClickHouse/ClickHouse/issues/43356). [#43368](https://github.com/ClickHouse/ClickHouse/pull/43368) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#43507](https://github.com/ClickHouse/ClickHouse/issues/43507): Fix a bug when row level filter uses default value of column. [#43387](https://github.com/ClickHouse/ClickHouse/pull/43387) ([Alexander Gololobov](https://github.com/davenger)).
* Backported in [#43724](https://github.com/ClickHouse/ClickHouse/issues/43724): Fixed primary key analysis with conditions involving `toString(enum)`. [#43596](https://github.com/ClickHouse/ClickHouse/pull/43596) ([Nikita Taranov](https://github.com/nickitat)).
* Backported in [#43807](https://github.com/ClickHouse/ClickHouse/issues/43807): Optimized number of List requests to ZooKeeper when selecting a part to merge. Previously it could produce thousands of requests in some cases. Fixes [#43647](https://github.com/ClickHouse/ClickHouse/issues/43647). [#43675](https://github.com/ClickHouse/ClickHouse/pull/43675) ([Alexander Tokmakov](https://github.com/tavplubix)).

#### NOT FOR CHANGELOG / INSIGNIFICANT
* Update SECURITY.md on new stable tags [#43365](https://github.com/ClickHouse/ClickHouse/pull/43365) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Use all parameters with prefixes from ssm [#43467](https://github.com/ClickHouse/ClickHouse/pull/43467) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Temporarily disable `test_hive_query` [#43542](https://github.com/ClickHouse/ClickHouse/pull/43542) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Do not checkout submodules recursively [#43637](https://github.com/ClickHouse/ClickHouse/pull/43637) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Use docker images cache from merged PRs in master and release branches [#43664](https://github.com/ClickHouse/ClickHouse/pull/43664) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix pagination issue in GITHUB_JOB_ID() [#43681](https://github.com/ClickHouse/ClickHouse/pull/43681) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

docs/changelogs/v22.3.15.33-lts.md (new file, 34 lines)
@@ -0,0 +1,34 @@
---
sidebar_position: 1
sidebar_label: 2022
---

# 2022 Changelog

### ClickHouse release v22.3.15.33-lts (4ef30f2c4b6) FIXME as compared to v22.3.14.23-lts (74956bfee4d)

#### Backward Incompatible Change
* Backported in [#43484](https://github.com/ClickHouse/ClickHouse/issues/43484): Fixed backward incompatibility in (de)serialization of states of `min`, `max`, `any*`, `argMin`, `argMax` aggregate functions with `String` argument. The incompatibility was introduced in https://github.com/ClickHouse/ClickHouse/pull/41431 and affects 22.9, 22.10 and 22.11 branches (fixed since 22.9.6, 22.10.4 and 22.11.2 correspondingly). Some minor releases of 22.3, 22.7 and 22.8 branches are also affected: 22.3.13...22.3.14 (fixed since 22.3.15), 22.8.6...22.8.9 (fixed since 22.8.10), 22.7.6 and newer (will not be fixed in 22.7, we recommend to upgrade from 22.7.* to 22.8.10 or newer). This release note does not concern users that have never used affected versions. Incompatible versions append extra `'\0'` to strings when reading states of the aggregate functions mentioned above. For example, if an older version saved state of `anyState('foobar')` to `state_column` then incompatible version will print `'foobar\0'` on `anyMerge(state_column)`. Also incompatible versions write states of the aggregate functions without trailing `'\0'`. Newer versions (that have the fix) can correctly read data written by all versions including incompatible versions, except one corner case. If an incompatible version saved a state with a string that actually ends with null character, then newer version will trim trailing `'\0'` when reading state of affected aggregate function. For example, if an incompatible version saved state of `anyState('abrac\0dabra\0')` to `state_column` then incompatible versions will print `'abrac\0dabra'` on `anyMerge(state_column)`. The issue also affects distributed queries when an incompatible version works in a cluster together with older or newer versions. [#43038](https://github.com/ClickHouse/ClickHouse/pull/43038) ([Raúl Marín](https://github.com/Algunenano)).

#### Improvement
* Backported in [#42839](https://github.com/ClickHouse/ClickHouse/issues/42839): Update tzdata to 2022f. Mexico will no longer observe DST except near the US border: https://www.timeanddate.com/news/time/mexico-abolishes-dst-2022.html. Chihuahua moves to year-round UTC-6 on 2022-10-30. Fiji no longer observes DST. See https://github.com/google/cctz/pull/235 and https://bugs.launchpad.net/ubuntu/+source/tzdata/+bug/1995209. [#42796](https://github.com/ClickHouse/ClickHouse/pull/42796) ([Alexey Milovidov](https://github.com/alexey-milovidov)).

#### Build/Testing/Packaging Improvement
* Backported in [#43050](https://github.com/ClickHouse/ClickHouse/issues/43050): Wait for all files are in sync before archiving them in integration tests. [#42891](https://github.com/ClickHouse/ClickHouse/pull/42891) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Backported in [#42963](https://github.com/ClickHouse/ClickHouse/issues/42963): Before the fix, the user-defined config was preserved by RPM in `$file.rpmsave`. The PR fixes it and won't replace the user's files from packages. [#42936](https://github.com/ClickHouse/ClickHouse/pull/42936) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Backported in [#43039](https://github.com/ClickHouse/ClickHouse/issues/43039): Add a CI step to mark commits as ready for release; soft-forbid launching a release script from branches but master. [#43017](https://github.com/ClickHouse/ClickHouse/pull/43017) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

#### Bug Fix (user-visible misbehavior in official stable or prestable release)
* Backported in [#43427](https://github.com/ClickHouse/ClickHouse/issues/43427): Fixed queries with `SAMPLE BY` with prewhere optimization on tables using `Merge` engine. [#43315](https://github.com/ClickHouse/ClickHouse/pull/43315) ([Antonio Andelic](https://github.com/antonio2368)).
* Backported in [#43720](https://github.com/ClickHouse/ClickHouse/issues/43720): Fixed primary key analysis with conditions involving `toString(enum)`. [#43596](https://github.com/ClickHouse/ClickHouse/pull/43596) ([Nikita Taranov](https://github.com/nickitat)).

#### NOT FOR CHANGELOG / INSIGNIFICANT
* Always run `BuilderReport` and `BuilderSpecialReport` in all CI types [#42684](https://github.com/ClickHouse/ClickHouse/pull/42684) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Update SECURITY.md on new stable tags [#43365](https://github.com/ClickHouse/ClickHouse/pull/43365) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Use all parameters with prefixes from ssm [#43467](https://github.com/ClickHouse/ClickHouse/pull/43467) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Temporarily disable `test_hive_query` [#43542](https://github.com/ClickHouse/ClickHouse/pull/43542) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Do not checkout submodules recursively [#43637](https://github.com/ClickHouse/ClickHouse/pull/43637) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Use docker images cache from merged PRs in master and release branches [#43664](https://github.com/ClickHouse/ClickHouse/pull/43664) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

docs/changelogs/v22.8.10.29-lts.md (new file, 32 lines)
@@ -0,0 +1,32 @@
---
sidebar_position: 1
sidebar_label: 2022
---

# 2022 Changelog

### ClickHouse release v22.8.10.29-lts (d568a57f7af) FIXME as compared to v22.8.9.24-lts (a1b69551d40)

#### Backward Incompatible Change
* Backported in [#43485](https://github.com/ClickHouse/ClickHouse/issues/43485): Fixed backward incompatibility in (de)serialization of states of `min`, `max`, `any*`, `argMin`, `argMax` aggregate functions with `String` argument. The incompatibility was introduced in https://github.com/ClickHouse/ClickHouse/pull/41431 and affects 22.9, 22.10 and 22.11 branches (fixed since 22.9.6, 22.10.4 and 22.11.2 correspondingly). Some minor releases of 22.3, 22.7 and 22.8 branches are also affected: 22.3.13...22.3.14 (fixed since 22.3.15), 22.8.6...22.8.9 (fixed since 22.8.10), 22.7.6 and newer (will not be fixed in 22.7, we recommend to upgrade from 22.7.* to 22.8.10 or newer). This release note does not concern users that have never used affected versions. Incompatible versions append extra `'\0'` to strings when reading states of the aggregate functions mentioned above. For example, if an older version saved state of `anyState('foobar')` to `state_column` then incompatible version will print `'foobar\0'` on `anyMerge(state_column)`. Also incompatible versions write states of the aggregate functions without trailing `'\0'`. Newer versions (that have the fix) can correctly read data written by all versions including incompatible versions, except one corner case. If an incompatible version saved a state with a string that actually ends with null character, then newer version will trim trailing `'\0'` when reading state of affected aggregate function. For example, if an incompatible version saved state of `anyState('abrac\0dabra\0')` to `state_column` then incompatible versions will print `'abrac\0dabra'` on `anyMerge(state_column)`. The issue also affects distributed queries when an incompatible version works in a cluster together with older or newer versions. [#43038](https://github.com/ClickHouse/ClickHouse/pull/43038) ([Raúl Marín](https://github.com/Algunenano)).

#### Build/Testing/Packaging Improvement
* Backported in [#43051](https://github.com/ClickHouse/ClickHouse/issues/43051): Wait for all files are in sync before archiving them in integration tests. [#42891](https://github.com/ClickHouse/ClickHouse/pull/42891) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

#### Bug Fix (user-visible misbehavior in official stable or prestable release)
* Backported in [#43513](https://github.com/ClickHouse/ClickHouse/issues/43513): - Fix several buffer over-reads. [#43159](https://github.com/ClickHouse/ClickHouse/pull/43159) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#43428](https://github.com/ClickHouse/ClickHouse/issues/43428): Fixed queries with `SAMPLE BY` with prewhere optimization on tables using `Merge` engine. [#43315](https://github.com/ClickHouse/ClickHouse/pull/43315) ([Antonio Andelic](https://github.com/antonio2368)).
* Backported in [#43580](https://github.com/ClickHouse/ClickHouse/issues/43580): Fix a bug when row level filter uses default value of column. [#43387](https://github.com/ClickHouse/ClickHouse/pull/43387) ([Alexander Gololobov](https://github.com/davenger)).
* Backported in [#43721](https://github.com/ClickHouse/ClickHouse/issues/43721): Fixed primary key analysis with conditions involving `toString(enum)`. [#43596](https://github.com/ClickHouse/ClickHouse/pull/43596) ([Nikita Taranov](https://github.com/nickitat)).

#### NOT FOR CHANGELOG / INSIGNIFICANT
* Fix 02267_file_globs_schema_inference.sql flakiness [#41877](https://github.com/ClickHouse/ClickHouse/pull/41877) ([Kruglov Pavel](https://github.com/Avogar)).
* Update SECURITY.md on new stable tags [#43365](https://github.com/ClickHouse/ClickHouse/pull/43365) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Use all parameters with prefixes from ssm [#43467](https://github.com/ClickHouse/ClickHouse/pull/43467) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Temporarily disable `test_hive_query` [#43542](https://github.com/ClickHouse/ClickHouse/pull/43542) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Do not checkout submodules recursively [#43637](https://github.com/ClickHouse/ClickHouse/pull/43637) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Use docker images cache from merged PRs in master and release branches [#43664](https://github.com/ClickHouse/ClickHouse/pull/43664) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix pagination issue in GITHUB_JOB_ID() [#43681](https://github.com/ClickHouse/ClickHouse/pull/43681) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

docs/changelogs/v22.9.5.25-stable.md (new file, 30 lines)
@@ -0,0 +1,30 @@
---
sidebar_position: 1
sidebar_label: 2022
---

# 2022 Changelog

### ClickHouse release v22.9.5.25-stable (68ba857aa82) FIXME as compared to v22.9.4.32-stable (3db8bcf1a70)

#### Improvement
* Backported in [#42841](https://github.com/ClickHouse/ClickHouse/issues/42841): Update tzdata to 2022f. Mexico will no longer observe DST except near the US border: https://www.timeanddate.com/news/time/mexico-abolishes-dst-2022.html. Chihuahua moves to year-round UTC-6 on 2022-10-30. Fiji no longer observes DST. See https://github.com/google/cctz/pull/235 and https://bugs.launchpad.net/ubuntu/+source/tzdata/+bug/1995209. [#42796](https://github.com/ClickHouse/ClickHouse/pull/42796) ([Alexey Milovidov](https://github.com/alexey-milovidov)).

#### Build/Testing/Packaging Improvement
* Backported in [#42965](https://github.com/ClickHouse/ClickHouse/issues/42965): Before the fix, the user-defined config was preserved by RPM in `$file.rpmsave`. The PR fixes it and won't replace the user's files from packages. [#42936](https://github.com/ClickHouse/ClickHouse/pull/42936) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Backported in [#43041](https://github.com/ClickHouse/ClickHouse/issues/43041): Add a CI step to mark commits as ready for release; soft-forbid launching a release script from branches but master. [#43017](https://github.com/ClickHouse/ClickHouse/pull/43017) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

#### Bug Fix (user-visible misbehavior in official stable or prestable release)
* Backported in [#42749](https://github.com/ClickHouse/ClickHouse/issues/42749): A segmentation fault related to DNS & c-ares has been reported. The below error ocurred in multiple threads: ``` 2022-09-28 15:41:19.008,2022.09.28 15:41:19.008088 [ 356 ] {} <Fatal> BaseDaemon: ######################################## 2022-09-28 15:41:19.008,"2022.09.28 15:41:19.008147 [ 356 ] {} <Fatal> BaseDaemon: (version 22.8.5.29 (official build), build id: 92504ACA0B8E2267) (from thread 353) (no query) Received signal Segmentation fault (11)" 2022-09-28 15:41:19.008,2022.09.28 15:41:19.008196 [ 356 ] {} <Fatal> BaseDaemon: Address: 0xf Access: write. Address not mapped to object. 2022-09-28 15:41:19.008,2022.09.28 15:41:19.008216 [ 356 ] {} <Fatal> BaseDaemon: Stack trace: 0x188f8212 0x1626851b 0x1626a69e 0x16269b3f 0x16267eab 0x13cf8284 0x13d24afc 0x13c5217e 0x14ec2495 0x15ba440f 0x15b9d13b 0x15bb2699 0x1891ccb3 0x1891e00d 0x18ae0769 0x18ade022 0x7f76aa985609 0x7f76aa8aa133 2022-09-28 15:41:19.008,2022.09.28 15:41:19.008274 [ 356 ] {} <Fatal> BaseDaemon: 2. Poco::Net::IPAddress::family() const @ 0x188f8212 in /usr/bin/clickhouse 2022-09-28 15:41:19.008,2022.09.28 15:41:19.008297 [ 356 ] {} <Fatal> BaseDaemon: 3. ? @ 0x1626851b in /usr/bin/clickhouse 2022-09-28 15:41:19.008,2022.09.28 15:41:19.008309 [ 356 ] {} <Fatal> BaseDaemon: 4. ? @ 0x1626a69e in /usr/bin/clickhouse ```. [#42234](https://github.com/ClickHouse/ClickHouse/pull/42234) ([Arthur Passos](https://github.com/arthurpassos)).
* Backported in [#42863](https://github.com/ClickHouse/ClickHouse/issues/42863): Fix lowerUTF8()/upperUTF8() in case of symbol was in between 16-byte boundary (very frequent case of you have strings > 16 bytes long). [#42812](https://github.com/ClickHouse/ClickHouse/pull/42812) ([Azat Khuzhin](https://github.com/azat)).
* Backported in [#43063](https://github.com/ClickHouse/ClickHouse/issues/43063): Fix rare NOT_FOUND_COLUMN_IN_BLOCK error when projection is possible to use but there is no projection available. This fixes [#42771](https://github.com/ClickHouse/ClickHouse/issues/42771) . The bug was introduced in https://github.com/ClickHouse/ClickHouse/pull/25563. [#42938](https://github.com/ClickHouse/ClickHouse/pull/42938) ([Amos Bird](https://github.com/amosbird)).
* Backported in [#43443](https://github.com/ClickHouse/ClickHouse/issues/43443): - Fix several buffer over-reads. [#43159](https://github.com/ClickHouse/ClickHouse/pull/43159) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#43429](https://github.com/ClickHouse/ClickHouse/issues/43429): Fixed queries with `SAMPLE BY` with prewhere optimization on tables using `Merge` engine. [#43315](https://github.com/ClickHouse/ClickHouse/pull/43315) ([Antonio Andelic](https://github.com/antonio2368)).

#### NOT FOR CHANGELOG / INSIGNIFICANT
* Always run `BuilderReport` and `BuilderSpecialReport` in all CI types [#42684](https://github.com/ClickHouse/ClickHouse/pull/42684) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Update SECURITY.md on new stable tags [#43365](https://github.com/ClickHouse/ClickHouse/pull/43365) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Use all parameters with prefixes from ssm [#43467](https://github.com/ClickHouse/ClickHouse/pull/43467) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

docs/changelogs/v22.9.6.20-stable.md (new file, 28 lines)
@@ -0,0 +1,28 @@
---
sidebar_position: 1
sidebar_label: 2022
---

# 2022 Changelog

### ClickHouse release v22.9.6.20-stable (ef6343f9579) FIXME as compared to v22.9.5.25-stable (68ba857aa82)

#### Backward Incompatible Change
* Backported in [#43486](https://github.com/ClickHouse/ClickHouse/issues/43486): Fixed backward incompatibility in (de)serialization of states of `min`, `max`, `any*`, `argMin`, `argMax` aggregate functions with `String` argument. The incompatibility was introduced in https://github.com/ClickHouse/ClickHouse/pull/41431 and affects 22.9, 22.10 and 22.11 branches (fixed since 22.9.6, 22.10.4 and 22.11.2 correspondingly). Some minor releases of 22.3, 22.7 and 22.8 branches are also affected: 22.3.13...22.3.14 (fixed since 22.3.15), 22.8.6...22.8.9 (fixed since 22.8.10), 22.7.6 and newer (will not be fixed in 22.7, we recommend to upgrade from 22.7.* to 22.8.10 or newer). This release note does not concern users that have never used affected versions. Incompatible versions append extra `'\0'` to strings when reading states of the aggregate functions mentioned above. For example, if an older version saved state of `anyState('foobar')` to `state_column` then incompatible version will print `'foobar\0'` on `anyMerge(state_column)`. Also incompatible versions write states of the aggregate functions without trailing `'\0'`. Newer versions (that have the fix) can correctly read data written by all versions including incompatible versions, except one corner case. If an incompatible version saved a state with a string that actually ends with null character, then newer version will trim trailing `'\0'` when reading state of affected aggregate function. For example, if an incompatible version saved state of `anyState('abrac\0dabra\0')` to `state_column` then incompatible versions will print `'abrac\0dabra'` on `anyMerge(state_column)`. The issue also affects distributed queries when an incompatible version works in a cluster together with older or newer versions. [#43038](https://github.com/ClickHouse/ClickHouse/pull/43038) ([Raúl Marín](https://github.com/Algunenano)).

#### Build/Testing/Packaging Improvement
* Backported in [#43052](https://github.com/ClickHouse/ClickHouse/issues/43052): Wait for all files are in sync before archiving them in integration tests. [#42891](https://github.com/ClickHouse/ClickHouse/pull/42891) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

#### Bug Fix (user-visible misbehavior in official stable or prestable release)
* Backported in [#43505](https://github.com/ClickHouse/ClickHouse/issues/43505): Fix a bug when row level filter uses default value of column. [#43387](https://github.com/ClickHouse/ClickHouse/pull/43387) ([Alexander Gololobov](https://github.com/davenger)).
* Backported in [#43722](https://github.com/ClickHouse/ClickHouse/issues/43722): Fixed primary key analysis with conditions involving `toString(enum)`. [#43596](https://github.com/ClickHouse/ClickHouse/pull/43596) ([Nikita Taranov](https://github.com/nickitat)).

#### NOT FOR CHANGELOG / INSIGNIFICANT
* Fix 02267_file_globs_schema_inference.sql flakiness [#41877](https://github.com/ClickHouse/ClickHouse/pull/41877) ([Kruglov Pavel](https://github.com/Avogar)).
* Temporarily disable `test_hive_query` [#43542](https://github.com/ClickHouse/ClickHouse/pull/43542) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Do not checkout submodules recursively [#43637](https://github.com/ClickHouse/ClickHouse/pull/43637) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Use docker images cache from merged PRs in master and release branches [#43664](https://github.com/ClickHouse/ClickHouse/pull/43664) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix pagination issue in GITHUB_JOB_ID() [#43681](https://github.com/ClickHouse/ClickHouse/pull/43681) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

@@ -16,7 +16,7 @@ import SupersetDocker from '@site/docs/en/_snippets/_add_superset_detail.md';
## Goal

In this guide you will learn how to:
- Load the OpenCelliD data in Clickhouse
- Load the OpenCelliD data in ClickHouse
- Connect Apache Superset to ClickHouse
- Build a dashboard based on data available in the dataset

@@ -275,7 +275,7 @@ Here is a description of the columns taken from the OpenCelliD forum:
To find your MCC check [Mobile network codes](https://en.wikipedia.org/wiki/Mobile_country_code), and use the three digits in the **Mobile country code** column.
:::

The schema for this table was designed for compact storage on disk and query speed.
- The `radio` data is stored as an `Enum8` (`UInt8`) rather than a string.
- `mcc` or Mobile country code, is stored as a `UInt16` as we know the range is 1 - 999.
- `lon` and `lat` are `Float64`.

@@ -56,6 +56,7 @@ As of November 8th, 2022, each TSV is approximately the following size and numbe
- [Line by line commit history of a file](#line-by-line-commit-history-of-a-file)
- [Unsolved Questions](#unsolved-questions)
- [Git blame](#git-blame)
- [Related Content](#related-content)

# Generating the data

@@ -2497,3 +2498,7 @@ LIMIT 20
We welcome exact and improved solutions here.

# Related Content

- [Git commits and our community](https://clickhouse.com/blog/clickhouse-git-community-commits)
- [Window and array functions for Git commit sequences](https://clickhouse.com/blog/clickhouse-window-array-functions-git-commits)

@@ -22,5 +22,8 @@ functions in ClickHouse. The sample datasets include:
- The [Cell Towers dataset](../getting-started/example-datasets/cell-towers.md) imports a CSV into ClickHouse
- The [NYPD Complaint Data](../getting-started/example-datasets/nypd_complaint_data.md) demonstrates how to use data inference to simplify creating tables
- The ["What's on the Menu?" dataset](../getting-started/example-datasets/menus.md) has an example of denormalizing data
- The [Getting Data Into ClickHouse - Part 1](https://clickhouse.com/blog/getting-data-into-clickhouse-part-1) provides examples of defining a schema and loading a small Hacker News dataset
- The [Getting Data Into ClickHouse - Part 2 - A JSON detour](https://clickhouse.com/blog/getting-data-into-clickhouse-part-2-json) shows how JSON data can be loaded
- The [Getting Data Into ClickHouse - Part 3 - Using S3](https://clickhouse.com/blog/getting-data-into-clickhouse-part-3-s3) has examples of loading data from s3

View the **Tutorials and Datasets** menu for a complete list of sample datasets.

@@ -8,7 +8,7 @@ slug: /en/install

You have two options for getting up and running with ClickHouse:

- **[ClickHouse Cloud](https://clickhouse.cloud/):** the official ClickHouse as a service, - built by, maintained, and supported by the creators of ClickHouse
- **[ClickHouse Cloud](https://clickhouse.com/cloud/):** the official ClickHouse as a service, - built by, maintained, and supported by the creators of ClickHouse
- **Self-managed ClickHouse:** ClickHouse can run on any Linux, FreeBSD, or Mac OS X with x86_64, AArch64, or PowerPC64LE CPU architecture

## ClickHouse Cloud

@@ -406,4 +406,3 @@ SELECT 1
**Congratulations, the system works!**

To continue experimenting, you can download one of the test data sets or go through [tutorial](/docs/en/tutorial.md).

@@ -1456,6 +1456,10 @@ If setting [input_format_with_types_use_header](../operations/settings/settings.
the types from input data will be compared with the types of the corresponding columns from the table. Otherwise, the second row will be skipped.
:::

## RowBinary format settings {#row-binary-format-settings}

- [format_binary_max_string_size](../operations/settings/settings.md#format_binary_max_string_size) - The maximum allowed size for String in RowBinary format. Default value - `1GiB`.

## Values {#data-format-values}

Prints every row in brackets. Rows are separated by commas. There is no comma after the last row. The values inside the brackets are also comma-separated. Numbers are output in a decimal format without quotes. Arrays are output in square brackets. Strings, dates, and dates with times are output in quotes. Escaping rules and parsing are similar to the [TabSeparated](#tabseparated) format. During formatting, extra spaces aren’t inserted, but during parsing, they are allowed and skipped (except for spaces inside array values, which are not allowed). [NULL](../sql-reference/syntax.md) is represented as `NULL`.
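A minimal, hedged sketch of the `Values` output described above; the column names and literals are arbitrary, and the comment only shows the expected shape of one output row:

```sql
SELECT 1 AS id, 'hello' AS msg, ['a', 'b'] AS tags FORMAT Values
-- one row, printed in brackets with quoted strings: (1,'hello',['a','b'])
```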

@@ -1,5 +1,8 @@
---
slug: /en/operations/backup
---

[//]: # (This file is included in Manage > Backups)
# Backup and Restore

- [Backup to a local disk](#backup-to-a-local-disk)
- [Configuring backup/restore to use an S3 endpoint](#configuring-backuprestore-to-use-an-s3-endpoint)

@@ -55,7 +58,7 @@ The BACKUP and RESTORE statements take a list of DATABASE and TABLE names, a des
- SETTINGS:
    - [`compression_method`](en/sql-reference/statements/create/table/#column-compression-codecs) and compression_level
    - `password` for the file on disk
    - `base_backup`: the destination of the previous backup of this source. For example, `Disk('backups', '1.zip')`

### Usage examples

@@ -72,7 +75,7 @@ RESTORE TABLE test.table FROM Disk('backups', '1.zip')
:::note
The above RESTORE would fail if the table `test.table` contains data; you would have to drop the table in order to test the RESTORE, or use the setting `allow_non_empty_tables=true`:
```
RESTORE TABLE test.table FROM Disk('backups', '1.zip')
SETTINGS allow_non_empty_tables=true
```
:::
|
||||
|
||||
Restore all data from the incremental backup and the base_backup into a new table `test.table2`:
|
||||
```
|
||||
RESTORE TABLE test.table AS test.table2
|
||||
RESTORE TABLE test.table AS test.table2
|
||||
FROM Disk('backups', 'incremental-a.zip');
|
||||
```
|
||||
|
||||
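For completeness, a hedged sketch of how the backup SETTINGS documented above (`base_backup`, `password`, compression) can be combined in one statement; the archive name `incremental-b.zip` and the password value are made-up examples, not part of the documentation:

```sql
-- Hypothetical: a password-protected incremental backup built on top of 1.zip.
BACKUP TABLE test.table TO Disk('backups', 'incremental-b.zip')
SETTINGS base_backup = Disk('backups', '1.zip'), password = 'secret';
```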

@@ -356,4 +359,3 @@ Data can be restored from backup using the `ALTER TABLE ... ATTACH PARTITION ...
For more information about queries related to partition manipulations, see the [ALTER documentation](../sql-reference/statements/alter/partition.md#alter_manipulations-with-partitions).

A third-party tool is available to automate this approach: [clickhouse-backup](https://github.com/AlexAkulov/clickhouse-backup).

@@ -11,6 +11,7 @@ Main cache types:

- `mark_cache` — Cache of marks used by table engines of the [MergeTree](../engines/table-engines/mergetree-family/mergetree.md) family.
- `uncompressed_cache` — Cache of uncompressed data used by table engines of the [MergeTree](../engines/table-engines/mergetree-family/mergetree.md) family.
- Operating system page cache (used indirectly, for files with actual data).

Additional cache types:

@@ -22,10 +23,4 @@ Additional cache types:
- Schema inference cache.
- [Filesystem cache](storing-data.md) over S3, Azure, Local and other disks.

Indirectly used:

- OS page cache.

To drop cache, use [SYSTEM DROP ... CACHE](../sql-reference/statements/system.md) statements.
[Original article](https://clickhouse.com/docs/en/operations/caches/) <!--hide-->
To drop one of the caches, use [SYSTEM DROP ... CACHE](../sql-reference/statements/system.md#drop-mark-cache) statements.
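A concrete form of the `SYSTEM DROP ... CACHE` statements referenced above, targeting the mark and uncompressed caches listed in this file; run them only when you actually want to invalidate those caches:

```sql
SYSTEM DROP MARK CACHE;
SYSTEM DROP UNCOMPRESSED CACHE;
```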

@@ -668,7 +668,7 @@ log_query_views=1

## log_formatted_queries {#settings-log-formatted-queries}

Allows to log formatted queries to the [system.query_log](../../operations/system-tables/query_log.md) system table (populates `formatted_query` column in the [system.query_log](../../operations/system-tables/query_log.md)).

Possible values:

@@ -1807,6 +1807,41 @@ See also:

- System table [trace_log](../../operations/system-tables/trace_log.md/#system_tables-trace_log)

## memory_profiler_step {#memory_profiler_step}

Sets the step of memory profiler. Whenever query memory usage becomes larger than every next step in number of bytes the memory profiler will collect the allocating stacktrace and will write it into [trace_log](../../operations/system-tables/trace_log.md#system_tables-trace_log).

Possible values:

- A positive integer number of bytes.
- 0 for turning off the memory profiler.

Default value: 4,194,304 bytes (4 MiB).

## memory_profiler_sample_probability {#memory_profiler_sample_probability}

Sets the probability of collecting stacktraces at random allocations and deallocations and writing them into [trace_log](../../operations/system-tables/trace_log.md#system_tables-trace_log).

Possible values:

- A positive floating-point number in the range [0..1].
- 0.0 for turning off the memory sampling.

Default value: 0.0.

## trace_profile_events {#trace_profile_events}

Enables or disables collecting stacktraces on each update of profile events along with the name of profile event and the value of increment and sending them into [trace_log](../../operations/system-tables/trace_log.md#system_tables-trace_log).

Possible values:

- 1 — Tracing of profile events enabled.
- 0 — Tracing of profile events disabled.

Default value: 0.

## allow_introspection_functions {#settings-allow_introspection_functions}

Enables or disables [introspection functions](../../sql-reference/functions/introspection.md) for query profiling.
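A hedged, end-to-end sketch of how the three settings above feed `system.trace_log`; the step, probability, and limit values are arbitrary illustrations, not recommendations:

```sql
-- Profile memory in 1 MiB steps, sample ~1% of (de)allocations, and trace profile events.
SET memory_profiler_step = 1048576;
SET memory_profiler_sample_probability = 0.01;
SET trace_profile_events = 1;

-- Afterwards, inspect what was collected (see the trace_log columns documented further down).
SELECT trace_type, event, sum(size) AS bytes, count() AS samples
FROM system.trace_log
GROUP BY trace_type, event
ORDER BY samples DESC
LIMIT 10;
```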

@@ -4829,3 +4864,11 @@ Disabled by default.

Allow skipping columns with unsupported types while schema inference for format BSONEachRow.

Disabled by default.

## RowBinary format settings {#row-binary-format-settings}

### format_binary_max_string_size {#format_binary_max_string_size}

The maximum allowed size for String in RowBinary format. It prevents allocating large amount of memory in case of corrupted data. 0 means there is no limit.

Default value: `1GiB`
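For illustration only, a hedged example of tightening the limit described above before reading untrusted RowBinary input; the 16 MiB figure is arbitrary:

```sql
-- Reject RowBinary strings longer than 16 MiB instead of the 1 GiB default.
SET format_binary_max_string_size = 16777216;
```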

@@ -5,7 +5,8 @@ slug: /en/operations/system-tables/trace_log

Contains stack traces collected by the sampling query profiler.

ClickHouse creates this table when the [trace_log](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-trace_log) server configuration section is set. Also the [query_profiler_real_time_period_ns](../../operations/settings/settings.md#query_profiler_real_time_period_ns) and [query_profiler_cpu_time_period_ns](../../operations/settings/settings.md#query_profiler_cpu_time_period_ns) settings should be set.
ClickHouse creates this table when the [trace_log](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-trace_log) server configuration section is set. Also see settings: [query_profiler_real_time_period_ns](../../operations/settings/settings.md#query_profiler_real_time_period_ns), [query_profiler_cpu_time_period_ns](../../operations/settings/settings.md#query_profiler_cpu_time_period_ns), [memory_profiler_step](../../operations/settings/settings.md#memory_profiler_step),
[memory_profiler_sample_probability](../../operations/settings/settings.md#memory_profiler_sample_probability), [trace_profile_events](../../operations/settings/settings.md#trace_profile_events).

To analyze logs, use the `addressToLine`, `addressToLineWithInlines`, `addressToSymbol` and `demangle` introspection functions.

@@ -29,6 +30,8 @@ Columns:
    - `CPU` represents collecting stack traces by CPU time.
    - `Memory` represents collecting allocations and deallocations when memory allocation exceeds the subsequent watermark.
    - `MemorySample` represents collecting random allocations and deallocations.
    - `MemoryPeak` represents collecting updates of peak memory usage.
    - `ProfileEvent` represents collecting of increments of profile events.

- `thread_number` ([UInt32](../../sql-reference/data-types/int-uint.md)) — Thread identifier.

@@ -36,6 +39,12 @@ Columns:

- `trace` ([Array(UInt64)](../../sql-reference/data-types/array.md)) — Stack trace at the moment of sampling. Each element is a virtual memory address inside ClickHouse server process.

- `size` ([Int64](../../sql-reference/data-types/int-uint.md)) - For trace types `Memory`, `MemorySample` or `MemoryPeak` is the amount of memory allocated, for other trace types is 0.

- `event` ([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md)) - For trace type `ProfileEvent` is the name of updated profile event, for other trace types is an empty string.

- `increment` ([UInt64](../../sql-reference/data-types/int-uint.md)) - For trace type `ProfileEvent` is the amount of increment of profile event, for other trace types is 0.

**Example**

``` sql

@@ -286,3 +286,7 @@ end script
If you use antivirus software configure it to skip folders with ClickHouse datafiles (`/var/lib/clickhouse`) otherwise performance may be reduced and you may experience unexpected errors during data ingestion and background merges.

[Original article](https://clickhouse.com/docs/en/operations/tips/)

## Related Content

- [Getting started with ClickHouse? Here are 13 "Deadly Sins" and how to avoid them](https://clickhouse.com/blog/common-getting-started-issues-with-clickhouse)

@@ -1,5 +1,8 @@
---
slug: /en/operations/update
---

[//]: # (This file is included in Manage > Updates)
# Update

## Self-managed ClickHouse Upgrade

@@ -117,3 +117,8 @@ Read 186 rows, 4.15 KiB in 0.035 sec., 5302 rows/sec., 118.34 KiB/sec.
```

[Original article](https://clickhouse.com/docs/en/operations/utils/clickhouse-local/) <!--hide-->

## Related Content

- [Getting Data Into ClickHouse - Part 1](https://clickhouse.com/blog/getting-data-into-clickhouse-part-1)
- [Exploring massive, real-world data sets: 100+ Years of Weather Records in ClickHouse](https://clickhouse.com/blog/real-world-data-noaa-climate-data)

@@ -32,8 +32,8 @@ The null hypothesis is that means of populations are equal. Normal distribution
- calculated t-statistic. [Float64](../../../sql-reference/data-types/float.md).
- calculated p-value. [Float64](../../../sql-reference/data-types/float.md).
- [calculated confidence-interval-low.] [Float64](../../../sql-reference/data-types/float.md).
- [calculated confidence-interval-high.] [Float64](../../../sql-reference/data-types/float.md).
- calculated confidence-interval-low. [Float64](../../../sql-reference/data-types/float.md).
- calculated confidence-interval-high. [Float64](../../../sql-reference/data-types/float.md).

**Example**

@@ -95,3 +95,6 @@ Result:
└─────────────────────────────────────────────────────────────────────────────────────────────────┴─────────────────┘
```
## Related Content
- [Exploring massive, real-world data sets: 100+ Years of Weather Records in ClickHouse](https://clickhouse.com/blog/real-world-data-noaa-climate-data)

@@ -75,3 +75,7 @@ SELECT * FROM json FORMAT JSONEachRow
```text
{"o":{"a":1,"b":{"c":2,"d":[1,2,3]}}}
```

## Related Content

- [Getting Data Into ClickHouse - Part 2 - A JSON detour](https://clickhouse.com/blog/getting-data-into-clickhouse-part-2-json)

@@ -1,4 +1,4 @@
:::tip
If you are using a dictionary with ClickHouse Cloud please use the DDL query option to create your dictionaries, and create your dictionary as user `default`.
Also, verify the list of supported dictionary sources in the [Cloud Compatibility guide](/docs/en/whats-new/cloud-capabilities.md).
Also, verify the list of supported dictionary sources in the [Cloud Compatibility guide](/docs/en/cloud/reference/cloud-compatibility.md).
:::

@@ -134,3 +134,7 @@ Result:
│ [[[(3,1),(0,1),(0,-1),(3,-1)]]] │ Value │
|
||||
└─────────────────────────────────┴───────┘
|
||||
```
|
||||
|
||||
## Related Content
|
||||
|
||||
- [Exploring massive, real-world data sets: 100+ Years of Weather Records in ClickHouse](https://clickhouse.com/blog/real-world-data-noaa-climate-data)
|
||||
|
@ -410,35 +410,35 @@ Converts a date with time to a certain fixed date, while preserving the time.
|
||||
|
||||
## toRelativeYearNum
|
||||
|
||||
Converts a date with time or date to the number of the year, starting from a certain fixed point in the past.
|
||||
Converts a date or date with time to the number of the year, starting from a certain fixed point in the past.
|
||||
|
||||
## toRelativeQuarterNum
|
||||
|
||||
Converts a date with time or date to the number of the quarter, starting from a certain fixed point in the past.
|
||||
Converts a date or date with time to the number of the quarter, starting from a certain fixed point in the past.
|
||||
|
||||
## toRelativeMonthNum
|
||||
|
||||
Converts a date with time or date to the number of the month, starting from a certain fixed point in the past.
|
||||
Converts a date or date with time to the number of the month, starting from a certain fixed point in the past.
|
||||
|
||||
## toRelativeWeekNum
|
||||
|
||||
Converts a date with time or date to the number of the week, starting from a certain fixed point in the past.
|
||||
Converts a date or date with time to the number of the week, starting from a certain fixed point in the past.
|
||||
|
||||
## toRelativeDayNum
|
||||
|
||||
Converts a date with time or date to the number of the day, starting from a certain fixed point in the past.
|
||||
Converts a date or date with time to the number of the day, starting from a certain fixed point in the past.
|
||||
|
||||
## toRelativeHourNum
|
||||
|
||||
Converts a date with time or date to the number of the hour, starting from a certain fixed point in the past.
|
||||
Converts a date or date with time to the number of the hour, starting from a certain fixed point in the past.
|
||||
|
||||
## toRelativeMinuteNum
|
||||
|
||||
Converts a date with time or date to the number of the minute, starting from a certain fixed point in the past.
|
||||
Converts a date or date with time to the number of the minute, starting from a certain fixed point in the past.
|
||||
|
||||
## toRelativeSecondNum
|
||||
|
||||
Converts a date with time or date to the number of the second, starting from a certain fixed point in the past.
|
||||
Converts a date or date with time to the number of the second, starting from a certain fixed point in the past.
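
Since each of these functions counts whole units elapsed since a fixed point, subtracting two of them gives the number of unit boundaries crossed between two dates; a minimal sketch (the aliases are illustrative):

``` sql
SELECT
    toRelativeDayNum(toDate('2022-01-01')) - toRelativeDayNum(toDate('2021-12-29')) AS days_crossed,
    toRelativeMonthNum(toDate('2022-01-01')) - toRelativeMonthNum(toDate('2021-12-29')) AS months_crossed;
```

This returns 3 and 1 respectively, which matches the convention described for `date_diff` below.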
|
||||
|
||||
## toISOYear
|
||||
|
||||
@ -517,6 +517,154 @@ SELECT toDate('2016-12-27') AS date, toYearWeek(date) AS yearWeek0, toYearWeek(d
|
||||
└────────────┴───────────┴───────────┴───────────┘
|
||||
```
|
||||
|
||||
## age
|
||||
|
||||
Returns the `unit` component of the difference between `startdate` and `enddate`. The difference is calculated using a precision of 1 second.
|
||||
E.g. the difference between `2021-12-29` and `2022-01-01` is 3 days for `day` unit, 0 months for `month` unit, 0 years for `year` unit.
|
||||
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
age('unit', startdate, enddate, [timezone])
|
||||
```
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `unit` — The type of interval for result. [String](../../sql-reference/data-types/string.md).
|
||||
Possible values:
|
||||
|
||||
- `second` (possible abbreviations: `ss`, `s`)
|
||||
- `minute` (possible abbreviations: `mi`, `n`)
|
||||
- `hour` (possible abbreviations: `hh`, `h`)
|
||||
- `day` (possible abbreviations: `dd`, `d`)
|
||||
- `week` (possible abbreviations: `wk`, `ww`)
|
||||
- `month` (possible abbreviations: `mm`, `m`)
|
||||
- `quarter` (possible abbreviations: `qq`, `q`)
|
||||
- `year` (possible abbreviations: `yyyy`, `yy`)
|
||||
|
||||
- `startdate` — The first time value to subtract (the subtrahend). [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md).
|
||||
|
||||
- `enddate` — The second time value to subtract from (the minuend). [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md).
|
||||
|
||||
- `timezone` — [Timezone name](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) (optional). If specified, it is applied to both `startdate` and `enddate`. If not specified, timezones of `startdate` and `enddate` are used. If they are not the same, the result is unspecified. [String](../../sql-reference/data-types/string.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
Difference between `enddate` and `startdate` expressed in `unit`.
|
||||
|
||||
Type: [Int](../../sql-reference/data-types/int-uint.md).
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT age('hour', toDateTime('2018-01-01 22:30:00'), toDateTime('2018-01-02 23:00:00'));
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌─age('hour', toDateTime('2018-01-01 22:30:00'), toDateTime('2018-01-02 23:00:00'))─┐
|
||||
│ 24 │
|
||||
└───────────────────────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT
|
||||
toDate('2022-01-01') AS e,
|
||||
toDate('2021-12-29') AS s,
|
||||
age('day', s, e) AS day_age,
|
||||
age('month', s, e) AS month__age,
|
||||
age('year', s, e) AS year_age;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌──────────e─┬──────────s─┬─day_age─┬─month__age─┬─year_age─┐
|
||||
│ 2022-01-01 │ 2021-12-29 │ 3 │ 0 │ 0 │
|
||||
└────────────┴────────────┴─────────┴────────────┴──────────┘
|
||||
```
|
||||
|
||||
|
||||
## date\_diff
|
||||
|
||||
Returns the count of the specified `unit` boundaries crossed between the `startdate` and `enddate`.
|
||||
The difference is calculated using relative units, e.g. the difference between `2021-12-29` and `2022-01-01` is 3 days for day unit (see [toRelativeDayNum](#torelativedaynum)), 1 month for month unit (see [toRelativeMonthNum](#torelativemonthnum)), 1 year for year unit (see [toRelativeYearNum](#torelativeyearnum)).
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
date_diff('unit', startdate, enddate, [timezone])
|
||||
```
|
||||
|
||||
Aliases: `dateDiff`, `DATE_DIFF`.
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `unit` — The type of interval for result. [String](../../sql-reference/data-types/string.md).
|
||||
Possible values:
|
||||
|
||||
- `second` (possible abbreviations: `ss`, `s`)
|
||||
- `minute` (possible abbreviations: `mi`, `n`)
|
||||
- `hour` (possible abbreviations: `hh`, `h`)
|
||||
- `day` (possible abbreviations: `dd`, `d`)
|
||||
- `week` (possible abbreviations: `wk`, `ww`)
|
||||
- `month` (possible abbreviations: `mm`, `m`)
|
||||
- `quarter` (possible abbreviations: `qq`, `q`)
|
||||
- `year` (possible abbreviations: `yyyy`, `yy`)
|
||||
|
||||
- `startdate` — The first time value to subtract (the subtrahend). [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md).
|
||||
|
||||
- `enddate` — The second time value to subtract from (the minuend). [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md).
|
||||
|
||||
- `timezone` — [Timezone name](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) (optional). If specified, it is applied to both `startdate` and `enddate`. If not specified, timezones of `startdate` and `enddate` are used. If they are not the same, the result is unspecified. [String](../../sql-reference/data-types/string.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
Difference between `enddate` and `startdate` expressed in `unit`.
|
||||
|
||||
Type: [Int](../../sql-reference/data-types/int-uint.md).
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT dateDiff('hour', toDateTime('2018-01-01 22:00:00'), toDateTime('2018-01-02 23:00:00'));
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌─dateDiff('hour', toDateTime('2018-01-01 22:00:00'), toDateTime('2018-01-02 23:00:00'))─┐
|
||||
│ 25 │
|
||||
└────────────────────────────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT
|
||||
toDate('2022-01-01') AS e,
|
||||
toDate('2021-12-29') AS s,
|
||||
dateDiff('day', s, e) AS day_diff,
|
||||
dateDiff('month', s, e) AS month__diff,
|
||||
dateDiff('year', s, e) AS year_diff;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌──────────e─┬──────────s─┬─day_diff─┬─month__diff─┬─year_diff─┐
|
||||
│ 2022-01-01 │ 2021-12-29 │ 3 │ 1 │ 1 │
|
||||
└────────────┴────────────┴──────────┴─────────────┴───────────┘
|
||||
```
|
||||
|
||||
## date\_trunc
|
||||
|
||||
Truncates date and time data to the specified part of date.
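
A minimal sketch of the truncation behaviour (the literal values are chosen only for illustration):

``` sql
SELECT
    date_trunc('month', toDateTime('2022-03-15 12:30:45')) AS month_start,
    date_trunc('hour',  toDateTime('2022-03-15 12:30:45')) AS hour_start;
```

Here `month_start` becomes `2022-03-01 00:00:00` and `hour_start` becomes `2022-03-15 12:00:00`.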
|
||||
@ -637,80 +785,6 @@ Result:
|
||||
└───────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## date\_diff
|
||||
|
||||
Returns the difference between two dates or dates with time values.
|
||||
The difference is calculated using relative units, e.g. the difference between `2022-01-01` and `2021-12-29` is 3 days for day unit (see [toRelativeDayNum](#torelativedaynum)), 1 month for month unit (see [toRelativeMonthNum](#torelativemonthnum)), 1 year for year unit (see [toRelativeYearNum](#torelativeyearnum)).
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
date_diff('unit', startdate, enddate, [timezone])
|
||||
```
|
||||
|
||||
Aliases: `dateDiff`, `DATE_DIFF`.
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `unit` — The type of interval for result. [String](../../sql-reference/data-types/string.md).
|
||||
Possible values:
|
||||
|
||||
- `second`
|
||||
- `minute`
|
||||
- `hour`
|
||||
- `day`
|
||||
- `week`
|
||||
- `month`
|
||||
- `quarter`
|
||||
- `year`
|
||||
|
||||
- `startdate` — The first time value to subtract (the subtrahend). [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md).
|
||||
|
||||
- `enddate` — The second time value to subtract from (the minuend). [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md).
|
||||
|
||||
- `timezone` — [Timezone name](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) (optional). If specified, it is applied to both `startdate` and `enddate`. If not specified, timezones of `startdate` and `enddate` are used. If they are not the same, the result is unspecified. [String](../../sql-reference/data-types/string.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
Difference between `enddate` and `startdate` expressed in `unit`.
|
||||
|
||||
Type: [Int](../../sql-reference/data-types/int-uint.md).
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT dateDiff('hour', toDateTime('2018-01-01 22:00:00'), toDateTime('2018-01-02 23:00:00'));
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌─dateDiff('hour', toDateTime('2018-01-01 22:00:00'), toDateTime('2018-01-02 23:00:00'))─┐
|
||||
│ 25 │
|
||||
└────────────────────────────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT
|
||||
toDate('2022-01-01') AS e,
|
||||
toDate('2021-12-29') AS s,
|
||||
dateDiff('day', s, e) AS day_diff,
|
||||
dateDiff('month', s, e) AS month__diff,
|
||||
dateDiff('year', s, e) AS year_diff;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌──────────e─┬──────────s─┬─day_diff─┬─month__diff─┬─year_diff─┐
|
||||
│ 2022-01-01 │ 2021-12-29 │ 3 │ 1 │ 1 │
|
||||
└────────────┴────────────┴──────────┴─────────────┴───────────┘
|
||||
```
|
||||
|
||||
## date\_sub
|
||||
|
||||
Subtracts the time interval or date interval from the provided date or date with time.
|
||||
|
@ -1865,6 +1865,17 @@ Next, specify the path to `libcatboostmodel.<so|dylib>` in the clickhouse config
|
||||
</clickhouse>
|
||||
```
|
||||
|
||||
For security and isolation reasons, the model evaluation does not run in the server process but in the clickhouse-library-bridge process.
|
||||
At the first execution of `catboostEvaluate()`, the server starts the library bridge process if it is not running already. Both processes
|
||||
communicate using an HTTP interface. By default, port `9012` is used. A different port can be specified as follows - this is useful if port
|
||||
`9012` is already assigned to a different service.
|
||||
|
||||
``` xml
|
||||
<library_bridge>
|
||||
<port>9019</port>
|
||||
</library_bridge>
|
||||
```
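
Once the bridge is configured, evaluation is invoked directly from SQL. A minimal, hedged sketch; the model path, table, and feature column names are placeholders:

``` sql
-- The first call starts the library bridge process if it is not already running.
SELECT catboostEvaluate('/path/to/trained_model.bin', feature_1, feature_2, feature_3) AS prediction
FROM my_features_table;
```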
|
||||
|
||||
2. Train a catboost model using libcatboost
|
||||
|
||||
See [Training and applying models](https://catboost.ai/docs/features/training.html#training) for how to train catboost models from a training data set.
|
||||
|
@ -464,5 +464,39 @@ Removes the query string and fragment identifier. The question mark and number s
|
||||
|
||||
### cutURLParameter(URL, name)
|
||||
|
||||
Removes the ‘name’ URL parameter, if present. This function works under the assumption that the parameter name is encoded in the URL exactly the same way as in the passed argument.
|
||||
Removes the `name` parameter from URL, if present. This function does not encode or decode characters in parameter names, e.g. `Client ID` and `Client%20ID` are treated as different parameter names.
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
cutURLParameter(URL, name)
|
||||
```
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `url` — URL. [String](../../sql-reference/data-types/string.md).
|
||||
- `name` — name of URL parameter. [String](../../sql-reference/data-types/string.md) or [Array](../../sql-reference/data-types/array.md) of Strings.
|
||||
|
||||
**Returned value**
|
||||
|
||||
- URL with `name` URL parameter removed.
|
||||
|
||||
Type: `String`.
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT
|
||||
cutURLParameter('http://bigmir.net/?a=b&c=d&e=f#g', 'a') as url_without_a,
|
||||
cutURLParameter('http://bigmir.net/?a=b&c=d&e=f#g', ['c', 'e']) as url_without_c_and_e;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌─url_without_a────────────────┬─url_without_c_and_e──────┐
|
||||
│ http://bigmir.net/?c=d&e=f#g │ http://bigmir.net/?a=b#g │
|
||||
└──────────────────────────────┴──────────────────────────┘
|
||||
```
|
||||
|
@ -162,7 +162,7 @@ ALTER TABLE table_name [ON CLUSTER cluster] FREEZE [PARTITION partition_expr] [W
|
||||
|
||||
This query creates a local backup of a specified partition. If the `PARTITION` clause is omitted, the query creates the backup of all partitions at once.
|
||||
|
||||
:::note
|
||||
:::note
|
||||
The entire backup process is performed without stopping the server.
|
||||
:::
|
||||
|
||||
@ -172,9 +172,9 @@ At the time of execution, for a data snapshot, the query creates hardlinks to a
|
||||
|
||||
- `/var/lib/clickhouse/` is the working ClickHouse directory specified in the config.
|
||||
- `N` is the incremental number of the backup.
|
||||
- if the `WITH NAME` parameter is specified, then the value of the `'backup_name'` parameter is used instead of the incremental number.
|
||||
- if the `WITH NAME` parameter is specified, then the value of the `'backup_name'` parameter is used instead of the incremental number.
|
||||
|
||||
:::note
|
||||
:::note
|
||||
If you use [a set of disks for data storage in a table](/docs/en/engines/table-engines/mergetree-family/mergetree.md/#table_engine-mergetree-multiple-volumes), the `shadow/N` directory appears on every disk, storing data parts that matched by the `PARTITION` expression.
|
||||
:::
|
||||
|
||||
@ -194,7 +194,7 @@ To restore data from a backup, do the following:
|
||||
|
||||
Restoring from a backup does not require stopping the server.
|
||||
|
||||
For more information about backups and restoring data, see the [Data Backup](/docs/en/manage/backups.mdx) section.
|
||||
For more information about backups and restoring data, see the [Data Backup](/docs/en/operations/backup.md) section.
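
As a hedged end-to-end sketch (the table name and partition expression are placeholders and must match the table's `PARTITION BY` key), a backup and restore cycle looks roughly like this:

``` sql
-- Create hardlinked copies of the partition's parts under /var/lib/clickhouse/shadow/
ALTER TABLE visits FREEZE PARTITION 202003 WITH NAME 'backup_202003';

-- ...copy the frozen parts into the table's detached/ directory on the target server...

-- Re-attach the copied parts as live data
ALTER TABLE visits ATTACH PARTITION 202003;
```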
|
||||
|
||||
## UNFREEZE PARTITION
|
||||
|
||||
|
@ -587,3 +587,8 @@ ORDER BY
|
||||
│ ambient_temp │ 2020-03-01 12:00:00 │ 16 │ 16 │
|
||||
└──────────────┴─────────────────────┴───────┴─────────────────────────┘
|
||||
```
|
||||
|
||||
## Related Content
|
||||
|
||||
- [Window and array functions for Git commit sequences](https://clickhouse.com/blog/clickhouse-window-array-functions-git-commits)
|
||||
- [Getting Data Into ClickHouse - Part 3 - Using S3](https://clickhouse.com/blog/getting-data-into-clickhouse-part-3-s3)
|
||||
|
@ -424,23 +424,23 @@ WITH toDateTime64('2020-01-01 10:20:30.999', 3) AS dt64 SELECT toStartOfSecond(d
|
||||
|
||||
## toRelativeYearNum {#torelativeyearnum}
|
||||
|
||||
Переводит дату-с-временем или дату в номер года, начиная с некоторого фиксированного момента в прошлом.
|
||||
Переводит дату или дату-с-временем в номер года, начиная с некоторого фиксированного момента в прошлом.
|
||||
|
||||
## toRelativeQuarterNum {#torelativequarternum}
|
||||
|
||||
Переводит дату-с-временем или дату в номер квартала, начиная с некоторого фиксированного момента в прошлом.
|
||||
Переводит дату или дату-с-временем в номер квартала, начиная с некоторого фиксированного момента в прошлом.
|
||||
|
||||
## toRelativeMonthNum {#torelativemonthnum}
|
||||
|
||||
Переводит дату-с-временем или дату в номер месяца, начиная с некоторого фиксированного момента в прошлом.
|
||||
Переводит дату или дату-с-временем в номер месяца, начиная с некоторого фиксированного момента в прошлом.
|
||||
|
||||
## toRelativeWeekNum {#torelativeweeknum}
|
||||
|
||||
Переводит дату-с-временем или дату в номер недели, начиная с некоторого фиксированного момента в прошлом.
|
||||
Переводит дату или дату-с-временем в номер недели, начиная с некоторого фиксированного момента в прошлом.
|
||||
|
||||
## toRelativeDayNum {#torelativedaynum}
|
||||
|
||||
Переводит дату-с-временем или дату в номер дня, начиная с некоторого фиксированного момента в прошлом.
|
||||
Переводит дату или дату-с-временем в номер дня, начиная с некоторого фиксированного момента в прошлом.
|
||||
|
||||
## toRelativeHourNum {#torelativehournum}
|
||||
|
||||
@ -456,7 +456,7 @@ WITH toDateTime64('2020-01-01 10:20:30.999', 3) AS dt64 SELECT toStartOfSecond(d
|
||||
|
||||
## toISOYear {#toisoyear}
|
||||
|
||||
Переводит дату-с-временем или дату в число типа UInt16, содержащее номер ISO года. ISO год отличается от обычного года, потому что в соответствии с [ISO 8601:1988](https://en.wikipedia.org/wiki/ISO_8601) ISO год начинается необязательно первого января.
|
||||
Переводит дату или дату-с-временем в число типа UInt16, содержащее номер ISO года. ISO год отличается от обычного года, потому что в соответствии с [ISO 8601:1988](https://en.wikipedia.org/wiki/ISO_8601) ISO год начинается необязательно первого января.
|
||||
|
||||
**Пример**
|
||||
|
||||
@ -479,7 +479,7 @@ SELECT
|
||||
|
||||
## toISOWeek {#toisoweek}
|
||||
|
||||
Переводит дату-с-временем или дату в число типа UInt8, содержащее номер ISO недели.
|
||||
Переводит дату или дату-с-временем в число типа UInt8, содержащее номер ISO недели.
|
||||
Начало ISO года отличается от начала обычного года, потому что в соответствии с [ISO 8601:1988](https://en.wikipedia.org/wiki/ISO_8601) первая неделя года - это неделя с четырьмя или более днями в этом году.
|
||||
|
||||
1 Января 2017 г. - воскресение, т.е. первая ISO неделя 2017 года началась в понедельник 2 января, поэтому 1 января 2017 это последняя неделя 2016 года.
|
||||
@ -503,7 +503,7 @@ SELECT
|
||||
```
|
||||
|
||||
## toWeek(date\[, mode\]\[, timezone\]) {#toweek}
|
||||
Переводит дату-с-временем или дату в число UInt8, содержащее номер недели. Второй аргументам mode задает режим, начинается ли неделя с воскресенья или с понедельника и должно ли возвращаемое значение находиться в диапазоне от 0 до 53 или от 1 до 53. Если аргумент mode опущен, то используется режим 0.
|
||||
Переводит дату или дату-с-временем в число UInt8, содержащее номер недели. Второй аргументам mode задает режим, начинается ли неделя с воскресенья или с понедельника и должно ли возвращаемое значение находиться в диапазоне от 0 до 53 или от 1 до 53. Если аргумент mode опущен, то используется режим 0.
|
||||
|
||||
`toISOWeek() ` эквивалентно `toWeek(date,3)`.
|
||||
|
||||
@ -569,6 +569,132 @@ SELECT toDate('2016-12-27') AS date, toYearWeek(date) AS yearWeek0, toYearWeek(d
|
||||
└────────────┴───────────┴───────────┴───────────┘
|
||||
```
|
||||
|
||||
## age
|
||||
|
||||
Вычисляет компонент `unit` разницы между `startdate` и `enddate`. Разница вычисляется с точностью в 1 секунду.
|
||||
Например, разница между `2021-12-29` и `2022-01-01` 3 дня для единицы `day`, 0 месяцев для единицы `month`, 0 лет для единицы `year`.
|
||||
|
||||
**Синтаксис**
|
||||
|
||||
``` sql
|
||||
age('unit', startdate, enddate, [timezone])
|
||||
```
|
||||
|
||||
**Аргументы**
|
||||
|
||||
- `unit` — единица измерения времени, в которой будет выражено возвращаемое значение функции. [String](../../sql-reference/data-types/string.md).
|
||||
Возможные значения:
|
||||
|
||||
- `second` (возможные сокращения: `ss`, `s`)
|
||||
- `minute` (возможные сокращения: `mi`, `n`)
|
||||
- `hour` (возможные сокращения: `hh`, `h`)
|
||||
- `day` (возможные сокращения: `dd`, `d`)
|
||||
- `week` (возможные сокращения: `wk`, `ww`)
|
||||
- `month` (возможные сокращения: `mm`, `m`)
|
||||
- `quarter` (возможные сокращения: `qq`, `q`)
|
||||
- `year` (возможные сокращения: `yyyy`, `yy`)
|
||||
|
||||
- `startdate` — первая дата или дата со временем, которая вычитается из `enddate`. [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) или [DateTime64](../../sql-reference/data-types/datetime64.md).
|
||||
|
||||
- `enddate` — вторая дата или дата со временем, из которой вычитается `startdate`. [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) или [DateTime64](../../sql-reference/data-types/datetime64.md).
|
||||
|
||||
- `timezone` — [часовой пояс](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) (необязательно). Если этот аргумент указан, то он применяется как для `startdate`, так и для `enddate`. Если этот аргумент не указан, то используются часовые пояса аргументов `startdate` и `enddate`. Если часовые пояса аргументов `startdate` и `enddate` не совпадают, то результат не определен. [String](../../sql-reference/data-types/string.md).
|
||||
|
||||
**Возвращаемое значение**
|
||||
|
||||
Разница между `enddate` и `startdate`, выраженная в `unit`.
|
||||
|
||||
Тип: [Int](../../sql-reference/data-types/int-uint.md).
|
||||
|
||||
**Пример**
|
||||
|
||||
Запрос:
|
||||
|
||||
``` sql
|
||||
SELECT age('hour', toDateTime('2018-01-01 22:30:00'), toDateTime('2018-01-02 23:00:00'));
|
||||
```
|
||||
|
||||
Результат:
|
||||
|
||||
``` text
|
||||
┌─age('hour', toDateTime('2018-01-01 22:30:00'), toDateTime('2018-01-02 23:00:00'))─┐
|
||||
│ 24 │
|
||||
└───────────────────────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
Запрос:
|
||||
|
||||
``` sql
|
||||
SELECT
|
||||
toDate('2022-01-01') AS e,
|
||||
toDate('2021-12-29') AS s,
|
||||
age('day', s, e) AS day_age,
|
||||
age('month', s, e) AS month__age,
|
||||
age('year', s, e) AS year_age;
|
||||
```
|
||||
|
||||
Результат:
|
||||
|
||||
``` text
|
||||
┌──────────e─┬──────────s─┬─day_age─┬─month__age─┬─year_age─┐
|
||||
│ 2022-01-01 │ 2021-12-29 │ 3 │ 0 │ 0 │
|
||||
└────────────┴────────────┴─────────┴────────────┴──────────┘
|
||||
```
|
||||
|
||||
## date\_diff {#date_diff}
|
||||
|
||||
Вычисляет разницу указанных границ `unit` пересекаемых между `startdate` и `enddate`.
|
||||
|
||||
**Синтаксис**
|
||||
|
||||
``` sql
|
||||
date_diff('unit', startdate, enddate, [timezone])
|
||||
```
|
||||
|
||||
Синонимы: `dateDiff`, `DATE_DIFF`.
|
||||
|
||||
**Аргументы**
|
||||
|
||||
- `unit` — единица измерения времени, в которой будет выражено возвращаемое значение функции. [String](../../sql-reference/data-types/string.md).
|
||||
Возможные значения:
|
||||
|
||||
- `second` (возможные сокращения: `ss`, `s`)
|
||||
- `minute` (возможные сокращения: `mi`, `n`)
|
||||
- `hour` (возможные сокращения: `hh`, `h`)
|
||||
- `day` (возможные сокращения: `dd`, `d`)
|
||||
- `week` (возможные сокращения: `wk`, `ww`)
|
||||
- `month` (возможные сокращения: `mm`, `m`)
|
||||
- `quarter` (возможные сокращения: `qq`, `q`)
|
||||
- `year` (возможные сокращения: `yyyy`, `yy`)
|
||||
|
||||
- `startdate` — первая дата или дата со временем, которая вычитается из `enddate`. [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) или [DateTime64](../../sql-reference/data-types/datetime64.md).
|
||||
|
||||
- `enddate` — вторая дата или дата со временем, из которой вычитается `startdate`. [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) или [DateTime64](../../sql-reference/data-types/datetime64.md).
|
||||
|
||||
- `timezone` — [часовой пояс](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) (необязательно). Если этот аргумент указан, то он применяется как для `startdate`, так и для `enddate`. Если этот аргумент не указан, то используются часовые пояса аргументов `startdate` и `enddate`. Если часовые пояса аргументов `startdate` и `enddate` не совпадают, то результат не определен. [String](../../sql-reference/data-types/string.md).
|
||||
|
||||
**Возвращаемое значение**
|
||||
|
||||
Разница между `enddate` и `startdate`, выраженная в `unit`.
|
||||
|
||||
Тип: [Int](../../sql-reference/data-types/int-uint.md).
|
||||
|
||||
**Пример**
|
||||
|
||||
Запрос:
|
||||
|
||||
``` sql
|
||||
SELECT dateDiff('hour', toDateTime('2018-01-01 22:00:00'), toDateTime('2018-01-02 23:00:00'));
|
||||
```
|
||||
|
||||
Результат:
|
||||
|
||||
``` text
|
||||
┌─dateDiff('hour', toDateTime('2018-01-01 22:00:00'), toDateTime('2018-01-02 23:00:00'))─┐
|
||||
│ 25 │
|
||||
└────────────────────────────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## date_trunc {#date_trunc}
|
||||
|
||||
Отсекает от даты и времени части, меньшие чем указанная часть.
|
||||
@ -689,60 +815,6 @@ SELECT date_add(YEAR, 3, toDate('2018-01-01'));
|
||||
└───────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## date\_diff {#date_diff}
|
||||
|
||||
Вычисляет разницу между двумя значениями дат или дат со временем.
|
||||
|
||||
**Синтаксис**
|
||||
|
||||
``` sql
|
||||
date_diff('unit', startdate, enddate, [timezone])
|
||||
```
|
||||
|
||||
Синонимы: `dateDiff`, `DATE_DIFF`.
|
||||
|
||||
**Аргументы**
|
||||
|
||||
- `unit` — единица измерения времени, в которой будет выражено возвращаемое значение функции. [String](../../sql-reference/data-types/string.md).
|
||||
Возможные значения:
|
||||
|
||||
- `second`
|
||||
- `minute`
|
||||
- `hour`
|
||||
- `day`
|
||||
- `week`
|
||||
- `month`
|
||||
- `quarter`
|
||||
- `year`
|
||||
|
||||
- `startdate` — первая дата или дата со временем, которая вычитается из `enddate`. [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) или [DateTime64](../../sql-reference/data-types/datetime64.md).
|
||||
|
||||
- `enddate` — вторая дата или дата со временем, из которой вычитается `startdate`. [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) или [DateTime64](../../sql-reference/data-types/datetime64.md).
|
||||
|
||||
- `timezone` — [часовой пояс](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) (необязательно). Если этот аргумент указан, то он применяется как для `startdate`, так и для `enddate`. Если этот аргумент не указан, то используются часовые пояса аргументов `startdate` и `enddate`. Если часовые пояса аргументов `startdate` и `enddate` не совпадают, то результат не определен. [String](../../sql-reference/data-types/string.md).
|
||||
|
||||
**Возвращаемое значение**
|
||||
|
||||
Разница между `enddate` и `startdate`, выраженная в `unit`.
|
||||
|
||||
Тип: [Int](../../sql-reference/data-types/int-uint.md).
|
||||
|
||||
**Пример**
|
||||
|
||||
Запрос:
|
||||
|
||||
``` sql
|
||||
SELECT dateDiff('hour', toDateTime('2018-01-01 22:00:00'), toDateTime('2018-01-02 23:00:00'));
|
||||
```
|
||||
|
||||
Результат:
|
||||
|
||||
``` text
|
||||
┌─dateDiff('hour', toDateTime('2018-01-01 22:00:00'), toDateTime('2018-01-02 23:00:00'))─┐
|
||||
│ 25 │
|
||||
└────────────────────────────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## date\_sub {#date_sub}
|
||||
|
||||
Вычитает интервал времени или даты из указанной даты или даты со временем.
|
||||
|
@ -404,5 +404,39 @@ SELECT netloc('http://paul@www.example.com:80/');
|
||||
|
||||
### cutURLParameter(URL, name) {#cuturlparameterurl-name}
|
||||
|
||||
Удаляет параметр URL с именем name, если такой есть. Функция работает при допущении, что имя параметра закодировано в URL в точности таким же образом, что и в переданном аргументе.
|
||||
Удаляет параметр с именем `name` из URL, если такой есть. Функция не кодирует или декодирует символы в именах параметров. Например `Client ID` и `Client%20ID` обрабатываются как разные имена параметров.
|
||||
|
||||
**Синтаксис**
|
||||
|
||||
``` sql
|
||||
cutURLParameter(URL, name)
|
||||
```
|
||||
|
||||
**Аргументы**
|
||||
|
||||
- `url` — URL. [String](../../sql-reference/data-types/string.md).
|
||||
- `name` — имя параметра URL. [String](../../sql-reference/data-types/string.md) или [Array](../../sql-reference/data-types/array.md) состоящий из строк.
|
||||
|
||||
**Возвращаемое значение**
|
||||
|
||||
- URL с удалённым параметром URL с именем `name`.
|
||||
|
||||
Type: `String`.
|
||||
|
||||
**Пример**
|
||||
|
||||
Запрос:
|
||||
|
||||
``` sql
|
||||
SELECT
|
||||
cutURLParameter('http://bigmir.net/?a=b&c=d&e=f#g', 'a') as url_without_a,
|
||||
cutURLParameter('http://bigmir.net/?a=b&c=d&e=f#g', ['c', 'e']) as url_without_c_and_e;
|
||||
```
|
||||
|
||||
Результат:
|
||||
|
||||
``` text
|
||||
┌─url_without_a────────────────┬─url_without_c_and_e──────┐
|
||||
│ http://bigmir.net/?c=d&e=f#g │ http://bigmir.net/?a=b#g │
|
||||
└──────────────────────────────┴──────────────────────────┘
|
||||
```
|
||||
|
@ -1 +0,0 @@
|
||||
../../../en/sql-reference/table-functions/format.md
|
75
docs/ru/sql-reference/table-functions/format.md
Normal file
75
docs/ru/sql-reference/table-functions/format.md
Normal file
@ -0,0 +1,75 @@
|
||||
---
|
||||
slug: /ru/sql-reference/table-functions/format
|
||||
sidebar_position: 56
|
||||
sidebar_label: format
|
||||
---
|
||||
|
||||
# format
|
||||
|
||||
Extracts a table structure from the data and parses it according to the specified input format.
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
format(format_name, data)
|
||||
```
|
||||
|
||||
**Parameters**
|
||||
|
||||
- `format_name` — The [format](../../interfaces/formats.md#formats) of the data.
|
||||
- `data` — String literal or constant expression that returns a string containing data in the specified format.
|
||||
|
||||
**Returned value**
|
||||
|
||||
A table with data parsed from the `data` argument according to the specified format and the extracted schema.
|
||||
|
||||
**Examples**
|
||||
|
||||
**Query:**
|
||||
``` sql
|
||||
:) select * from format(JSONEachRow,
|
||||
$$
|
||||
{"a": "Hello", "b": 111}
|
||||
{"a": "World", "b": 123}
|
||||
{"a": "Hello", "b": 112}
|
||||
{"a": "World", "b": 124}
|
||||
$$)
|
||||
```
|
||||
|
||||
**Result:**
|
||||
|
||||
```text
|
||||
┌───b─┬─a─────┐
|
||||
│ 111 │ Hello │
|
||||
│ 123 │ World │
|
||||
│ 112 │ Hello │
|
||||
│ 124 │ World │
|
||||
└─────┴───────┘
|
||||
```
|
||||
|
||||
**Query:**
|
||||
```sql
|
||||
|
||||
:) desc format(JSONEachRow,
|
||||
$$
|
||||
{"a": "Hello", "b": 111}
|
||||
{"a": "World", "b": 123}
|
||||
{"a": "Hello", "b": 112}
|
||||
{"a": "World", "b": 124}
|
||||
$$)
|
||||
```
|
||||
|
||||
**Result:**
|
||||
|
||||
```text
|
||||
┌─name─┬─type──────────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┐
|
||||
│ b │ Nullable(Float64) │ │ │ │ │ │
|
||||
│ a │ Nullable(String) │ │ │ │ │ │
|
||||
└──────┴───────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘
|
||||
```
|
||||
|
||||
**See Also**
|
||||
|
||||
- [Formats](../../interfaces/formats.md)
|
||||
|
||||
[Original article](https://clickhouse.com/docs/en/sql-reference/table-functions/format) <!--hide-->
|
@ -19,7 +19,7 @@ then
|
||||
# Will make a repository with website content as the only commit.
|
||||
git init
|
||||
git remote add origin "${GIT_PROD_URI}"
|
||||
git config user.email "robot-clickhouse@clickhouse.com"
|
||||
git config user.email "robot-clickhouse@users.noreply.github.com"
|
||||
git config user.name "robot-clickhouse"
|
||||
|
||||
# Add files.
|
||||
|
@ -1 +0,0 @@
|
||||
../../../en/sql-reference/table-functions/format.md
|
75
docs/zh/sql-reference/table-functions/format.md
Normal file
75
docs/zh/sql-reference/table-functions/format.md
Normal file
@ -0,0 +1,75 @@
|
||||
---
|
||||
slug: /zh/sql-reference/table-functions/format
|
||||
sidebar_position: 56
|
||||
sidebar_label: format
|
||||
---
|
||||
|
||||
# format
|
||||
|
||||
Extracts a table structure from the data and parses it according to the specified input format.
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
format(format_name, data)
|
||||
```
|
||||
|
||||
**Parameters**
|
||||
|
||||
- `format_name` — The [format](../../interfaces/formats.md#formats) of the data.
|
||||
- `data` — String literal or constant expression that returns a string containing data in the specified format.
|
||||
|
||||
**Returned value**
|
||||
|
||||
A table with data parsed from the `data` argument according to the specified format and the extracted schema.
|
||||
|
||||
**Examples**
|
||||
|
||||
**Query:**
|
||||
``` sql
|
||||
:) select * from format(JSONEachRow,
|
||||
$$
|
||||
{"a": "Hello", "b": 111}
|
||||
{"a": "World", "b": 123}
|
||||
{"a": "Hello", "b": 112}
|
||||
{"a": "World", "b": 124}
|
||||
$$)
|
||||
```
|
||||
|
||||
**Result:**
|
||||
|
||||
```text
|
||||
┌───b─┬─a─────┐
|
||||
│ 111 │ Hello │
|
||||
│ 123 │ World │
|
||||
│ 112 │ Hello │
|
||||
│ 124 │ World │
|
||||
└─────┴───────┘
|
||||
```
|
||||
|
||||
**Query:**
|
||||
```sql
|
||||
|
||||
:) desc format(JSONEachRow,
|
||||
$$
|
||||
{"a": "Hello", "b": 111}
|
||||
{"a": "World", "b": 123}
|
||||
{"a": "Hello", "b": 112}
|
||||
{"a": "World", "b": 124}
|
||||
$$)
|
||||
```
|
||||
|
||||
**Result:**
|
||||
|
||||
```text
|
||||
┌─name─┬─type──────────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┐
|
||||
│ b │ Nullable(Float64) │ │ │ │ │ │
|
||||
│ a │ Nullable(String) │ │ │ │ │ │
|
||||
└──────┴───────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘
|
||||
```
|
||||
|
||||
**See Also**
|
||||
|
||||
- [Formats](../../interfaces/formats.md)
|
||||
|
||||
[Original article](https://clickhouse.com/docs/en/sql-reference/table-functions/format) <!--hide-->
|
@ -13,7 +13,6 @@ clickhouse_embed_binaries(
|
||||
|
||||
set(CLICKHOUSE_KEEPER_SOURCES
|
||||
Keeper.cpp
|
||||
TinyContext.cpp
|
||||
)
|
||||
|
||||
set (CLICKHOUSE_KEEPER_LINK
|
||||
@ -49,6 +48,8 @@ if (BUILD_STANDALONE_KEEPER)
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Coordination/KeeperStateMachine.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Coordination/KeeperStateManager.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Coordination/KeeperStorage.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Coordination/KeeperAsynchronousMetrics.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Coordination/TinyContext.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Coordination/pathUtils.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Coordination/SessionExpiryQueue.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Coordination/SummingStateMachine.cpp
|
||||
@ -64,7 +65,18 @@ if (BUILD_STANDALONE_KEEPER)
|
||||
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Server/KeeperTCPHandler.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Server/TCPServer.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Server/NotFoundHandler.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Server/ProtocolServerAdapter.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Server/PrometheusRequestHandler.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Server/PrometheusMetricsWriter.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Server/HTTPRequestHandlerFactoryMain.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Server/HTTP/HTTPServer.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Server/HTTP/ReadHeaders.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Server/HTTP/HTTPServerConnection.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Server/HTTP/HTTPServerRequest.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Server/HTTP/HTTPServerResponse.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Server/HTTP/HTTPServerConnectionFactory.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Server/HTTP/WriteBufferFromHTTPServerResponse.cpp
|
||||
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Compression/CachedCompressedReadBuffer.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Compression/CheckingCompressedReadBuffer.cpp
|
||||
@ -96,9 +108,7 @@ if (BUILD_STANDALONE_KEEPER)
|
||||
${CMAKE_CURRENT_BINARY_DIR}/../../src/Daemon/GitHash.generated.cpp
|
||||
|
||||
Keeper.cpp
|
||||
TinyContext.cpp
|
||||
clickhouse-keeper.cpp
|
||||
|
||||
)
|
||||
|
||||
clickhouse_add_executable(clickhouse-keeper ${CLICKHOUSE_KEEPER_STANDALONE_SOURCES})
|
||||
|
@ -6,7 +6,6 @@
|
||||
#include <Interpreters/DNSCacheUpdater.h>
|
||||
#include <Coordination/Defines.h>
|
||||
#include <Common/Config/ConfigReloader.h>
|
||||
#include <Server/TCPServer.h>
|
||||
#include <filesystem>
|
||||
#include <IO/UseSSL.h>
|
||||
#include <Core/ServerUUID.h>
|
||||
@ -22,8 +21,15 @@
|
||||
#include <Poco/Environment.h>
|
||||
#include <sys/stat.h>
|
||||
#include <pwd.h>
|
||||
#include <Coordination/FourLetterCommand.h>
|
||||
|
||||
#include <Coordination/FourLetterCommand.h>
|
||||
#include <Coordination/KeeperAsynchronousMetrics.h>
|
||||
|
||||
#include <Server/HTTP/HTTPServer.h>
|
||||
#include <Server/TCPServer.h>
|
||||
#include <Server/HTTPHandlerFactory.h>
|
||||
|
||||
#include "Core/Defines.h"
|
||||
#include "config.h"
|
||||
#include "config_version.h"
|
||||
|
||||
@ -52,6 +58,16 @@ int mainEntryClickHouseKeeper(int argc, char ** argv)
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef KEEPER_STANDALONE_BUILD
|
||||
|
||||
// Weak symbols don't work correctly on Darwin
|
||||
// so we have a stub implementation to avoid linker errors
|
||||
void collectCrashLog(
|
||||
Int32, UInt64, const String &, const StackTrace &)
|
||||
{}
|
||||
|
||||
#endif
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
@ -261,6 +277,60 @@ void Keeper::defineOptions(Poco::Util::OptionSet & options)
|
||||
BaseDaemon::defineOptions(options);
|
||||
}
|
||||
|
||||
struct Keeper::KeeperHTTPContext : public IHTTPContext
|
||||
{
|
||||
explicit KeeperHTTPContext(TinyContextPtr context_)
|
||||
: context(std::move(context_))
|
||||
{}
|
||||
|
||||
uint64_t getMaxHstsAge() const override
|
||||
{
|
||||
return context->getConfigRef().getUInt64("keeper_server.hsts_max_age", 0);
|
||||
}
|
||||
|
||||
uint64_t getMaxUriSize() const override
|
||||
{
|
||||
return context->getConfigRef().getUInt64("keeper_server.http_max_uri_size", 1048576);
|
||||
}
|
||||
|
||||
uint64_t getMaxFields() const override
|
||||
{
|
||||
return context->getConfigRef().getUInt64("keeper_server.http_max_fields", 1000000);
|
||||
}
|
||||
|
||||
uint64_t getMaxFieldNameSize() const override
|
||||
{
|
||||
return context->getConfigRef().getUInt64("keeper_server.http_max_field_name_size", 1048576);
|
||||
}
|
||||
|
||||
uint64_t getMaxFieldValueSize() const override
|
||||
{
|
||||
return context->getConfigRef().getUInt64("keeper_server.http_max_field_value_size", 1048576);
|
||||
}
|
||||
|
||||
uint64_t getMaxChunkSize() const override
|
||||
{
|
||||
return context->getConfigRef().getUInt64("keeper_server.http_max_chunk_size", 100_GiB);
|
||||
}
|
||||
|
||||
Poco::Timespan getReceiveTimeout() const override
|
||||
{
|
||||
return context->getConfigRef().getUInt64("keeper_server.http_receive_timeout", DEFAULT_HTTP_READ_BUFFER_TIMEOUT);
|
||||
}
|
||||
|
||||
Poco::Timespan getSendTimeout() const override
|
||||
{
|
||||
return context->getConfigRef().getUInt64("keeper_server.http_send_timeout", DEFAULT_HTTP_READ_BUFFER_TIMEOUT);
|
||||
}
|
||||
|
||||
TinyContextPtr context;
|
||||
};
|
||||
|
||||
HTTPContextPtr Keeper::httpContext()
|
||||
{
|
||||
return std::make_shared<KeeperHTTPContext>(tiny_context);
|
||||
}
|
||||
|
||||
int Keeper::main(const std::vector<std::string> & /*args*/)
|
||||
try
|
||||
{
|
||||
@ -335,6 +405,25 @@ try
|
||||
DNSResolver::instance().setDisableCacheFlag();
|
||||
|
||||
Poco::ThreadPool server_pool(3, config().getUInt("max_connections", 1024));
|
||||
std::mutex servers_lock;
|
||||
auto servers = std::make_shared<std::vector<ProtocolServerAdapter>>();
|
||||
|
||||
tiny_context = std::make_shared<TinyContext>();
|
||||
/// This object will periodically calculate some metrics.
|
||||
KeeperAsynchronousMetrics async_metrics(
|
||||
tiny_context,
|
||||
config().getUInt("asynchronous_metrics_update_period_s", 1),
|
||||
[&]() -> std::vector<ProtocolServerMetrics>
|
||||
{
|
||||
std::vector<ProtocolServerMetrics> metrics;
|
||||
|
||||
std::lock_guard lock(servers_lock);
|
||||
metrics.reserve(servers->size());
|
||||
for (const auto & server : *servers)
|
||||
metrics.emplace_back(ProtocolServerMetrics{server.getPortName(), server.currentThreads()});
|
||||
return metrics;
|
||||
}
|
||||
);
|
||||
|
||||
std::vector<std::string> listen_hosts = DB::getMultipleValuesFromConfig(config(), "", "listen_host");
|
||||
|
||||
@ -346,15 +435,13 @@ try
|
||||
listen_try = true;
|
||||
}
|
||||
|
||||
auto servers = std::make_shared<std::vector<ProtocolServerAdapter>>();
|
||||
|
||||
/// Initialize keeper RAFT. Do nothing if no keeper_server in config.
|
||||
tiny_context.initializeKeeperDispatcher(/* start_async = */ true);
|
||||
FourLetterCommandFactory::registerCommands(*tiny_context.getKeeperDispatcher());
|
||||
tiny_context->initializeKeeperDispatcher(/* start_async = */ true);
|
||||
FourLetterCommandFactory::registerCommands(*tiny_context->getKeeperDispatcher());
|
||||
|
||||
auto config_getter = [this] () -> const Poco::Util::AbstractConfiguration &
|
||||
{
|
||||
return tiny_context.getConfigRef();
|
||||
return tiny_context->getConfigRef();
|
||||
};
|
||||
|
||||
for (const auto & listen_host : listen_hosts)
|
||||
@ -373,7 +460,7 @@ try
|
||||
"Keeper (tcp): " + address.toString(),
|
||||
std::make_unique<TCPServer>(
|
||||
new KeeperTCPHandlerFactory(
|
||||
config_getter, tiny_context.getKeeperDispatcher(),
|
||||
config_getter, tiny_context->getKeeperDispatcher(),
|
||||
config().getUInt64("keeper_server.socket_receive_timeout_sec", DBMS_DEFAULT_RECEIVE_TIMEOUT_SEC),
|
||||
config().getUInt64("keeper_server.socket_send_timeout_sec", DBMS_DEFAULT_SEND_TIMEOUT_SEC), false), server_pool, socket));
|
||||
});
|
||||
@ -392,7 +479,7 @@ try
|
||||
"Keeper with secure protocol (tcp_secure): " + address.toString(),
|
||||
std::make_unique<TCPServer>(
|
||||
new KeeperTCPHandlerFactory(
|
||||
config_getter, tiny_context.getKeeperDispatcher(),
|
||||
config_getter, tiny_context->getKeeperDispatcher(),
|
||||
config().getUInt64("keeper_server.socket_receive_timeout_sec", DBMS_DEFAULT_RECEIVE_TIMEOUT_SEC),
|
||||
config().getUInt64("keeper_server.socket_send_timeout_sec", DBMS_DEFAULT_SEND_TIMEOUT_SEC), true), server_pool, socket));
|
||||
#else
|
||||
@ -401,6 +488,29 @@ try
|
||||
ErrorCodes::SUPPORT_IS_DISABLED};
|
||||
#endif
|
||||
});
|
||||
|
||||
const auto & config = config_getter();
|
||||
Poco::Timespan keep_alive_timeout(config.getUInt("keep_alive_timeout", 10), 0);
|
||||
Poco::Net::HTTPServerParams::Ptr http_params = new Poco::Net::HTTPServerParams;
|
||||
http_params->setTimeout(DBMS_DEFAULT_RECEIVE_TIMEOUT_SEC);
|
||||
http_params->setKeepAliveTimeout(keep_alive_timeout);
|
||||
|
||||
/// Prometheus (if defined and not setup yet with http_port)
|
||||
port_name = "prometheus.port";
|
||||
createServer(listen_host, port_name, listen_try, [&](UInt16 port)
|
||||
{
|
||||
Poco::Net::ServerSocket socket;
|
||||
auto address = socketBindListen(socket, listen_host, port);
|
||||
auto http_context = httpContext();
|
||||
socket.setReceiveTimeout(http_context->getReceiveTimeout());
|
||||
socket.setSendTimeout(http_context->getSendTimeout());
|
||||
servers->emplace_back(
|
||||
listen_host,
|
||||
port_name,
|
||||
"Prometheus: http://" + address.toString(),
|
||||
std::make_unique<HTTPServer>(
|
||||
std::move(http_context), createPrometheusMainHandlerFactory(*this, config_getter(), async_metrics, "PrometheusHandler-factory"), server_pool, socket, http_params));
|
||||
});
|
||||
}
|
||||
|
||||
for (auto & server : *servers)
|
||||
@ -409,6 +519,8 @@ try
|
||||
LOG_INFO(log, "Listening for {}", server.getDescription());
|
||||
}
|
||||
|
||||
async_metrics.start();
|
||||
|
||||
zkutil::EventPtr unused_event = std::make_shared<Poco::Event>();
|
||||
zkutil::ZooKeeperNodeCache unused_cache([] { return nullptr; });
|
||||
/// ConfigReloader have to strict parameters which are redundant in our case
|
||||
@ -421,7 +533,7 @@ try
|
||||
[&](ConfigurationPtr config, bool /* initial_loading */)
|
||||
{
|
||||
if (config->has("keeper_server"))
|
||||
tiny_context.updateKeeperConfiguration(*config);
|
||||
tiny_context->updateKeeperConfiguration(*config);
|
||||
},
|
||||
/* already_loaded = */ false); /// Reload it right now (initial loading)
|
||||
|
||||
@ -429,6 +541,8 @@ try
|
||||
LOG_INFO(log, "Shutting down.");
|
||||
main_config_reloader.reset();
|
||||
|
||||
async_metrics.stop();
|
||||
|
||||
LOG_DEBUG(log, "Waiting for current connections to Keeper to finish.");
|
||||
size_t current_connections = 0;
|
||||
for (auto & server : *servers)
|
||||
@ -450,7 +564,7 @@ try
|
||||
else
|
||||
LOG_INFO(log, "Closed connections to Keeper.");
|
||||
|
||||
tiny_context.shutdownKeeperDispatcher();
|
||||
tiny_context->shutdownKeeperDispatcher();
|
||||
|
||||
/// Wait server pool to avoid use-after-free of destroyed context in the handlers
|
||||
server_pool.joinAll();
|
||||
|
@ -1,8 +1,9 @@
|
||||
#pragma once
|
||||
|
||||
#include <Server/IServer.h>
|
||||
#include <Server/HTTP/HTTPContext.h>
|
||||
#include <Daemon/BaseDaemon.h>
|
||||
#include "TinyContext.h"
|
||||
#include <Coordination/TinyContext.h>
|
||||
|
||||
namespace Poco
|
||||
{
|
||||
@ -15,29 +16,40 @@ namespace Poco
|
||||
namespace DB
|
||||
{
|
||||
|
||||
namespace ErrorCodes
|
||||
{
|
||||
extern const int LOGICAL_ERROR;
|
||||
}
|
||||
|
||||
/// standalone clickhouse-keeper server (replacement for ZooKeeper). Uses the same
|
||||
/// config as clickhouse-server. Serves requests on TCP ports with or without
|
||||
/// SSL using ZooKeeper protocol.
|
||||
class Keeper : public BaseDaemon
|
||||
class Keeper : public BaseDaemon, public IServer
|
||||
{
|
||||
public:
|
||||
using ServerApplication::run;
|
||||
|
||||
Poco::Util::LayeredConfiguration & config() const
|
||||
Poco::Util::LayeredConfiguration & config() const override
|
||||
{
|
||||
return BaseDaemon::config();
|
||||
}
|
||||
|
||||
Poco::Logger & logger() const
|
||||
Poco::Logger & logger() const override
|
||||
{
|
||||
return BaseDaemon::logger();
|
||||
}
|
||||
|
||||
bool isCancelled() const
|
||||
bool isCancelled() const override
|
||||
{
|
||||
return BaseDaemon::isCancelled();
|
||||
}
|
||||
|
||||
/// Returns global application's context.
|
||||
ContextMutablePtr context() const override
|
||||
{
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot fetch context for Keeper");
|
||||
}
|
||||
|
||||
void defineOptions(Poco::Util::OptionSet & _options) override;
|
||||
|
||||
protected:
|
||||
@ -56,7 +68,10 @@ protected:
|
||||
std::string getDefaultConfigFileName() const override;
|
||||
|
||||
private:
|
||||
TinyContext tiny_context;
|
||||
TinyContextPtr tiny_context;
|
||||
|
||||
struct KeeperHTTPContext;
|
||||
HTTPContextPtr httpContext();
|
||||
|
||||
Poco::Net::SocketAddress socketBindListen(Poco::Net::ServerSocket & socket, const std::string & host, UInt16 port, [[maybe_unused]] bool secure = false) const;
|
||||
|
||||
|
@ -37,7 +37,7 @@
|
||||
#include <AggregateFunctions/registerAggregateFunctions.h>
|
||||
#include <TableFunctions/registerTableFunctions.h>
|
||||
#include <Storages/registerStorages.h>
|
||||
#include <Storages/NamedCollections.h>
|
||||
#include <Storages/NamedCollectionUtils.h>
|
||||
#include <Dictionaries/registerDictionaries.h>
|
||||
#include <Disks/registerDisks.h>
|
||||
#include <Formats/registerFormats.h>
|
||||
@ -120,7 +120,7 @@ void LocalServer::initialize(Poco::Util::Application & self)
|
||||
config().getUInt("max_io_thread_pool_free_size", 0),
|
||||
config().getUInt("io_thread_pool_queue_size", 10000));
|
||||
|
||||
NamedCollectionFactory::instance().initialize(config());
|
||||
NamedCollectionUtils::loadFromConfig(config());
|
||||
}
|
||||
|
||||
|
||||
@ -212,6 +212,8 @@ void LocalServer::tryInitPath()
|
||||
|
||||
global_context->setUserFilesPath(""); // user's files are everywhere
|
||||
|
||||
NamedCollectionUtils::loadFromSQL(global_context);
|
||||
|
||||
/// top_level_domains_lists
|
||||
const std::string & top_level_domains_path = config().getString("top_level_domains_path", path + "top_level_domains/");
|
||||
if (!top_level_domains_path.empty())
|
||||
|
@ -1,6 +1,6 @@
|
||||
#include "MetricsTransmitter.h"
|
||||
|
||||
#include <Interpreters/AsynchronousMetrics.h>
|
||||
#include <Common/AsynchronousMetrics.h>
|
||||
|
||||
#include <Common/CurrentMetrics.h>
|
||||
#include <Common/Exception.h>
|
||||
|
@ -46,7 +46,7 @@
|
||||
#include <IO/ReadBufferFromFile.h>
|
||||
#include <IO/IOThreadPool.h>
|
||||
#include <IO/UseSSL.h>
|
||||
#include <Interpreters/AsynchronousMetrics.h>
|
||||
#include <Interpreters/ServerAsynchronousMetrics.h>
|
||||
#include <Interpreters/DDLWorker.h>
|
||||
#include <Interpreters/DNSCacheUpdater.h>
|
||||
#include <Interpreters/DatabaseCatalog.h>
|
||||
@ -60,7 +60,7 @@
|
||||
#include <Storages/System/attachInformationSchemaTables.h>
|
||||
#include <Storages/Cache/ExternalDataSourceCache.h>
|
||||
#include <Storages/Cache/registerRemoteFileMetadatas.h>
|
||||
#include <Storages/NamedCollections.h>
|
||||
#include <Storages/NamedCollectionUtils.h>
|
||||
#include <AggregateFunctions/registerAggregateFunctions.h>
|
||||
#include <Functions/UserDefined/IUserDefinedSQLObjectsLoader.h>
|
||||
#include <Functions/registerFunctions.h>
|
||||
@ -782,7 +782,7 @@ try
|
||||
config().getUInt("max_io_thread_pool_free_size", 0),
|
||||
config().getUInt("io_thread_pool_queue_size", 10000));
|
||||
|
||||
NamedCollectionFactory::instance().initialize(config());
|
||||
NamedCollectionUtils::loadFromConfig(config());
|
||||
|
||||
/// Initialize global local cache for remote filesystem.
|
||||
if (config().has("local_cache_for_remote_fs"))
|
||||
@ -803,7 +803,7 @@ try
|
||||
std::vector<ProtocolServerAdapter> servers;
|
||||
std::vector<ProtocolServerAdapter> servers_to_start_before_tables;
|
||||
/// This object will periodically calculate some metrics.
|
||||
AsynchronousMetrics async_metrics(
|
||||
ServerAsynchronousMetrics async_metrics(
|
||||
global_context,
|
||||
config().getUInt("asynchronous_metrics_update_period_s", 1),
|
||||
config().getUInt("asynchronous_heavy_metrics_update_period_s", 120),
|
||||
@ -1168,6 +1168,8 @@ try
|
||||
SensitiveDataMasker::setInstance(std::make_unique<SensitiveDataMasker>(config(), "query_masking_rules"));
|
||||
}
|
||||
|
||||
NamedCollectionUtils::loadFromSQL(global_context);
|
||||
|
||||
auto main_config_reloader = std::make_unique<ConfigReloader>(
|
||||
config_path,
|
||||
include_from_path,
|
||||
@ -1336,7 +1338,8 @@ try
|
||||
#if USE_SSL
|
||||
CertificateReloader::instance().tryLoad(*config);
|
||||
#endif
|
||||
NamedCollectionFactory::instance().reload(*config);
|
||||
NamedCollectionUtils::reloadFromConfig(*config);
|
||||
|
||||
ProfileEvents::increment(ProfileEvents::MainConfigLoads);
|
||||
|
||||
/// Must be the last.
|
||||
@ -1947,15 +1950,15 @@ std::unique_ptr<TCPProtocolStackFactory> Server::buildProtocolStackFromConfig(
|
||||
return TCPServerConnectionFactory::Ptr(new PostgreSQLHandlerFactory(*this));
|
||||
if (type == "http")
|
||||
return TCPServerConnectionFactory::Ptr(
|
||||
new HTTPServerConnectionFactory(context(), http_params, createHandlerFactory(*this, config, async_metrics, "HTTPHandler-factory"))
|
||||
new HTTPServerConnectionFactory(httpContext(), http_params, createHandlerFactory(*this, config, async_metrics, "HTTPHandler-factory"))
|
||||
);
|
||||
if (type == "prometheus")
|
||||
return TCPServerConnectionFactory::Ptr(
|
||||
new HTTPServerConnectionFactory(context(), http_params, createHandlerFactory(*this, config, async_metrics, "PrometheusHandler-factory"))
|
||||
new HTTPServerConnectionFactory(httpContext(), http_params, createHandlerFactory(*this, config, async_metrics, "PrometheusHandler-factory"))
|
||||
);
|
||||
if (type == "interserver")
|
||||
return TCPServerConnectionFactory::Ptr(
|
||||
new HTTPServerConnectionFactory(context(), http_params, createHandlerFactory(*this, config, async_metrics, "InterserverIOHTTPHandler-factory"))
|
||||
new HTTPServerConnectionFactory(httpContext(), http_params, createHandlerFactory(*this, config, async_metrics, "InterserverIOHTTPHandler-factory"))
|
||||
);
|
||||
|
||||
throw Exception(ErrorCodes::INVALID_CONFIG_PARAMETER, "Protocol configuration error, unknown protocol name '{}'", type);
|
||||
@ -1996,6 +1999,11 @@ std::unique_ptr<TCPProtocolStackFactory> Server::buildProtocolStackFromConfig(
|
||||
return stack;
|
||||
}
|
||||
|
||||
HTTPContextPtr Server::httpContext() const
|
||||
{
|
||||
return std::make_shared<HTTPContext>(context());
|
||||
}
|
||||
|
||||
void Server::createServers(
|
||||
Poco::Util::AbstractConfiguration & config,
|
||||
const Strings & listen_hosts,
|
||||
@ -2078,7 +2086,7 @@ void Server::createServers(
|
||||
port_name,
|
||||
"http://" + address.toString(),
|
||||
std::make_unique<HTTPServer>(
|
||||
context(), createHandlerFactory(*this, config, async_metrics, "HTTPHandler-factory"), server_pool, socket, http_params));
|
||||
httpContext(), createHandlerFactory(*this, config, async_metrics, "HTTPHandler-factory"), server_pool, socket, http_params));
|
||||
});
|
||||
|
||||
/// HTTPS
|
||||
@ -2095,7 +2103,7 @@ void Server::createServers(
|
||||
port_name,
|
||||
"https://" + address.toString(),
|
||||
std::make_unique<HTTPServer>(
|
||||
context(), createHandlerFactory(*this, config, async_metrics, "HTTPSHandler-factory"), server_pool, socket, http_params));
|
||||
httpContext(), createHandlerFactory(*this, config, async_metrics, "HTTPSHandler-factory"), server_pool, socket, http_params));
|
||||
#else
|
||||
UNUSED(port);
|
||||
throw Exception{"HTTPS protocol is disabled because Poco library was built without NetSSL support.",
|
||||
@ -2220,7 +2228,7 @@ void Server::createServers(
|
||||
port_name,
|
||||
"Prometheus: http://" + address.toString(),
|
||||
std::make_unique<HTTPServer>(
|
||||
context(), createHandlerFactory(*this, config, async_metrics, "PrometheusHandler-factory"), server_pool, socket, http_params));
|
||||
httpContext(), createHandlerFactory(*this, config, async_metrics, "PrometheusHandler-factory"), server_pool, socket, http_params));
|
||||
});
|
||||
}
|
||||
|
||||
@ -2240,7 +2248,7 @@ void Server::createServers(
|
||||
port_name,
|
||||
"replica communication (interserver): http://" + address.toString(),
|
||||
std::make_unique<HTTPServer>(
|
||||
context(),
|
||||
httpContext(),
|
||||
createHandlerFactory(*this, config, async_metrics, "InterserverIOHTTPHandler-factory"),
|
||||
server_pool,
|
||||
socket,
|
||||
@ -2260,7 +2268,7 @@ void Server::createServers(
|
||||
port_name,
|
||||
"secure replica communication (interserver): https://" + address.toString(),
|
||||
std::make_unique<HTTPServer>(
|
||||
context(),
|
||||
httpContext(),
|
||||
createHandlerFactory(*this, config, async_metrics, "InterserverIOHTTPSHandler-factory"),
|
||||
server_pool,
|
||||
socket,
|
||||
|
@ -3,6 +3,7 @@
|
||||
#include <Server/IServer.h>
|
||||
|
||||
#include <Daemon/BaseDaemon.h>
|
||||
#include "Server/HTTP/HTTPContext.h"
|
||||
#include <Server/TCPProtocolStackFactory.h>
|
||||
#include <Poco/Net/HTTPServerParams.h>
|
||||
|
||||
@ -72,6 +73,8 @@ private:
|
||||
/// Updated/recent config, to compare http_handlers
|
||||
ConfigurationPtr latest_config;
|
||||
|
||||
HTTPContextPtr httpContext() const;
|
||||
|
||||
Poco::Net::SocketAddress socketBindListen(
|
||||
const Poco::Util::AbstractConfiguration & config,
|
||||
Poco::Net::ServerSocket & socket,
|
||||
|
@ -104,7 +104,7 @@ public:
|
||||
/// The same as allColumnFlags().
|
||||
static AccessFlags allFlagsGrantableOnColumnLevel();
|
||||
|
||||
static constexpr size_t SIZE = 128;
|
||||
static constexpr size_t SIZE = 256;
|
||||
private:
|
||||
using Flags = std::bitset<SIZE>;
|
||||
Flags flags;
|
||||
|
@ -69,6 +69,7 @@ enum class AccessType
|
||||
M(ALTER_FREEZE_PARTITION, "FREEZE PARTITION, UNFREEZE", TABLE, ALTER_TABLE) \
|
||||
\
|
||||
M(ALTER_DATABASE_SETTINGS, "ALTER DATABASE SETTING, ALTER MODIFY DATABASE SETTING, MODIFY DATABASE SETTING", DATABASE, ALTER_DATABASE) /* allows to execute ALTER MODIFY SETTING */\
|
||||
M(ALTER_NAMED_COLLECTION, "", GROUP, ALTER) /* allows to execute ALTER NAMED COLLECTION */\
|
||||
\
|
||||
M(ALTER_TABLE, "", GROUP, ALTER) \
|
||||
M(ALTER_DATABASE, "", GROUP, ALTER) \
|
||||
@ -88,6 +89,7 @@ enum class AccessType
|
||||
M(CREATE_TEMPORARY_TABLE, "", GLOBAL, CREATE) /* allows to create and manipulate temporary tables;
|
||||
implicitly enabled by the grant CREATE_TABLE on any table */ \
|
||||
M(CREATE_FUNCTION, "", GLOBAL, CREATE) /* allows to execute CREATE FUNCTION */ \
|
||||
M(CREATE_NAMED_COLLECTION, "", GLOBAL, CREATE) /* allows to execute CREATE NAMED COLLECTION */ \
|
||||
M(CREATE, "", GROUP, ALL) /* allows to execute {CREATE|ATTACH} */ \
|
||||
\
|
||||
M(DROP_DATABASE, "", DATABASE, DROP) /* allows to execute {DROP|DETACH} DATABASE */\
|
||||
@ -96,6 +98,7 @@ enum class AccessType
|
||||
implicitly enabled by the grant DROP_TABLE */\
|
||||
M(DROP_DICTIONARY, "", DICTIONARY, DROP) /* allows to execute {DROP|DETACH} DICTIONARY */\
|
||||
M(DROP_FUNCTION, "", GLOBAL, DROP) /* allows to execute DROP FUNCTION */\
|
||||
M(DROP_NAMED_COLLECTION, "", GLOBAL, DROP) /* allows to execute DROP NAMED COLLECTION */\
|
||||
M(DROP, "", GROUP, ALL) /* allows to execute {DROP|DETACH} */\
|
||||
\
|
||||
M(TRUNCATE, "TRUNCATE TABLE", TABLE, ALL) \
|
||||
|
@ -160,7 +160,7 @@ public:
|
||||
else
|
||||
{
|
||||
writeBinary(UInt8(0), buf);
|
||||
serialization->serializeBinary(elem, buf);
|
||||
serialization->serializeBinary(elem, buf, {});
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -181,7 +181,7 @@ public:
|
||||
UInt8 is_null = 0;
|
||||
readBinary(is_null, buf);
|
||||
if (!is_null)
|
||||
serialization->deserializeBinary(arr[i], buf);
|
||||
serialization->deserializeBinary(arr[i], buf, {});
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -795,7 +795,7 @@ public:
|
||||
if (!value.isNull())
|
||||
{
|
||||
writeBinary(true, buf);
|
||||
serialization.serializeBinary(value, buf);
|
||||
serialization.serializeBinary(value, buf, {});
|
||||
}
|
||||
else
|
||||
writeBinary(false, buf);
|
||||
@ -807,7 +807,7 @@ public:
|
||||
readBinary(is_not_null, buf);
|
||||
|
||||
if (is_not_null)
|
||||
serialization.deserializeBinary(value, buf);
|
||||
serialization.deserializeBinary(value, buf, {});
|
||||
}
|
||||
|
||||
void change(const IColumn & column, size_t row_num, Arena *)
|
||||
|
@ -77,7 +77,10 @@ protected:
|
||||
|
||||
static bool getFlag(ConstAggregateDataPtr __restrict place) noexcept
|
||||
{
|
||||
return result_is_nullable ? place[0] : true;
|
||||
if constexpr (result_is_nullable)
|
||||
return place[0];
|
||||
else
|
||||
return true;
|
||||
}
|
||||
|
||||
public:
|
||||
@ -98,9 +101,10 @@ public:
|
||||
|
||||
DataTypePtr getReturnType() const override
|
||||
{
|
||||
return result_is_nullable
|
||||
? makeNullable(nested_function->getReturnType())
|
||||
: nested_function->getReturnType();
|
||||
if constexpr (result_is_nullable)
|
||||
return makeNullable(nested_function->getReturnType());
|
||||
else
|
||||
return nested_function->getReturnType();
|
||||
}
|
||||
|
||||
void create(AggregateDataPtr __restrict place) const override
|
||||
@ -136,8 +140,9 @@ public:
|
||||
|
||||
void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena * arena) const override
|
||||
{
|
||||
if (result_is_nullable && getFlag(rhs))
|
||||
setFlag(place);
|
||||
if constexpr (result_is_nullable)
|
||||
if (getFlag(rhs))
|
||||
setFlag(place);
|
||||
|
||||
nested_function->merge(nestedPlace(place), nestedPlace(rhs), arena);
|
||||
}
|
||||
|
@ -296,19 +296,19 @@ public:
|
||||
{
|
||||
case 0:
|
||||
{
|
||||
serialize = [&](size_t col_idx, const Array & values){ values_serializations[col_idx]->serializeBinary(values[col_idx], buf); };
|
||||
serialize = [&](size_t col_idx, const Array & values){ values_serializations[col_idx]->serializeBinary(values[col_idx], buf, {}); };
|
||||
break;
|
||||
}
|
||||
case 1:
|
||||
{
|
||||
serialize = [&](size_t col_idx, const Array & values){ promoted_values_serializations[col_idx]->serializeBinary(values[col_idx], buf); };
|
||||
serialize = [&](size_t col_idx, const Array & values){ promoted_values_serializations[col_idx]->serializeBinary(values[col_idx], buf, {}); };
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
for (const auto & elem : merged_maps)
|
||||
{
|
||||
keys_serialization->serializeBinary(elem.first, buf);
|
||||
keys_serialization->serializeBinary(elem.first, buf, {});
|
||||
for (size_t col = 0; col < values_types.size(); ++col)
|
||||
serialize(col, elem.second);
|
||||
}
|
||||
@ -328,12 +328,12 @@ public:
|
||||
{
|
||||
case 0:
|
||||
{
|
||||
deserialize = [&](size_t col_idx, Array & values){ values_serializations[col_idx]->deserializeBinary(values[col_idx], buf); };
|
||||
deserialize = [&](size_t col_idx, Array & values){ values_serializations[col_idx]->deserializeBinary(values[col_idx], buf, {}); };
|
||||
break;
|
||||
}
|
||||
case 1:
|
||||
{
|
||||
deserialize = [&](size_t col_idx, Array & values){ promoted_values_serializations[col_idx]->deserializeBinary(values[col_idx], buf); };
|
||||
deserialize = [&](size_t col_idx, Array & values){ promoted_values_serializations[col_idx]->deserializeBinary(values[col_idx], buf, {}); };
|
||||
break;
|
||||
}
|
||||
}
|
||||
@ -341,7 +341,7 @@ public:
|
||||
for (size_t i = 0; i < size; ++i)
|
||||
{
|
||||
Field key;
|
||||
keys_serialization->deserializeBinary(key, buf);
|
||||
keys_serialization->deserializeBinary(key, buf, {});
|
||||
|
||||
Array values;
|
||||
values.resize(values_types.size());
|
||||
|
@ -79,8 +79,6 @@ FunctionNodePtr createResolvedAggregateFunction(const String & name, const Query
|
||||
function_node->resolveAsAggregateFunction(aggregate_function, aggregate_function->getReturnType());
|
||||
function_node->getArguments().getNodes() = { argument };
|
||||
|
||||
function_node->getArguments().getNodes() = { argument };
|
||||
|
||||
if (!parameters.empty())
|
||||
{
|
||||
QueryTreeNodes parameter_nodes;
|
||||
|
@ -2843,6 +2843,14 @@ IdentifierResolveResult QueryAnalyzer::tryResolveIdentifierInParentScopes(const
|
||||
}
|
||||
}
|
||||
|
||||
/** Nested subqueries cannot access outer subqueries table expressions from JOIN tree because
|
||||
* that can prevent resolution of table expression from CTE.
|
||||
*
|
||||
* Example: WITH a AS (SELECT number FROM numbers(1)), b AS (SELECT number FROM a) SELECT * FROM a as l, b as r;
|
||||
*/
|
||||
if (identifier_lookup.isTableExpressionLookup())
|
||||
identifier_resolve_settings.allow_to_check_join_tree = false;
|
||||
|
||||
while (scope_to_check != nullptr)
|
||||
{
|
||||
auto lookup_result = tryResolveIdentifier(identifier_lookup, *scope_to_check, identifier_resolve_settings);
|
||||
|
@ -166,7 +166,8 @@ void BackupWriterS3::copyObjectImpl(
|
||||
|
||||
auto outcome = client->CopyObject(request);
|
||||
|
||||
if (!outcome.IsSuccess() && outcome.GetError().GetExceptionName() == "EntityTooLarge")
|
||||
if (!outcome.IsSuccess() && (outcome.GetError().GetExceptionName() == "EntityTooLarge"
|
||||
|| outcome.GetError().GetExceptionName() == "InvalidRequest"))
|
||||
{ // Can't come here with MinIO, MinIO allows single part upload for large objects.
|
||||
copyObjectMultipartImpl(src_bucket, src_key, dst_bucket, dst_key, head, metadata);
|
||||
return;
|
||||
|
@ -96,6 +96,7 @@ RestorerFromBackup::RestorerFromBackup(
|
||||
, on_cluster_first_sync_timeout(context->getConfigRef().getUInt64("backups.on_cluster_first_sync_timeout", 180000))
|
||||
, create_table_timeout(context->getConfigRef().getUInt64("backups.create_table_timeout", 300000))
|
||||
, log(&Poco::Logger::get("RestorerFromBackup"))
|
||||
, tables_dependencies("RestorerFromBackup")
|
||||
{
|
||||
}
|
||||
|
||||
@ -133,6 +134,7 @@ RestorerFromBackup::DataRestoreTasks RestorerFromBackup::run(Mode mode)
|
||||
|
||||
/// Create tables using the create queries read from the backup.
|
||||
setStage(Stage::CREATING_TABLES);
|
||||
removeUnresolvedDependencies();
|
||||
createTables();
|
||||
|
||||
/// All that's left is to insert data into tables.
|
||||
@ -341,10 +343,11 @@ void RestorerFromBackup::findTableInBackup(const QualifiedTableName & table_name
|
||||
TableInfo & res_table_info = table_infos[table_name];
|
||||
res_table_info.create_table_query = create_table_query;
|
||||
res_table_info.is_predefined_table = DatabaseCatalog::instance().isPredefinedTable(StorageID{table_name.database, table_name.table});
|
||||
res_table_info.dependencies = getDependenciesSetFromCreateQuery(context->getGlobalContext(), table_name, create_table_query);
|
||||
res_table_info.has_data = backup->hasFiles(data_path_in_backup);
|
||||
res_table_info.data_path_in_backup = data_path_in_backup;
|
||||
|
||||
tables_dependencies.addDependencies(table_name, getDependenciesFromCreateQuery(context->getGlobalContext(), table_name, create_table_query));
|
||||
|
||||
if (partitions)
|
||||
{
|
||||
if (!res_table_info.partitions)
|
||||
@ -622,21 +625,62 @@ void RestorerFromBackup::checkDatabase(const String & database_name)
|
||||
}
|
||||
}
|
||||
|
||||
void RestorerFromBackup::removeUnresolvedDependencies()
|
||||
{
|
||||
auto need_exclude_dependency = [this](const StorageID & table_id)
|
||||
{
|
||||
/// Table will be restored.
|
||||
if (table_infos.contains(table_id.getQualifiedName()))
|
||||
return false;
|
||||
|
||||
/// Table exists and it already exists
|
||||
if (!DatabaseCatalog::instance().isTableExist(table_id, context))
|
||||
{
|
||||
LOG_WARNING(
|
||||
log,
|
||||
"Tables {} in backup depend on {}, but seems like {} is not in the backup and does not exist. "
|
||||
"Will try to ignore that and restore tables",
|
||||
fmt::join(tables_dependencies.getDependents(table_id), ", "),
|
||||
table_id,
|
||||
table_id);
|
||||
}
|
||||
|
||||
size_t num_dependencies, num_dependents;
|
||||
tables_dependencies.getNumberOfAdjacents(table_id, num_dependencies, num_dependents);
|
||||
if (num_dependencies || !num_dependents)
|
||||
throw Exception(
|
||||
ErrorCodes::LOGICAL_ERROR,
|
||||
"Table {} in backup doesn't have dependencies and dependent tables as it expected to. It's a bug",
|
||||
table_id);
|
||||
|
||||
return true; /// Exclude this dependency.
|
||||
};
|
||||
|
||||
tables_dependencies.removeTablesIf(need_exclude_dependency);
|
||||
|
||||
if (tables_dependencies.getNumberOfTables() != table_infos.size())
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "Number of tables to be restored is not as expected. It's a bug");
|
||||
|
||||
if (tables_dependencies.hasCyclicDependencies())
|
||||
{
|
||||
LOG_WARNING(
|
||||
log,
|
||||
"Tables {} in backup have cyclic dependencies: {}. Will try to ignore that and restore tables",
|
||||
fmt::join(tables_dependencies.getTablesWithCyclicDependencies(), ", "),
|
||||
tables_dependencies.describeCyclicDependencies());
|
||||
}
|
||||
}
|
||||
|
||||
void RestorerFromBackup::createTables()
|
||||
{
|
||||
while (true)
|
||||
/// We need to create tables considering their dependencies.
|
||||
auto tables_to_create = tables_dependencies.getTablesSortedByDependency();
|
||||
for (const auto & table_id : tables_to_create)
|
||||
{
|
||||
/// We need to create tables considering their dependencies.
|
||||
auto tables_to_create = findTablesWithoutDependencies();
|
||||
if (tables_to_create.empty())
|
||||
break; /// We've already created all the tables.
|
||||
|
||||
for (const auto & table_name : tables_to_create)
|
||||
{
|
||||
createTable(table_name);
|
||||
checkTable(table_name);
|
||||
insertDataToTable(table_name);
|
||||
}
|
||||
auto table_name = table_id.getQualifiedName();
|
||||
createTable(table_name);
|
||||
checkTable(table_name);
|
||||
insertDataToTable(table_name);
|
||||
}
|
||||
}
|
||||
|
||||
@ -752,62 +796,6 @@ void RestorerFromBackup::insertDataToTable(const QualifiedTableName & table_name
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the list of tables without dependencies or those which dependencies have been created before.
|
||||
std::vector<QualifiedTableName> RestorerFromBackup::findTablesWithoutDependencies() const
|
||||
{
|
||||
std::vector<QualifiedTableName> tables_without_dependencies;
|
||||
bool all_tables_created = true;
|
||||
|
||||
for (const auto & [key, table_info] : table_infos)
|
||||
{
|
||||
if (table_info.storage)
|
||||
continue;
|
||||
|
||||
/// Found a table which is not created yet.
|
||||
all_tables_created = false;
|
||||
|
||||
/// Check if all dependencies have been created before.
|
||||
bool all_dependencies_met = true;
|
||||
for (const auto & dependency : table_info.dependencies)
|
||||
{
|
||||
auto it = table_infos.find(dependency);
|
||||
if ((it != table_infos.end()) && !it->second.storage)
|
||||
{
|
||||
all_dependencies_met = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (all_dependencies_met)
|
||||
tables_without_dependencies.push_back(key);
|
||||
}
|
||||
|
||||
if (!tables_without_dependencies.empty())
|
||||
return tables_without_dependencies;
|
||||
|
||||
if (all_tables_created)
|
||||
return {};
|
||||
|
||||
/// Cyclic dependency? We'll try to create those tables anyway but probably it's going to fail.
|
||||
std::vector<QualifiedTableName> tables_with_cyclic_dependencies;
|
||||
for (const auto & [key, table_info] : table_infos)
|
||||
{
|
||||
if (!table_info.storage)
|
||||
tables_with_cyclic_dependencies.push_back(key);
|
||||
}
|
||||
|
||||
/// Only show a warning here, proper exception will be thrown later on creating those tables.
|
||||
LOG_WARNING(
|
||||
log,
|
||||
"Some tables have cyclic dependency from each other: {}",
|
||||
boost::algorithm::join(
|
||||
tables_with_cyclic_dependencies
|
||||
| boost::adaptors::transformed([](const QualifiedTableName & table_name) -> String { return table_name.getFullName(); }),
|
||||
", "));
|
||||
|
||||
return tables_with_cyclic_dependencies;
|
||||
}
|
||||
|
||||
void RestorerFromBackup::addDataRestoreTask(DataRestoreTask && new_task)
|
||||
{
|
||||
if (current_stage == Stage::INSERTING_DATA_TO_TABLES)
|
||||
|
@ -2,6 +2,7 @@
|
||||
|
||||
#include <Backups/RestoreSettings.h>
|
||||
#include <Databases/DDLRenamingVisitor.h>
|
||||
#include <Databases/TablesDependencyGraph.h>
|
||||
#include <Parsers/ASTBackupQuery.h>
|
||||
#include <Storages/TableLockHolder.h>
|
||||
#include <Storages/IStorage_fwd.h>
|
||||
@ -94,6 +95,7 @@ private:
|
||||
void createDatabase(const String & database_name) const;
|
||||
void checkDatabase(const String & database_name);
|
||||
|
||||
void removeUnresolvedDependencies();
|
||||
void createTables();
|
||||
void createTable(const QualifiedTableName & table_name);
|
||||
void checkTable(const QualifiedTableName & table_name);
|
||||
@ -114,7 +116,6 @@ private:
|
||||
{
|
||||
ASTPtr create_table_query;
|
||||
bool is_predefined_table = false;
|
||||
std::unordered_set<QualifiedTableName> dependencies;
|
||||
bool has_data = false;
|
||||
std::filesystem::path data_path_in_backup;
|
||||
std::optional<ASTs> partitions;
|
||||
@ -123,11 +124,10 @@ private:
|
||||
TableLockHolder table_lock;
|
||||
};
|
||||
|
||||
std::vector<QualifiedTableName> findTablesWithoutDependencies() const;
|
||||
|
||||
String current_stage;
|
||||
std::unordered_map<String, DatabaseInfo> database_infos;
|
||||
std::map<QualifiedTableName, TableInfo> table_infos;
|
||||
TablesDependencyGraph tables_dependencies;
|
||||
std::vector<DataRestoreTask> data_restore_tasks;
|
||||
std::unique_ptr<AccessRestorerFromBackup> access_restorer;
|
||||
bool access_restored = false;
|
||||
|
@ -236,7 +236,7 @@ int IBridge::main(const std::vector<std::string> & /*args*/)
|
||||
SensitiveDataMasker::setInstance(std::make_unique<SensitiveDataMasker>(config(), "query_masking_rules"));
|
||||
|
||||
auto server = HTTPServer(
|
||||
context,
|
||||
std::make_shared<HTTPContext>(context),
|
||||
getHandlerFactoryPtr(context),
|
||||
server_pool,
|
||||
socket,
|
||||
|
@ -148,7 +148,8 @@ void Connection::connect(const ConnectionTimeouts & timeouts)
|
||||
socket->setReceiveTimeout(timeouts.receive_timeout);
|
||||
socket->setSendTimeout(timeouts.send_timeout);
|
||||
socket->setNoDelay(true);
|
||||
if (timeouts.tcp_keep_alive_timeout.totalSeconds())
|
||||
int tcp_keep_alive_timeout_in_sec = timeouts.tcp_keep_alive_timeout.totalSeconds();
|
||||
if (tcp_keep_alive_timeout_in_sec)
|
||||
{
|
||||
socket->setKeepAlive(true);
|
||||
socket->setOption(IPPROTO_TCP,
|
||||
@ -157,7 +158,7 @@ void Connection::connect(const ConnectionTimeouts & timeouts)
|
||||
#else
|
||||
TCP_KEEPIDLE // __APPLE__
|
||||
#endif
|
||||
, timeouts.tcp_keep_alive_timeout);
|
||||
, tcp_keep_alive_timeout_in_sec);
|
||||
}
|
||||
|
||||
in = std::make_shared<ReadBufferFromPocoSocket>(*socket);
|
||||
|
@ -1,28 +1,16 @@
|
||||
#include <Interpreters/Aggregator.h>
|
||||
#include <Interpreters/AsynchronousMetrics.h>
|
||||
#include <Interpreters/AsynchronousMetricLog.h>
|
||||
#include <Interpreters/JIT/CompiledExpressionCache.h>
|
||||
#include <Interpreters/DatabaseCatalog.h>
|
||||
#include <Interpreters/Context.h>
|
||||
#include <Coordination/Keeper4LWInfo.h>
|
||||
#include <Coordination/KeeperDispatcher.h>
|
||||
#include <Common/AsynchronousMetrics.h>
|
||||
#include <Common/Exception.h>
|
||||
#include <Common/setThreadName.h>
|
||||
#include <Common/CurrentMetrics.h>
|
||||
#include <Common/typeid_cast.h>
|
||||
#include <Common/filesystemHelpers.h>
|
||||
#include <Interpreters/Cache/FileCacheFactory.h>
|
||||
#include <Common/getCurrentProcessFDCount.h>
|
||||
#include <Common/getMaxFileDescriptorCount.h>
|
||||
#include <Interpreters/Cache/FileCache.h>
|
||||
#include <Storages/MarkCache.h>
|
||||
#include <Storages/StorageMergeTree.h>
|
||||
#include <Storages/StorageReplicatedMergeTree.h>
|
||||
#include <Storages/MergeTree/MergeTreeMetadataCache.h>
|
||||
#include <Server/ProtocolServerAdapter.h>
|
||||
#include <IO/UncompressedCache.h>
|
||||
#include <IO/MMappedFileCache.h>
|
||||
#include <IO/ReadHelpers.h>
|
||||
#include <Databases/IDatabase.h>
|
||||
#include <base/errnoToString.h>
|
||||
#include <chrono>
|
||||
|
||||
@ -68,15 +56,11 @@ static std::unique_ptr<ReadBufferFromFilePRead> openFileIfExists(const std::stri
|
||||
|
||||
|
||||
AsynchronousMetrics::AsynchronousMetrics(
|
||||
ContextPtr global_context_,
|
||||
int update_period_seconds,
|
||||
int heavy_metrics_update_period_seconds,
|
||||
const ProtocolServerMetricsFunc & protocol_server_metrics_func_)
|
||||
: WithContext(global_context_)
|
||||
, update_period(update_period_seconds)
|
||||
, heavy_metric_update_period(heavy_metrics_update_period_seconds)
|
||||
, protocol_server_metrics_func(protocol_server_metrics_func_)
|
||||
: update_period(update_period_seconds)
|
||||
, log(&Poco::Logger::get("AsynchronousMetrics"))
|
||||
, protocol_server_metrics_func(protocol_server_metrics_func_)
|
||||
{
|
||||
#if defined(OS_LINUX)
|
||||
openFileIfExists("/proc/meminfo", meminfo);
|
||||
@ -360,22 +344,6 @@ void AsynchronousMetrics::run()
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
template <typename Max, typename T>
|
||||
static void calculateMax(Max & max, T x)
|
||||
{
|
||||
if (Max(x) > max)
|
||||
max = x;
|
||||
}
|
||||
|
||||
template <typename Max, typename Sum, typename T>
|
||||
static void calculateMaxAndSum(Max & max, Sum & sum, T x)
|
||||
{
|
||||
sum += x;
|
||||
if (Max(x) > max)
|
||||
max = x;
|
||||
}
|
||||
|
||||
#if USE_JEMALLOC
|
||||
uint64_t updateJemallocEpoch()
|
||||
{
|
||||
@ -575,91 +543,6 @@ void AsynchronousMetrics::update(TimePoint update_time)
|
||||
"The difference in time the thread for calculation of the asynchronous metrics was scheduled to wake up and the time it was in fact, woken up."
|
||||
" A proxy-indicator of overall system latency and responsiveness." };
|
||||
|
||||
if (auto mark_cache = getContext()->getMarkCache())
|
||||
{
|
||||
new_values["MarkCacheBytes"] = { mark_cache->weight(), "Total size of mark cache in bytes" };
|
||||
new_values["MarkCacheFiles"] = { mark_cache->count(), "Total number of mark files cached in the mark cache" };
|
||||
}
|
||||
|
||||
if (auto uncompressed_cache = getContext()->getUncompressedCache())
|
||||
{
|
||||
new_values["UncompressedCacheBytes"] = { uncompressed_cache->weight(),
|
||||
"Total size of uncompressed cache in bytes. Uncompressed cache does not usually improve the performance and should be mostly avoided." };
|
||||
new_values["UncompressedCacheCells"] = { uncompressed_cache->count(),
|
||||
"Total number of entries in the uncompressed cache. Each entry represents a decompressed block of data. Uncompressed cache does not usually improve performance and should be mostly avoided." };
|
||||
}
|
||||
|
||||
if (auto index_mark_cache = getContext()->getIndexMarkCache())
|
||||
{
|
||||
new_values["IndexMarkCacheBytes"] = { index_mark_cache->weight(), "Total size of mark cache for secondary indices in bytes." };
|
||||
new_values["IndexMarkCacheFiles"] = { index_mark_cache->count(), "Total number of mark files cached in the mark cache for secondary indices." };
|
||||
}
|
||||
|
||||
if (auto index_uncompressed_cache = getContext()->getIndexUncompressedCache())
|
||||
{
|
||||
new_values["IndexUncompressedCacheBytes"] = { index_uncompressed_cache->weight(),
|
||||
"Total size of uncompressed cache in bytes for secondary indices. Uncompressed cache does not usually improve the performance and should be mostly avoided." };
|
||||
new_values["IndexUncompressedCacheCells"] = { index_uncompressed_cache->count(),
|
||||
"Total number of entries in the uncompressed cache for secondary indices. Each entry represents a decompressed block of data. Uncompressed cache does not usually improve performance and should be mostly avoided." };
|
||||
}
|
||||
|
||||
if (auto mmap_cache = getContext()->getMMappedFileCache())
|
||||
{
|
||||
new_values["MMapCacheCells"] = { mmap_cache->count(),
|
||||
"The number of files opened with `mmap` (mapped in memory)."
|
||||
" This is used for queries with the setting `local_filesystem_read_method` set to `mmap`."
|
||||
" The files opened with `mmap` are kept in the cache to avoid costly TLB flushes."};
|
||||
}
|
||||
|
||||
{
|
||||
auto caches = FileCacheFactory::instance().getAll();
|
||||
size_t total_bytes = 0;
|
||||
size_t total_files = 0;
|
||||
|
||||
for (const auto & [_, cache_data] : caches)
|
||||
{
|
||||
total_bytes += cache_data->cache->getUsedCacheSize();
|
||||
total_files += cache_data->cache->getFileSegmentsNum();
|
||||
}
|
||||
|
||||
new_values["FilesystemCacheBytes"] = { total_bytes,
|
||||
"Total bytes in the `cache` virtual filesystem. This cache is hold on disk." };
|
||||
new_values["FilesystemCacheFiles"] = { total_files,
|
||||
"Total number of cached file segments in the `cache` virtual filesystem. This cache is hold on disk." };
|
||||
}
|
||||
|
||||
#if USE_ROCKSDB
|
||||
if (auto metadata_cache = getContext()->tryGetMergeTreeMetadataCache())
|
||||
{
|
||||
new_values["MergeTreeMetadataCacheSize"] = { metadata_cache->getEstimateNumKeys(),
|
||||
"The size of the metadata cache for tables. This cache is experimental and not used in production." };
|
||||
}
|
||||
#endif
|
||||
|
||||
#if USE_EMBEDDED_COMPILER
|
||||
if (auto * compiled_expression_cache = CompiledExpressionCacheFactory::instance().tryGetCache())
|
||||
{
|
||||
new_values["CompiledExpressionCacheBytes"] = { compiled_expression_cache->weight(),
|
||||
"Total bytes used for the cache of JIT-compiled code." };
|
||||
new_values["CompiledExpressionCacheCount"] = { compiled_expression_cache->count(),
|
||||
"Total entries in the cache of JIT-compiled code." };
|
||||
}
|
||||
#endif
|
||||
|
||||
new_values["Uptime"] = { getContext()->getUptimeSeconds(),
|
||||
"The server uptime in seconds. It includes the time spent for server initialization before accepting connections." };
|
||||
|
||||
if (const auto stats = getHashTablesCacheStatistics())
|
||||
{
|
||||
new_values["HashTableStatsCacheEntries"] = { stats->entries,
|
||||
"The number of entries in the cache of hash table sizes."
|
||||
" The cache for hash table sizes is used for predictive optimization of GROUP BY." };
|
||||
new_values["HashTableStatsCacheHits"] = { stats->hits,
|
||||
"The number of times the prediction of a hash table size was correct." };
|
||||
new_values["HashTableStatsCacheMisses"] = { stats->misses,
|
||||
"The number of times the prediction of a hash table size was incorrect." };
|
||||
}
|
||||
|
||||
#if defined(OS_LINUX) || defined(OS_FREEBSD)
|
||||
MemoryStatisticsOS::Data memory_statistics_data = memory_stat.get();
|
||||
#endif
|
||||
@ -1519,165 +1402,7 @@ void AsynchronousMetrics::update(TimePoint update_time)
|
||||
}
|
||||
#endif
|
||||
|
||||
/// Free space in filesystems at data path and logs path.
|
||||
{
|
||||
auto stat = getStatVFS(getContext()->getPath());
|
||||
|
||||
new_values["FilesystemMainPathTotalBytes"] = { stat.f_blocks * stat.f_frsize,
|
||||
"The size of the volume where the main ClickHouse path is mounted, in bytes." };
|
||||
new_values["FilesystemMainPathAvailableBytes"] = { stat.f_bavail * stat.f_frsize,
|
||||
"Available bytes on the volume where the main ClickHouse path is mounted." };
|
||||
new_values["FilesystemMainPathUsedBytes"] = { (stat.f_blocks - stat.f_bavail) * stat.f_frsize,
|
||||
"Used bytes on the volume where the main ClickHouse path is mounted." };
|
||||
new_values["FilesystemMainPathTotalINodes"] = { stat.f_files,
|
||||
"The total number of inodes on the volume where the main ClickHouse path is mounted. If it is less than 25 million, it indicates a misconfiguration." };
|
||||
new_values["FilesystemMainPathAvailableINodes"] = { stat.f_favail,
|
||||
"The number of available inodes on the volume where the main ClickHouse path is mounted. If it is close to zero, it indicates a misconfiguration, and you will get 'no space left on device' even when the disk is not full." };
|
||||
new_values["FilesystemMainPathUsedINodes"] = { stat.f_files - stat.f_favail,
|
||||
"The number of used inodes on the volume where the main ClickHouse path is mounted. This value mostly corresponds to the number of files." };
|
||||
}
|
||||
|
||||
{
|
||||
/// Current working directory of the server is the directory with logs.
|
||||
auto stat = getStatVFS(".");
|
||||
|
||||
new_values["FilesystemLogsPathTotalBytes"] = { stat.f_blocks * stat.f_frsize,
|
||||
"The size of the volume where ClickHouse logs path is mounted, in bytes. It's recommended to have at least 10 GB for logs." };
|
||||
new_values["FilesystemLogsPathAvailableBytes"] = { stat.f_bavail * stat.f_frsize,
|
||||
"Available bytes on the volume where ClickHouse logs path is mounted. If this value approaches zero, you should tune the log rotation in the configuration file." };
|
||||
new_values["FilesystemLogsPathUsedBytes"] = { (stat.f_blocks - stat.f_bavail) * stat.f_frsize,
|
||||
"Used bytes on the volume where ClickHouse logs path is mounted." };
|
||||
new_values["FilesystemLogsPathTotalINodes"] = { stat.f_files,
|
||||
"The total number of inodes on the volume where ClickHouse logs path is mounted." };
|
||||
new_values["FilesystemLogsPathAvailableINodes"] = { stat.f_favail,
|
||||
"The number of available inodes on the volume where ClickHouse logs path is mounted." };
|
||||
new_values["FilesystemLogsPathUsedINodes"] = { stat.f_files - stat.f_favail,
|
||||
"The number of used inodes on the volume where ClickHouse logs path is mounted." };
|
||||
}
|
||||
|
||||
/// Free and total space on every configured disk.
|
||||
{
|
||||
DisksMap disks_map = getContext()->getDisksMap();
|
||||
for (const auto & [name, disk] : disks_map)
|
||||
{
|
||||
auto total = disk->getTotalSpace();
|
||||
|
||||
/// Some disks don't support information about the space.
|
||||
if (!total)
|
||||
continue;
|
||||
|
||||
auto available = disk->getAvailableSpace();
|
||||
auto unreserved = disk->getUnreservedSpace();
|
||||
|
||||
new_values[fmt::format("DiskTotal_{}", name)] = { total,
|
||||
"The total size in bytes of the disk (virtual filesystem). Remote filesystems can show a large value like 16 EiB." };
|
||||
new_values[fmt::format("DiskUsed_{}", name)] = { total - available,
|
||||
"Used bytes on the disk (virtual filesystem). Remote filesystems not always provide this information." };
|
||||
new_values[fmt::format("DiskAvailable_{}", name)] = { available,
|
||||
"Available bytes on the disk (virtual filesystem). Remote filesystems can show a large value like 16 EiB." };
|
||||
new_values[fmt::format("DiskUnreserved_{}", name)] = { unreserved,
|
||||
"Available bytes on the disk (virtual filesystem) without the reservations for merges, fetches, and moves. Remote filesystems can show a large value like 16 EiB." };
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
auto databases = DatabaseCatalog::instance().getDatabases();
|
||||
|
||||
size_t max_queue_size = 0;
|
||||
size_t max_inserts_in_queue = 0;
|
||||
size_t max_merges_in_queue = 0;
|
||||
|
||||
size_t sum_queue_size = 0;
|
||||
size_t sum_inserts_in_queue = 0;
|
||||
size_t sum_merges_in_queue = 0;
|
||||
|
||||
size_t max_absolute_delay = 0;
|
||||
size_t max_relative_delay = 0;
|
||||
|
||||
size_t max_part_count_for_partition = 0;
|
||||
|
||||
size_t number_of_databases = databases.size();
|
||||
size_t total_number_of_tables = 0;
|
||||
|
||||
size_t total_number_of_bytes = 0;
|
||||
size_t total_number_of_rows = 0;
|
||||
size_t total_number_of_parts = 0;
|
||||
|
||||
for (const auto & db : databases)
|
||||
{
|
||||
/// Check if database can contain MergeTree tables
|
||||
if (!db.second->canContainMergeTreeTables())
|
||||
continue;
|
||||
|
||||
for (auto iterator = db.second->getTablesIterator(getContext()); iterator->isValid(); iterator->next())
|
||||
{
|
||||
++total_number_of_tables;
|
||||
const auto & table = iterator->table();
|
||||
if (!table)
|
||||
continue;
|
||||
|
||||
if (MergeTreeData * table_merge_tree = dynamic_cast<MergeTreeData *>(table.get()))
|
||||
{
|
||||
const auto & settings = getContext()->getSettingsRef();
|
||||
|
||||
calculateMax(max_part_count_for_partition, table_merge_tree->getMaxPartsCountAndSizeForPartition().first);
|
||||
total_number_of_bytes += table_merge_tree->totalBytes(settings).value();
|
||||
total_number_of_rows += table_merge_tree->totalRows(settings).value();
|
||||
total_number_of_parts += table_merge_tree->getPartsCount();
|
||||
}
|
||||
|
||||
if (StorageReplicatedMergeTree * table_replicated_merge_tree = typeid_cast<StorageReplicatedMergeTree *>(table.get()))
|
||||
{
|
||||
StorageReplicatedMergeTree::Status status;
|
||||
table_replicated_merge_tree->getStatus(status, false);
|
||||
|
||||
calculateMaxAndSum(max_queue_size, sum_queue_size, status.queue.queue_size);
|
||||
calculateMaxAndSum(max_inserts_in_queue, sum_inserts_in_queue, status.queue.inserts_in_queue);
|
||||
calculateMaxAndSum(max_merges_in_queue, sum_merges_in_queue, status.queue.merges_in_queue);
|
||||
|
||||
if (!status.is_readonly)
|
||||
{
|
||||
try
|
||||
{
|
||||
time_t absolute_delay = 0;
|
||||
time_t relative_delay = 0;
|
||||
table_replicated_merge_tree->getReplicaDelays(absolute_delay, relative_delay);
|
||||
|
||||
calculateMax(max_absolute_delay, absolute_delay);
|
||||
calculateMax(max_relative_delay, relative_delay);
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
tryLogCurrentException(__PRETTY_FUNCTION__,
|
||||
"Cannot get replica delay for table: " + backQuoteIfNeed(db.first) + "." + backQuoteIfNeed(iterator->name()));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
new_values["ReplicasMaxQueueSize"] = { max_queue_size, "Maximum queue size (in the number of operations like get, merge) across Replicated tables." };
|
||||
new_values["ReplicasMaxInsertsInQueue"] = { max_inserts_in_queue, "Maximum number of INSERT operations in the queue (still to be replicated) across Replicated tables." };
|
||||
new_values["ReplicasMaxMergesInQueue"] = { max_merges_in_queue, "Maximum number of merge operations in the queue (still to be applied) across Replicated tables." };
|
||||
|
||||
new_values["ReplicasSumQueueSize"] = { sum_queue_size, "Sum queue size (in the number of operations like get, merge) across Replicated tables." };
|
||||
new_values["ReplicasSumInsertsInQueue"] = { sum_inserts_in_queue, "Sum of INSERT operations in the queue (still to be replicated) across Replicated tables." };
|
||||
new_values["ReplicasSumMergesInQueue"] = { sum_merges_in_queue, "Sum of merge operations in the queue (still to be applied) across Replicated tables." };
|
||||
|
||||
new_values["ReplicasMaxAbsoluteDelay"] = { max_absolute_delay, "Maximum difference in seconds between the most fresh replicated part and the most fresh data part still to be replicated, across Replicated tables. A very high value indicates a replica with no data." };
|
||||
new_values["ReplicasMaxRelativeDelay"] = { max_relative_delay, "Maximum difference between the replica delay and the delay of the most up-to-date replica of the same table, across Replicated tables." };
|
||||
|
||||
new_values["MaxPartCountForPartition"] = { max_part_count_for_partition, "Maximum number of parts per partition across all partitions of all tables of MergeTree family. Values larger than 300 indicates misconfiguration, overload, or massive data loading." };
|
||||
|
||||
new_values["NumberOfDatabases"] = { number_of_databases, "Total number of databases on the server." };
|
||||
new_values["NumberOfTables"] = { total_number_of_tables, "Total number of tables summed across the databases on the server, excluding the databases that cannot contain MergeTree tables."
|
||||
" The excluded database engines are those who generate the set of tables on the fly, like `Lazy`, `MySQL`, `PostgreSQL`, `SQlite`."};
|
||||
|
||||
new_values["TotalBytesOfMergeTreeTables"] = { total_number_of_bytes, "Total amount of bytes (compressed, including data and indices) stored in all tables of MergeTree family." };
|
||||
new_values["TotalRowsOfMergeTreeTables"] = { total_number_of_rows, "Total amount of rows (records) stored in all tables of MergeTree family." };
|
||||
new_values["TotalPartsOfMergeTreeTables"] = { total_number_of_parts, "Total amount of data parts in all tables of MergeTree family."
|
||||
" Numbers larger than 10 000 will negatively affect the server startup time and it may indicate unreasonable choice of the partition key." };
|
||||
|
||||
auto get_metric_name_doc = [](const String & name) -> std::pair<const char *, const char *>
|
||||
{
|
||||
static std::map<String, std::pair<const char *, const char *>> metric_map =
|
||||
@ -1691,7 +1416,9 @@ void AsynchronousMetrics::update(TimePoint update_time)
|
||||
{"mysql_port", {"MySQLThreads", "Number of threads in the server of the MySQL compatibility protocol."}},
|
||||
{"postgresql_port", {"PostgreSQLThreads", "Number of threads in the server of the PostgreSQL compatibility protocol."}},
|
||||
{"grpc_port", {"GRPCThreads", "Number of threads in the server of the GRPC protocol."}},
|
||||
{"prometheus.port", {"PrometheusThreads", "Number of threads in the server of the Prometheus endpoint. Note: prometheus endpoints can be also used via the usual HTTP/HTTPs ports."}}
|
||||
{"prometheus.port", {"PrometheusThreads", "Number of threads in the server of the Prometheus endpoint. Note: prometheus endpoints can be also used via the usual HTTP/HTTPs ports."}},
|
||||
{"keeper_server.tcp_port", {"KeeperTCPThreads", "Number of threads in the server of the Keeper TCP protocol (without TLS)."}},
|
||||
{"keeper_server.tcp_port_secure", {"KeeperTCPSecureThreads", "Number of threads in the server of the Keeper TCP protocol (with TLS)."}}
|
||||
};
|
||||
auto it = metric_map.find(name);
|
||||
if (it == metric_map.end())
|
||||
@ -1707,102 +1434,14 @@ void AsynchronousMetrics::update(TimePoint update_time)
|
||||
new_values[name_doc.first] = { server_metric.current_threads, name_doc.second };
|
||||
}
|
||||
}
|
||||
#if USE_NURAFT
|
||||
{
|
||||
auto keeper_dispatcher = getContext()->tryGetKeeperDispatcher();
|
||||
if (keeper_dispatcher)
|
||||
{
|
||||
size_t is_leader = 0;
|
||||
size_t is_follower = 0;
|
||||
size_t is_observer = 0;
|
||||
size_t is_standalone = 0;
|
||||
size_t znode_count = 0;
|
||||
size_t watch_count = 0;
|
||||
size_t ephemerals_count = 0;
|
||||
size_t approximate_data_size = 0;
|
||||
size_t key_arena_size = 0;
|
||||
size_t latest_snapshot_size = 0;
|
||||
size_t open_file_descriptor_count = 0;
|
||||
size_t max_file_descriptor_count = 0;
|
||||
size_t followers = 0;
|
||||
size_t synced_followers = 0;
|
||||
size_t zxid = 0;
|
||||
size_t session_with_watches = 0;
|
||||
size_t paths_watched = 0;
|
||||
size_t snapshot_dir_size = 0;
|
||||
size_t log_dir_size = 0;
|
||||
|
||||
if (keeper_dispatcher->isServerActive())
|
||||
{
|
||||
auto keeper_info = keeper_dispatcher -> getKeeper4LWInfo();
|
||||
is_standalone = static_cast<size_t>(keeper_info.is_standalone);
|
||||
is_leader = static_cast<size_t>(keeper_info.is_leader);
|
||||
is_observer = static_cast<size_t>(keeper_info.is_observer);
|
||||
is_follower = static_cast<size_t>(keeper_info.is_follower);
|
||||
|
||||
zxid = keeper_info.last_zxid;
|
||||
const auto & state_machine = keeper_dispatcher->getStateMachine();
|
||||
znode_count = state_machine.getNodesCount();
|
||||
watch_count = state_machine.getTotalWatchesCount();
|
||||
ephemerals_count = state_machine.getTotalEphemeralNodesCount();
|
||||
approximate_data_size = state_machine.getApproximateDataSize();
|
||||
key_arena_size = state_machine.getKeyArenaSize();
|
||||
latest_snapshot_size = state_machine.getLatestSnapshotBufSize();
|
||||
session_with_watches = state_machine.getSessionsWithWatchesCount();
|
||||
paths_watched = state_machine.getWatchedPathsCount();
|
||||
snapshot_dir_size = keeper_dispatcher->getSnapDirSize();
|
||||
log_dir_size = keeper_dispatcher->getLogDirSize();
|
||||
|
||||
#if defined(__linux__) || defined(__APPLE__)
|
||||
open_file_descriptor_count = getCurrentProcessFDCount();
|
||||
max_file_descriptor_count = getMaxFileDescriptorCount();
|
||||
#endif
|
||||
|
||||
if (keeper_info.is_leader)
|
||||
{
|
||||
followers = keeper_info.follower_count;
|
||||
synced_followers = keeper_info.synced_follower_count;
|
||||
}
|
||||
}
|
||||
|
||||
new_values["KeeperIsLeader"] = { is_leader, "1 if ClickHouse Keeper is a leader, 0 otherwise." };
|
||||
new_values["KeeperIsFollower"] = { is_follower, "1 if ClickHouse Keeper is a follower, 0 otherwise." };
|
||||
new_values["KeeperIsObserver"] = { is_observer, "1 if ClickHouse Keeper is an observer, 0 otherwise." };
|
||||
new_values["KeeperIsStandalone"] = { is_standalone, "1 if ClickHouse Keeper is in a standalone mode, 0 otherwise." };
|
||||
|
||||
new_values["KeeperZnodeCount"] = { znode_count, "The number of nodes (data entries) in ClickHouse Keeper." };
|
||||
new_values["KeeperWatchCount"] = { watch_count, "The number of watches in ClickHouse Keeper." };
|
||||
new_values["KeeperEphemeralsCount"] = { ephemerals_count, "The number of ephemeral nodes in ClickHouse Keeper." };
|
||||
|
||||
new_values["KeeperApproximateDataSize"] = { approximate_data_size, "The approximate data size of ClickHouse Keeper, in bytes." };
|
||||
new_values["KeeperKeyArenaSize"] = { key_arena_size, "The size in bytes of the memory arena for keys in ClickHouse Keeper." };
|
||||
new_values["KeeperLatestSnapshotSize"] = { latest_snapshot_size, "The uncompressed size in bytes of the latest snapshot created by ClickHouse Keeper." };
|
||||
|
||||
new_values["KeeperOpenFileDescriptorCount"] = { open_file_descriptor_count, "The number of open file descriptors in ClickHouse Keeper." };
|
||||
new_values["KeeperMaxFileDescriptorCount"] = { max_file_descriptor_count, "The maximum number of open file descriptors in ClickHouse Keeper." };
|
||||
|
||||
new_values["KeeperFollowers"] = { followers, "The number of followers of ClickHouse Keeper." };
|
||||
new_values["KeeperSyncedFollowers"] = { synced_followers, "The number of followers of ClickHouse Keeper who are also in-sync." };
|
||||
new_values["KeeperZxid"] = { zxid, "The current transaction id number (zxid) in ClickHouse Keeper." };
|
||||
new_values["KeeperSessionWithWatches"] = { session_with_watches, "The number of client sessions of ClickHouse Keeper having watches." };
|
||||
new_values["KeeperPathsWatched"] = { paths_watched, "The number of different paths watched by the clients of ClickHouse Keeper." };
|
||||
new_values["KeeperSnapshotDirSize"] = { snapshot_dir_size, "The size of the snapshots directory of ClickHouse Keeper, in bytes." };
|
||||
new_values["KeeperLogDirSize"] = { log_dir_size, "The size of the logs directory of ClickHouse Keeper, in bytes." };
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
updateHeavyMetricsIfNeeded(current_time, update_time, new_values);
|
||||
|
||||
/// Add more metrics as you wish.
|
||||
|
||||
updateImpl(new_values, update_time, current_time);
|
||||
|
||||
new_values["AsynchronousMetricsCalculationTimeSpent"] = { watch.elapsedSeconds(), "Time in seconds spent for calculation of asynchronous metrics (this is the overhead of asynchronous metrics)." };
|
||||
|
||||
/// Log the new metrics.
|
||||
if (auto asynchronous_metric_log = getContext()->getAsynchronousMetricLog())
|
||||
{
|
||||
asynchronous_metric_log->addValues(new_values);
|
||||
}
|
||||
logImpl(new_values);
|
||||
|
||||
first_run = false;
|
||||
|
||||
@ -1811,75 +1450,4 @@ void AsynchronousMetrics::update(TimePoint update_time)
|
||||
values = new_values;
|
||||
}
|
||||
|
||||
void AsynchronousMetrics::updateDetachedPartsStats()
|
||||
{
|
||||
DetachedPartsStats current_values{};
|
||||
|
||||
for (const auto & db : DatabaseCatalog::instance().getDatabases())
|
||||
{
|
||||
if (!db.second->canContainMergeTreeTables())
|
||||
continue;
|
||||
|
||||
for (auto iterator = db.second->getTablesIterator(getContext()); iterator->isValid(); iterator->next())
|
||||
{
|
||||
const auto & table = iterator->table();
|
||||
if (!table)
|
||||
continue;
|
||||
|
||||
if (MergeTreeData * table_merge_tree = dynamic_cast<MergeTreeData *>(table.get()))
|
||||
{
|
||||
for (const auto & detached_part: table_merge_tree->getDetachedParts())
|
||||
{
|
||||
if (!detached_part.valid_name)
|
||||
continue;
|
||||
|
||||
if (detached_part.prefix.empty())
|
||||
++current_values.detached_by_user;
|
||||
|
||||
++current_values.count;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
detached_parts_stats = current_values;
|
||||
}
|
||||
|
||||
void AsynchronousMetrics::updateHeavyMetricsIfNeeded(TimePoint current_time, TimePoint update_time, AsynchronousMetricValues & new_values)
|
||||
{
|
||||
const auto time_after_previous_update = current_time - heavy_metric_previous_update_time;
|
||||
const bool update_heavy_metric = time_after_previous_update >= heavy_metric_update_period || first_run;
|
||||
|
||||
if (update_heavy_metric)
|
||||
{
|
||||
heavy_metric_previous_update_time = update_time;
|
||||
|
||||
Stopwatch watch;
|
||||
|
||||
/// Tests show that listing 100000 entries takes around 0.15 sec.
|
||||
updateDetachedPartsStats();
|
||||
|
||||
watch.stop();
|
||||
|
||||
/// Normally, heavy metrics don't delay the rest of the metrics calculation;
|
||||
/// otherwise, log a warning message.
|
||||
auto log_level = std::make_pair(DB::LogsLevel::trace, Poco::Message::PRIO_TRACE);
|
||||
if (watch.elapsedSeconds() > (update_period.count() / 2.))
|
||||
log_level = std::make_pair(DB::LogsLevel::debug, Poco::Message::PRIO_DEBUG);
|
||||
else if (watch.elapsedSeconds() > (update_period.count() / 4. * 3))
|
||||
log_level = std::make_pair(DB::LogsLevel::warning, Poco::Message::PRIO_WARNING);
|
||||
LOG_IMPL(log, log_level.first, log_level.second,
|
||||
"Update heavy metrics. "
|
||||
"Update period {} sec. "
|
||||
"Update heavy metrics period {} sec. "
|
||||
"Heavy metrics calculation elapsed: {} sec.",
|
||||
update_period.count(),
|
||||
heavy_metric_update_period.count(),
|
||||
watch.elapsedSeconds());
|
||||
}
|
||||
|
||||
new_values["NumberOfDetachedParts"] = { detached_parts_stats.count, "The total number of parts detached from MergeTree tables. A part can be detached by a user with the `ALTER TABLE DETACH` query or by the server itself it the part is broken, unexpected or unneeded. The server does not care about detached parts and they can be removed." };
|
||||
new_values["NumberOfDetachedByUserParts"] = { detached_parts_stats.detached_by_user, "The total number of parts detached from MergeTree tables by users with the `ALTER TABLE DETACH` query (as opposed to unexpected, broken or ignored parts). The server does not care about detached parts and they can be removed." };
|
||||
}
|
||||
|
||||
}
|
@ -1,6 +1,5 @@
|
||||
#pragma once
|
||||
|
||||
#include <Interpreters/Context_fwd.h>
|
||||
#include <Common/MemoryStatisticsOS.h>
|
||||
#include <Common/ThreadPool.h>
|
||||
#include <Common/Stopwatch.h>
|
||||
@ -55,17 +54,15 @@ struct ProtocolServerMetrics
|
||||
* All the values are either gauge type (like the total number of tables, the current memory usage).
|
||||
* Or delta-counters representing some accumulation during the interval of time.
|
||||
*/
|
||||
class AsynchronousMetrics : WithContext
|
||||
class AsynchronousMetrics
|
||||
{
|
||||
public:
|
||||
using ProtocolServerMetricsFunc = std::function<std::vector<ProtocolServerMetrics>()>;
|
||||
AsynchronousMetrics(
|
||||
ContextPtr global_context_,
|
||||
int update_period_seconds,
|
||||
int heavy_metrics_update_period_seconds,
|
||||
const ProtocolServerMetricsFunc & protocol_server_metrics_func_);
|
||||
|
||||
~AsynchronousMetrics();
|
||||
virtual ~AsynchronousMetrics();
|
||||
|
||||
/// Separate method allows to initialize the `servers` variable beforehand.
|
||||
void start();
|
||||
@ -75,12 +72,22 @@ public:
|
||||
/// Returns copy of all values.
|
||||
AsynchronousMetricValues getValues() const;
|
||||
|
||||
private:
|
||||
protected:
|
||||
using Duration = std::chrono::seconds;
|
||||
using TimePoint = std::chrono::system_clock::time_point;
|
||||
|
||||
const Duration update_period;
|
||||
const Duration heavy_metric_update_period;
|
||||
|
||||
/// Some values are incremental and we have to calculate the difference.
|
||||
/// On first run we will only collect the values to subtract later.
|
||||
bool first_run = true;
|
||||
TimePoint previous_update_time;
|
||||
|
||||
Poco::Logger * log;
|
||||
private:
|
||||
virtual void updateImpl(AsynchronousMetricValues & new_values, TimePoint update_time, TimePoint current_time) = 0;
|
||||
virtual void logImpl(AsynchronousMetricValues &) {}
|
||||
|
||||
ProtocolServerMetricsFunc protocol_server_metrics_func;
|
||||
|
||||
mutable std::mutex mutex;
|
||||
@ -88,20 +95,6 @@ private:
|
||||
bool quit {false};
|
||||
AsynchronousMetricValues values;
|
||||
|
||||
/// Some values are incremental and we have to calculate the difference.
|
||||
/// On first run we will only collect the values to subtract later.
|
||||
bool first_run = true;
|
||||
TimePoint previous_update_time;
|
||||
TimePoint heavy_metric_previous_update_time;
|
||||
|
||||
struct DetachedPartsStats
|
||||
{
|
||||
size_t count;
|
||||
size_t detached_by_user;
|
||||
};
|
||||
|
||||
DetachedPartsStats detached_parts_stats{};
|
||||
|
||||
#if defined(OS_LINUX) || defined(OS_FREEBSD)
|
||||
MemoryStatisticsOS memory_stat;
|
||||
#endif
|
||||
@ -212,11 +205,6 @@ private:
|
||||
|
||||
void run();
|
||||
void update(TimePoint update_time);
|
||||
|
||||
void updateDetachedPartsStats();
|
||||
void updateHeavyMetricsIfNeeded(TimePoint current_time, TimePoint update_time, AsynchronousMetricValues & new_values);
|
||||
|
||||
Poco::Logger * log;
|
||||
};
|
||||
|
||||
}
|
@ -1204,6 +1204,11 @@ public:
|
||||
return res;
|
||||
}
|
||||
|
||||
template <typename DateOrTime>
|
||||
inline DateTimeComponents toDateTimeComponents(DateOrTime v) const
|
||||
{
|
||||
return toDateTimeComponents(lut[toLUTIndex(v)].date);
|
||||
}
|
||||
|
||||
inline UInt64 toNumYYYYMMDDhhmmss(Time t) const
|
||||
{
|
||||
|
@ -637,8 +637,9 @@
|
||||
M(666, CANNOT_USE_CACHE) \
|
||||
M(667, NOT_INITIALIZED) \
|
||||
M(668, INVALID_STATE) \
|
||||
M(669, UNKNOWN_NAMED_COLLECTION) \
|
||||
M(669, NAMED_COLLECTION_DOESNT_EXIST) \
|
||||
M(670, NAMED_COLLECTION_ALREADY_EXISTS) \
|
||||
M(671, NAMED_COLLECTION_IS_IMMUTABLE) \
|
||||
\
|
||||
M(999, KEEPER_EXCEPTION) \
|
||||
M(1000, POCO_EXCEPTION) \
|
||||
|
@ -2,7 +2,7 @@
|
||||
|
||||
#include <IO/WriteHelpers.h>
|
||||
#include <Common/VariableContext.h>
|
||||
#include <Interpreters/TraceCollector.h>
|
||||
#include <Common/TraceSender.h>
|
||||
#include <Common/Exception.h>
|
||||
#include <Common/LockMemoryExceptionInThread.h>
|
||||
#include <Common/MemoryTrackerBlockerInThread.h>
|
||||
@ -178,7 +178,7 @@ void MemoryTracker::allocImpl(Int64 size, bool throw_if_memory_exceeded, MemoryT
|
||||
if (unlikely(current_profiler_limit && will_be > current_profiler_limit))
|
||||
{
|
||||
MemoryTrackerBlockerInThread untrack_lock(VariableContext::Global);
|
||||
DB::TraceCollector::collect(DB::TraceType::Memory, StackTrace(), size);
|
||||
DB::TraceSender::send(DB::TraceType::Memory, StackTrace(), {.size = size});
|
||||
setOrRaiseProfilerLimit((will_be + profiler_step - 1) / profiler_step * profiler_step);
|
||||
allocation_traced = true;
|
||||
}
|
||||
@ -187,7 +187,7 @@ void MemoryTracker::allocImpl(Int64 size, bool throw_if_memory_exceeded, MemoryT
|
||||
if (unlikely(sample_probability > 0.0 && sample(thread_local_rng)))
|
||||
{
|
||||
MemoryTrackerBlockerInThread untrack_lock(VariableContext::Global);
|
||||
DB::TraceCollector::collect(DB::TraceType::MemorySample, StackTrace(), size);
|
||||
DB::TraceSender::send(DB::TraceType::MemorySample, StackTrace(), {.size = size});
|
||||
allocation_traced = true;
|
||||
}
|
||||
|
||||
@ -305,7 +305,7 @@ void MemoryTracker::allocImpl(Int64 size, bool throw_if_memory_exceeded, MemoryT
|
||||
if (peak_updated && allocation_traced)
|
||||
{
|
||||
MemoryTrackerBlockerInThread untrack_lock(VariableContext::Global);
|
||||
DB::TraceCollector::collect(DB::TraceType::MemoryPeak, StackTrace(), will_be);
|
||||
DB::TraceSender::send(DB::TraceType::MemoryPeak, StackTrace(), {.size = will_be});
|
||||
}
|
||||
|
||||
if (auto * loaded_next = parent.load(std::memory_order_relaxed))
|
||||
@ -361,7 +361,7 @@ void MemoryTracker::free(Int64 size)
|
||||
if (unlikely(sample_probability > 0.0 && sample(thread_local_rng)))
|
||||
{
|
||||
MemoryTrackerBlockerInThread untrack_lock(VariableContext::Global);
|
||||
DB::TraceCollector::collect(DB::TraceType::MemorySample, StackTrace(), -size);
|
||||
DB::TraceSender::send(DB::TraceType::MemorySample, StackTrace(), {.size = -size});
|
||||
}
|
||||
|
||||
Int64 accounted_size = size;
|
||||
|
@ -1,5 +1,6 @@
|
||||
#include <Common/ProfileEvents.h>
|
||||
#include <Common/CurrentThread.h>
|
||||
#include <Common/TraceSender.h>
|
||||
|
||||
|
||||
/// Available events. Add something here as you wish.
|
||||
@ -433,6 +434,15 @@ The server successfully detected this situation and will download merged part fr
|
||||
M(KeeperSnapshotApplysFailed, "Number of failed snapshot applying")\
|
||||
M(KeeperReadSnapshot, "Number of snapshot read(serialization)")\
|
||||
M(KeeperSaveSnapshot, "Number of snapshot save")\
|
||||
M(KeeperCreateRequest, "Number of create requests")\
|
||||
M(KeeperRemoveRequest, "Number of remove requests")\
|
||||
M(KeeperSetRequest, "Number of set requests")\
|
||||
M(KeeperCheckRequest, "Number of check requests")\
|
||||
M(KeeperMultiRequest, "Number of multi requests")\
|
||||
M(KeeperMultiReadRequest, "Number of multi read requests")\
|
||||
M(KeeperGetRequest, "Number of get requests")\
|
||||
M(KeeperListRequest, "Number of list requests")\
|
||||
M(KeeperExistsRequest, "Number of exists requests")\
|
||||
\
|
||||
M(OverflowBreak, "Number of times, data processing was cancelled by query complexity limitation with setting '*_overflow_mode' = 'break' and the result is incomplete.") \
|
||||
M(OverflowThrow, "Number of times, data processing was cancelled by query complexity limitation with setting '*_overflow_mode' = 'throw' and exception was thrown.") \
|
||||
@ -514,15 +524,29 @@ const char * getDocumentation(Event event)
|
||||
return strings[event];
|
||||
}
|
||||
|
||||
|
||||
Event end() { return END; }
|
||||
|
||||
|
||||
void increment(Event event, Count amount)
|
||||
{
|
||||
DB::CurrentThread::getProfileEvents().increment(event, amount);
|
||||
}
|
||||
|
||||
void Counters::increment(Event event, Count amount)
|
||||
{
|
||||
Counters * current = this;
|
||||
bool send_to_trace_log = false;
|
||||
|
||||
do
|
||||
{
|
||||
send_to_trace_log |= current->trace_profile_events;
|
||||
current->counters[event].fetch_add(amount, std::memory_order_relaxed);
|
||||
current = current->parent;
|
||||
} while (current != nullptr);
|
||||
|
||||
if (unlikely(send_to_trace_log))
|
||||
DB::TraceSender::send(DB::TraceType::ProfileEvent, StackTrace(), {.event = event, .increment = amount});
|
||||
}
|
||||
|
||||
CountersIncrement::CountersIncrement(Counters::Snapshot const & snapshot)
|
||||
{
|
||||
init();
|
||||
|
@ -25,10 +25,12 @@ namespace ProfileEvents
|
||||
|
||||
class Counters
|
||||
{
|
||||
private:
|
||||
Counter * counters = nullptr;
|
||||
std::unique_ptr<Counter[]> counters_holder;
|
||||
/// Used to propagate increments
|
||||
Counters * parent = nullptr;
|
||||
bool trace_profile_events = false;
|
||||
|
||||
public:
|
||||
|
||||
@ -51,15 +53,7 @@ namespace ProfileEvents
|
||||
return counters[event];
|
||||
}
|
||||
|
||||
inline void increment(Event event, Count amount = 1)
|
||||
{
|
||||
Counters * current = this;
|
||||
do
|
||||
{
|
||||
current->counters[event].fetch_add(amount, std::memory_order_relaxed);
|
||||
current = current->parent;
|
||||
} while (current != nullptr);
|
||||
}
|
||||
void increment(Event event, Count amount = 1);
|
||||
|
||||
struct Snapshot
|
||||
{
|
||||
@ -97,6 +91,11 @@ namespace ProfileEvents
|
||||
parent = parent_;
|
||||
}
|
||||
|
||||
void setTraceProfileEvents(bool value)
|
||||
{
|
||||
trace_profile_events = value;
|
||||
}
|
||||
|
||||
/// Set all counters to zero
|
||||
void resetCounters();
|
||||
|
||||
|
@ -1,7 +1,7 @@
|
||||
#include "QueryProfiler.h"
|
||||
|
||||
#include <IO/WriteHelpers.h>
|
||||
#include <Interpreters/TraceCollector.h>
|
||||
#include <Common/TraceSender.h>
|
||||
#include <Common/Exception.h>
|
||||
#include <Common/StackTrace.h>
|
||||
#include <Common/thread_local_rng.h>
|
||||
@ -66,7 +66,7 @@ namespace
|
||||
const auto signal_context = *reinterpret_cast<ucontext_t *>(context);
|
||||
const StackTrace stack_trace(signal_context);
|
||||
|
||||
TraceCollector::collect(trace_type, stack_trace, 0);
|
||||
TraceSender::send(trace_type, stack_trace, {});
|
||||
ProfileEvents::increment(ProfileEvents::QueryProfilerRuns);
|
||||
|
||||
errno = saved_errno;
|
||||
|
@ -14,7 +14,7 @@ namespace
|
||||
/// The performance test query ids can be surprisingly long like
|
||||
/// `aggregating_merge_tree_simple_aggregate_function_string.query100.profile100`,
|
||||
/// so make some allowance for them as well.
|
||||
constexpr size_t QUERY_ID_MAX_LEN = 128;
|
||||
constexpr size_t QUERY_ID_MAX_LEN = 100;
|
||||
static_assert(QUERY_ID_MAX_LEN <= std::numeric_limits<uint8_t>::max());
|
||||
}
|
||||
|
||||
@ -23,7 +23,7 @@ namespace DB
|
||||
|
||||
LazyPipeFDs TraceSender::pipe;
|
||||
|
||||
void TraceSender::send(TraceType trace_type, const StackTrace & stack_trace, Int64 size)
|
||||
void TraceSender::send(TraceType trace_type, const StackTrace & stack_trace, Extras extras)
|
||||
{
|
||||
constexpr size_t buf_size = sizeof(char) /// TraceCollector stop flag
|
||||
+ sizeof(UInt8) /// String size
|
||||
@ -32,12 +32,14 @@ void TraceSender::send(TraceType trace_type, const StackTrace & stack_trace, Int
|
||||
+ sizeof(StackTrace::FramePointers) /// Collected stack trace, maximum capacity
|
||||
+ sizeof(TraceType) /// trace type
|
||||
+ sizeof(UInt64) /// thread_id
|
||||
+ sizeof(Int64); /// size
|
||||
+ sizeof(Int64) /// size
|
||||
+ sizeof(ProfileEvents::Event) /// event
|
||||
+ sizeof(ProfileEvents::Count); /// increment
|
||||
|
||||
/// Write should be atomic to avoid overlaps
|
||||
/// (since recursive collect() is possible)
|
||||
static_assert(PIPE_BUF >= 512);
|
||||
static_assert(buf_size <= 512, "Only write of PIPE_BUF to pipe is atomic and the minimal known PIPE_BUF across supported platforms is 512");
|
||||
static_assert(buf_size <= PIPE_BUF, "Only write of PIPE_BUF to pipe is atomic and the minimal known PIPE_BUF across supported platforms is 512");
|
||||
|
||||
char buffer[buf_size];
|
||||
WriteBufferFromFileDescriptorDiscardOnFailure out(pipe.fds_rw[1], buf_size, buffer);
|
||||
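The two static_asserts above pin the whole trace record to at most PIPE_BUF bytes because POSIX only guarantees that a single write() of up to PIPE_BUF bytes to a pipe is atomic, i.e. it cannot interleave with writes from other threads or from a recursive signal handler. A minimal sketch of that constraint, assuming a POSIX system (the record layout here is illustrative, not the real one):

#include <climits>   // PIPE_BUF
#include <cstring>
#include <unistd.h>

/// A fixed-size record that must reach the pipe with one atomic write().
struct TraceRecord
{
    char stop_flag;
    unsigned long long thread_id;
    long long size;
    unsigned long long event;
    unsigned long long increment;
};

/// POSIX guarantees atomicity only for writes of at most PIPE_BUF bytes,
/// and guarantees PIPE_BUF itself to be at least 512.
static_assert(PIPE_BUF >= 512, "PIPE_BUF must be at least 512 per POSIX");
static_assert(sizeof(TraceRecord) <= PIPE_BUF, "record must fit into one atomic pipe write");

bool sendRecord(int pipe_write_fd, const TraceRecord & record)
{
    char buffer[sizeof(TraceRecord)];
    std::memcpy(buffer, &record, sizeof(record));
    /// One write() call for the whole record: readers never observe a torn record.
    return ::write(pipe_write_fd, buffer, sizeof(buffer)) == static_cast<ssize_t>(sizeof(buffer));
}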
@ -71,7 +73,9 @@ void TraceSender::send(TraceType trace_type, const StackTrace & stack_trace, Int
|
||||
|
||||
writePODBinary(trace_type, out);
|
||||
writePODBinary(thread_id, out);
|
||||
writePODBinary(size, out);
|
||||
writePODBinary(extras.size, out);
|
||||
writePODBinary(extras.event, out);
|
||||
writePODBinary(extras.increment, out);
|
||||
|
||||
out.next();
|
||||
}
|
||||
|
@ -1,6 +1,7 @@
|
||||
#pragma once
|
||||
|
||||
#include <Common/PipeFDs.h>
|
||||
#include <Common/ProfileEvents.h>
|
||||
#include <base/types.h>
|
||||
|
||||
class StackTrace;
|
||||
@ -17,6 +18,7 @@ enum class TraceType : uint8_t
|
||||
Memory,
|
||||
MemorySample,
|
||||
MemoryPeak,
|
||||
ProfileEvent,
|
||||
};
|
||||
|
||||
/// This is the second part of TraceCollector, that sends stacktrace to the pipe.
|
||||
@ -24,10 +26,18 @@ enum class TraceType : uint8_t
|
||||
class TraceSender
|
||||
{
|
||||
public:
|
||||
struct Extras
|
||||
{
|
||||
/// size - for memory tracing is the amount of memory allocated; for other trace types it is 0.
|
||||
Int64 size{};
|
||||
/// Event type and increment for 'ProfileEvent' trace type; for other trace types defaults.
|
||||
ProfileEvents::Event event{ProfileEvents::end()};
|
||||
ProfileEvents::Count increment{};
|
||||
};
|
||||
|
||||
/// Collect a stack trace. This method is signal safe.
|
||||
/// Precondition: the TraceCollector object must be created.
|
||||
/// size - for memory tracing is the amount of memory allocated; for other trace types it is 0.
|
||||
static void send(TraceType trace_type, const StackTrace & stack_trace, Int64 size);
|
||||
static void send(TraceType trace_type, const StackTrace & stack_trace, Extras extras);
|
||||
|
||||
private:
|
||||
friend class TraceCollector;
|
||||
|
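With the Extras struct above, one send() entry point carries either an allocation size (memory traces) or an event/increment pair (ProfileEvent traces), which is what the changed call sites in this diff pass via designated initializers. A small standalone model of that tagged-payload idea, with simplified stand-ins for the real types:

#include <cstddef>
#include <cstdint>
#include <iostream>

enum class TraceType : uint8_t { Memory, MemorySample, MemoryPeak, ProfileEvent };
using Event = std::size_t;
using Count = uint64_t;
constexpr Event END_EVENT = 0;  // models ProfileEvents::end() as the "no event" marker

/// Mirror of the Extras idea: one struct, different fields meaningful per trace type.
struct Extras
{
    int64_t size{};            // meaningful for memory traces, 0 otherwise
    Event event{END_EVENT};    // meaningful only for ProfileEvent traces
    Count increment{};
};

void send(TraceType type, Extras extras)
{
    if (type == TraceType::ProfileEvent)
        std::cout << "profile event " << extras.event << " += " << extras.increment << '\n';
    else
        std::cout << "memory trace, size=" << extras.size << '\n';
}

int main()
{
    send(TraceType::MemorySample, {.size = 4096});                 // memory-tracking style call
    send(TraceType::ProfileEvent, {.event = 42, .increment = 1});  // profile-event style call
    send(TraceType::MemoryPeak, {});                               // "no extras" call, as in the profiler signal handler
}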
@ -42,15 +42,15 @@ public:
|
||||
return s;
|
||||
}
|
||||
|
||||
template <typename ValueType, typename ParseFunction>
|
||||
template <typename ValueType, bool ReturnDefault, typename ParseFunction>
|
||||
static ValueType getValue(const Node * node, const std::string & path,
|
||||
const std::optional<ValueType> & default_value, const ParseFunction & parse_function)
|
||||
const ValueType & default_value, const ParseFunction & parse_function)
|
||||
{
|
||||
const auto * value_node = node->getNodeByPath(path);
|
||||
if (!value_node)
|
||||
{
|
||||
if (default_value)
|
||||
return *default_value;
|
||||
if constexpr (ReturnDefault)
|
||||
return default_value;
|
||||
else
|
||||
throw Poco::NotFoundException(path);
|
||||
}
|
||||
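Moving the "default or throw" decision from a runtime std::optional check to the ReturnDefault template parameter means the throwing overloads never even touch a default value; the branch is resolved at compile time by if constexpr. A tiny standalone illustration of the same dispatch:

#include <iostream>
#include <stdexcept>

template <typename ValueType, bool ReturnDefault>
ValueType getOr(const ValueType * maybe_value, const ValueType & default_value = {})
{
    if (maybe_value)
        return *maybe_value;

    if constexpr (ReturnDefault)
        return default_value;                    // compiled only into the "with default" instantiations
    else
        throw std::runtime_error("not found");   // compiled only into the throwing instantiations
}

int main()
{
    int x = 7;
    std::cout << getOr<int, true>(nullptr, 42) << '\n';  // 42
    std::cout << getOr<int, false>(&x) << '\n';          // 7
    // getOr<int, false>(nullptr) would throw.
}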
@ -59,34 +59,64 @@ public:
|
||||
};
|
||||
|
||||
|
||||
std::string getString(const Node * node, const std::string & path, const std::optional<std::string> & default_value)
|
||||
std::string getString(const Node * node, const std::string & path)
|
||||
{
|
||||
return ParseHelper::getValue<std::string>(node, path, default_value, ParseHelper::parseString);
|
||||
return ParseHelper::getValue<std::string, false>(node, path, {}, ParseHelper::parseString);
|
||||
}
|
||||
|
||||
Int64 getInt64(const Node * node, const std::string & path, const std::optional<Int64> & default_value)
|
||||
std::string getString(const Node * node, const std::string & path, const std::string & default_value)
|
||||
{
|
||||
return ParseHelper::getValue<Int64>(node, path, default_value, ParseHelper::parseInt64);
|
||||
return ParseHelper::getValue<std::string, true>(node, path, default_value, ParseHelper::parseString);
|
||||
}
|
||||
|
||||
UInt64 getUInt64(const Node * node, const std::string & path, const std::optional<UInt64> & default_value)
|
||||
Int64 getInt64(const Node * node, const std::string & path)
|
||||
{
|
||||
return ParseHelper::getValue<UInt64>(node, path, default_value, ParseHelper::parseUInt64);
|
||||
return ParseHelper::getValue<Int64, false>(node, path, {}, ParseHelper::parseInt64);
|
||||
}
|
||||
|
||||
int getInt(const Node * node, const std::string & path, const std::optional<int> & default_value)
|
||||
Int64 getInt64(const Node * node, const std::string & path, Int64 default_value)
|
||||
{
|
||||
return ParseHelper::getValue<int>(node, path, default_value, ParseHelper::parseInt);
|
||||
return ParseHelper::getValue<Int64, true>(node, path, default_value, ParseHelper::parseInt64);
|
||||
}
|
||||
|
||||
unsigned getUInt(const Node * node, const std::string & path, const std::optional<unsigned> & default_value)
|
||||
UInt64 getUInt64(const Node * node, const std::string & path)
|
||||
{
|
||||
return ParseHelper::getValue<unsigned>(node, path, default_value, ParseHelper::parseUInt);
|
||||
return ParseHelper::getValue<UInt64, false>(node, path, {}, ParseHelper::parseUInt64);
|
||||
}
|
||||
|
||||
bool getBool(const Node * node, const std::string & path, const std::optional<bool> & default_value)
|
||||
UInt64 getUInt64(const Node * node, const std::string & path, UInt64 default_value)
|
||||
{
|
||||
return ParseHelper::getValue<bool>(node, path, default_value, ParseHelper::parseBool);
|
||||
return ParseHelper::getValue<UInt64, true>(node, path, default_value, ParseHelper::parseUInt64);
|
||||
}
|
||||
|
||||
int getInt(const Node * node, const std::string & path)
|
||||
{
|
||||
return ParseHelper::getValue<int, false>(node, path, {}, ParseHelper::parseInt);
|
||||
}
|
||||
|
||||
int getInt(const Node * node, const std::string & path, int default_value)
|
||||
{
|
||||
return ParseHelper::getValue<int, true>(node, path, default_value, ParseHelper::parseInt);
|
||||
}
|
||||
|
||||
unsigned getUInt(const Node * node, const std::string & path)
|
||||
{
|
||||
return ParseHelper::getValue<unsigned, false>(node, path, {}, ParseHelper::parseUInt);
|
||||
}
|
||||
|
||||
unsigned getUInt(const Node * node, const std::string & path, unsigned default_value)
|
||||
{
|
||||
return ParseHelper::getValue<unsigned, true>(node, path, default_value, ParseHelper::parseUInt);
|
||||
}
|
||||
|
||||
bool getBool(const Node * node, const std::string & path)
|
||||
{
|
||||
return ParseHelper::getValue<bool, false>(node, path, {}, ParseHelper::parseBool);
|
||||
}
|
||||
|
||||
bool getBool(const Node * node, const std::string & path, bool default_value)
|
||||
{
|
||||
return ParseHelper::getValue<bool, true>(node, path, default_value, ParseHelper::parseBool);
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -7,17 +7,26 @@
|
||||
|
||||
namespace DB::XMLUtils
|
||||
{
|
||||
/// Returns root element of the document.
|
||||
Poco::XML::Node * getRootNode(Poco::XML::Document * document);
|
||||
|
||||
std::string getString(const Poco::XML::Node * node, const std::string & path, const std::optional<std::string> & default_value = std::nullopt);
|
||||
/// Finds the element in the node's subtree by the specified path and returns its inner text
|
||||
/// trying to parse it as the requested type.
|
||||
/// Throws an exception if path is not found.
|
||||
std::string getString(const Poco::XML::Node * node, const std::string & path);
|
||||
Int64 getInt64(const Poco::XML::Node * node, const std::string & path);
|
||||
UInt64 getUInt64(const Poco::XML::Node * node, const std::string & path);
|
||||
int getInt(const Poco::XML::Node * node, const std::string & path);
|
||||
unsigned getUInt(const Poco::XML::Node * node, const std::string & path);
|
||||
bool getBool(const Poco::XML::Node * node, const std::string & path);
|
||||
|
||||
Int64 getInt64(const Poco::XML::Node * node, const std::string & path, const std::optional<Int64> & default_value = std::nullopt);
|
||||
|
||||
UInt64 getUInt64(const Poco::XML::Node * node, const std::string & path, const std::optional<UInt64> & default_value = std::nullopt);
|
||||
|
||||
int getInt(const Poco::XML::Node * node, const std::string & path, const std::optional<int> & default_value = std::nullopt);
|
||||
|
||||
unsigned getUInt(const Poco::XML::Node * node, const std::string & path, const std::optional<unsigned> & default_value = std::nullopt);
|
||||
|
||||
bool getBool(const Poco::XML::Node * node, const std::string & path, const std::optional<bool> & default_value = std::nullopt);
|
||||
/// Finds the element in the node's subtree by the specified path and returns its inner text
|
||||
/// trying to parse it as the requested type.
|
||||
/// Returns the specified default value if path is not found.
|
||||
std::string getString(const Poco::XML::Node * node, const std::string & path, const std::string & default_value);
|
||||
Int64 getInt64(const Poco::XML::Node * node, const std::string & path, Int64 default_value);
|
||||
UInt64 getUInt64(const Poco::XML::Node * node, const std::string & path, UInt64 default_value);
|
||||
int getInt(const Poco::XML::Node * node, const std::string & path, int default_value);
|
||||
unsigned getUInt(const Poco::XML::Node * node, const std::string & path, unsigned default_value);
|
||||
bool getBool(const Poco::XML::Node * node, const std::string & path, bool default_value);
|
||||
}
|
||||
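After this split, callers choose the semantics through the overload set: the forms without a default throw Poco::NotFoundException when the path is missing, and the forms with an explicit default never throw. A hypothetical caller, assuming the include paths and the element names shown here (they are illustrative, not taken from a real config schema):

#include <Poco/DOM/Document.h>
#include <Common/XMLUtils.h>   // assumed location of the declarations above
#include <string>

void configureFrom(Poco::XML::Document * document)
{
    using namespace DB::XMLUtils;
    const auto * root = getRootNode(document);

    std::string host = getString(root, "keeper_server/host");       // throws if the element is absent
    auto port = getUInt64(root, "keeper_server/tcp_port", 9181);    // falls back to 9181 if absent
    bool compress = getBool(root, "keeper_server/compress_logs", true);

    (void)host; (void)port; (void)compress;
}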
|
@ -466,7 +466,7 @@ void ZooKeeper::connect(
|
||||
}
|
||||
else
|
||||
{
|
||||
LOG_TEST(log, "Connected to ZooKeeper at {} with session_id {}{}", socket.peerAddress().toString(), session_id, fail_reasons.str());
|
||||
LOG_INFO(log, "Connected to ZooKeeper at {} with session_id {}{}", socket.peerAddress().toString(), session_id, fail_reasons.str());
|
||||
}
|
||||
}
|
||||
|
||||
@ -867,12 +867,12 @@ void ZooKeeper::finalize(bool error_send, bool error_receive, const String & rea
|
||||
/// If some thread (send/receive) already finalizing session don't try to do it
|
||||
bool already_started = finalization_started.test_and_set();
|
||||
|
||||
LOG_TEST(log, "Finalizing session {}: finalization_started={}, queue_finished={}, reason={}",
|
||||
session_id, already_started, requests_queue.isFinished(), reason);
|
||||
|
||||
if (already_started)
|
||||
return;
|
||||
|
||||
LOG_INFO(log, "Finalizing session {}: finalization_started={}, queue_finished={}, reason={}",
|
||||
session_id, already_started, requests_queue.isFinished(), reason);
|
||||
|
||||
auto expire_session_if_not_expired = [&]
|
||||
{
|
||||
/// No new requests will appear in queue after finish()
|
||||
|
@ -117,7 +117,7 @@ public:
|
||||
|
||||
WriteBuffer * working_buf = compressed_buffer ? compressed_buffer->getNestedBuffer() : file_buf.get();
|
||||
|
||||
/// Flush working buffer to file system
|
||||
/// Flush working buffer to file system
|
||||
working_buf->next();
|
||||
|
||||
/// Fsync file system if needed
|
||||
@ -280,6 +280,7 @@ Changelog::Changelog(
|
||||
, force_sync(force_sync_)
|
||||
, log(log_)
|
||||
, compress_logs(compress_logs_)
|
||||
, write_operations(std::numeric_limits<size_t>::max())
|
||||
{
|
||||
/// Load all files in changelog directory
|
||||
namespace fs = std::filesystem;
|
||||
@ -299,10 +300,13 @@ Changelog::Changelog(
|
||||
LOG_WARNING(log, "No logs exists in {}. It's Ok if it's the first run of clickhouse-keeper.", changelogs_dir.generic_string());
|
||||
|
||||
clean_log_thread = ThreadFromGlobalPool([this] { cleanLogThread(); });
|
||||
|
||||
write_thread = ThreadFromGlobalPool([this] { writeThread(); });
|
||||
}
|
||||
|
||||
void Changelog::readChangelogAndInitWriter(uint64_t last_commited_log_index, uint64_t logs_to_keep)
|
||||
{
|
||||
std::lock_guard writer_lock(writer_mutex);
|
||||
std::optional<ChangelogReadResult> last_log_read_result;
|
||||
|
||||
/// Last log has some free space to write
|
||||
@ -336,7 +340,7 @@ void Changelog::readChangelogAndInitWriter(uint64_t last_commited_log_index, uin
|
||||
removeAllLogs();
|
||||
min_log_id = last_commited_log_index;
|
||||
max_log_id = last_commited_log_index == 0 ? 0 : last_commited_log_index - 1;
|
||||
rotate(max_log_id + 1);
|
||||
rotate(max_log_id + 1, writer_lock);
|
||||
return;
|
||||
}
|
||||
else if (changelog_description.from_log_index > start_to_read_from)
|
||||
@ -427,7 +431,9 @@ void Changelog::readChangelogAndInitWriter(uint64_t last_commited_log_index, uin
|
||||
|
||||
/// Start new log if we don't initialize writer from previous log. All logs can be "complete".
|
||||
if (!current_writer)
|
||||
rotate(max_log_id + 1);
|
||||
rotate(max_log_id + 1, writer_lock);
|
||||
|
||||
initialized = true;
|
||||
}
|
||||
|
||||
|
||||
@ -500,10 +506,11 @@ void Changelog::removeAllLogs()
|
||||
logs.clear();
|
||||
}
|
||||
|
||||
void Changelog::rotate(uint64_t new_start_log_index)
|
||||
void Changelog::rotate(uint64_t new_start_log_index, std::lock_guard<std::mutex> &)
|
||||
{
|
||||
/// Flush previous log
|
||||
flush();
|
||||
if (current_writer)
|
||||
current_writer->flush(force_sync);
|
||||
|
||||
/// Start new one
|
||||
ChangelogFileDescription new_description;
|
||||
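The extra std::lock_guard<std::mutex> & parameter added to rotate() above is a lock-as-capability trick: the caller can only obtain such a reference by actually holding writer_mutex, so the signature documents and enforces the locking precondition without rotate() taking the mutex itself. A minimal sketch of the pattern:

#include <mutex>

class Log
{
public:
    void flush()
    {
        std::lock_guard<std::mutex> lock(mutex);
        rotate(lock);  // fine: we provably hold the lock
    }

private:
    /// Accepting a lock_guard reference states (and enforces at the type level)
    /// that the caller already holds `mutex`; the parameter itself is never used.
    void rotate(std::lock_guard<std::mutex> &)
    {
        // ... work that requires the writer mutex ...
    }

    std::mutex mutex;
};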
@ -540,50 +547,96 @@ ChangelogRecord Changelog::buildRecord(uint64_t index, const LogEntryPtr & log_e
|
||||
return record;
|
||||
}
|
||||
|
||||
void Changelog::writeThread()
|
||||
{
|
||||
WriteOperation write_operation;
|
||||
while (write_operations.pop(write_operation))
|
||||
{
|
||||
assert(initialized);
|
||||
|
||||
if (auto * append_log = std::get_if<AppendLog>(&write_operation))
|
||||
{
|
||||
std::lock_guard writer_lock(writer_mutex);
|
||||
assert(current_writer);
|
||||
|
||||
const auto & current_changelog_description = existing_changelogs[current_writer->getStartIndex()];
|
||||
const bool log_is_complete = append_log->index - current_writer->getStartIndex() == current_changelog_description.expectedEntriesCountInLog();
|
||||
|
||||
if (log_is_complete)
|
||||
rotate(append_log->index, writer_lock);
|
||||
|
||||
current_writer->appendRecord(buildRecord(append_log->index, append_log->log_entry));
|
||||
}
|
||||
else
|
||||
{
|
||||
const auto & flush = std::get<Flush>(write_operation);
|
||||
|
||||
{
|
||||
std::lock_guard writer_lock(writer_mutex);
|
||||
if (current_writer)
|
||||
current_writer->flush(force_sync);
|
||||
}
|
||||
|
||||
{
|
||||
std::lock_guard lock{durable_idx_mutex};
|
||||
last_durable_idx = flush.index;
|
||||
}
|
||||
|
||||
durable_idx_cv.notify_all();
|
||||
|
||||
// we shouldn't start the raft_server before sending it here
|
||||
if (auto raft_server_locked = raft_server.lock())
|
||||
raft_server_locked->notify_log_append_completion(true);
|
||||
else
|
||||
LOG_WARNING(log, "Raft server is not set in LogStore.");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void Changelog::appendEntry(uint64_t index, const LogEntryPtr & log_entry)
|
||||
{
|
||||
if (!current_writer)
|
||||
if (!initialized)
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "Changelog must be initialized before appending records");
|
||||
|
||||
if (logs.empty())
|
||||
min_log_id = index;
|
||||
|
||||
const auto & current_changelog_description = existing_changelogs[current_writer->getStartIndex()];
|
||||
const bool log_is_complete = index - current_writer->getStartIndex() == current_changelog_description.expectedEntriesCountInLog();
|
||||
|
||||
if (log_is_complete)
|
||||
rotate(index);
|
||||
|
||||
current_writer->appendRecord(buildRecord(index, log_entry));
|
||||
logs[index] = log_entry;
|
||||
max_log_id = index;
|
||||
|
||||
if (!write_operations.tryPush(AppendLog{index, log_entry}))
|
||||
LOG_WARNING(log, "Changelog is shut down");
|
||||
}
|
||||
|
||||
void Changelog::writeAt(uint64_t index, const LogEntryPtr & log_entry)
|
||||
{
|
||||
/// This write_at require to overwrite everything in this file and also in previous file(s)
|
||||
const bool go_to_previous_file = index < current_writer->getStartIndex();
|
||||
|
||||
if (go_to_previous_file)
|
||||
{
|
||||
auto index_changelog = existing_changelogs.lower_bound(index);
|
||||
std::lock_guard lock(writer_mutex);
|
||||
/// This write_at require to overwrite everything in this file and also in previous file(s)
|
||||
const bool go_to_previous_file = index < current_writer->getStartIndex();
|
||||
|
||||
ChangelogFileDescription description;
|
||||
|
||||
if (index_changelog->first == index) /// exactly this file starts from index
|
||||
description = index_changelog->second;
|
||||
else
|
||||
description = std::prev(index_changelog)->second;
|
||||
|
||||
/// Initialize writer from this log file
|
||||
current_writer = std::make_unique<ChangelogWriter>(description.path, WriteMode::Append, index_changelog->first);
|
||||
|
||||
/// Remove all subsequent files if overwritten something in previous one
|
||||
auto to_remove_itr = existing_changelogs.upper_bound(index);
|
||||
for (auto itr = to_remove_itr; itr != existing_changelogs.end();)
|
||||
if (go_to_previous_file)
|
||||
{
|
||||
std::filesystem::remove(itr->second.path);
|
||||
itr = existing_changelogs.erase(itr);
|
||||
auto index_changelog = existing_changelogs.lower_bound(index);
|
||||
|
||||
ChangelogFileDescription description;
|
||||
|
||||
if (index_changelog->first == index) /// exactly this file starts from index
|
||||
description = index_changelog->second;
|
||||
else
|
||||
description = std::prev(index_changelog)->second;
|
||||
|
||||
/// Initialize writer from this log file
|
||||
current_writer = std::make_unique<ChangelogWriter>(description.path, WriteMode::Append, index_changelog->first);
|
||||
|
||||
/// Remove all subsequent files if overwritten something in previous one
|
||||
auto to_remove_itr = existing_changelogs.upper_bound(index);
|
||||
for (auto itr = to_remove_itr; itr != existing_changelogs.end();)
|
||||
{
|
||||
std::filesystem::remove(itr->second.path);
|
||||
itr = existing_changelogs.erase(itr);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
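The restructuring above turns appends and flushes into messages on a bounded queue (write_operations) drained by a dedicated writeThread, so the Raft path no longer blocks on disk I/O; a Flush message, once processed, publishes last_durable_idx and wakes waiters. A standalone, much-simplified model of that producer/consumer shape (std::queue in place of ConcurrentBoundedQueue, printing in place of real file writes):

#include <condition_variable>
#include <cstdint>
#include <iostream>
#include <mutex>
#include <queue>
#include <string>
#include <thread>
#include <variant>

struct AppendLog { uint64_t index; std::string entry; };
struct Flush     { uint64_t index; };
using WriteOperation = std::variant<AppendLog, Flush>;

class MiniChangelog
{
public:
    MiniChangelog() : worker([this] { writeThread(); }) {}

    ~MiniChangelog()
    {
        { std::lock_guard lock(queue_mutex); finished = true; }
        queue_cv.notify_all();
        worker.join();
    }

    void append(uint64_t index, std::string entry)
    {
        push(AppendLog{index, std::move(entry)});
        max_log_id = index;
    }

    /// Synchronous flush: enqueue a Flush marker and wait until the worker reports it durable.
    void flush()
    {
        push(Flush{max_log_id});
        std::unique_lock lock(durable_mutex);
        durable_cv.wait(lock, [&] { return last_durable_idx >= max_log_id; });
    }

private:
    void push(WriteOperation op)
    {
        { std::lock_guard lock(queue_mutex); operations.push(std::move(op)); }
        queue_cv.notify_one();
    }

    void writeThread()
    {
        while (true)
        {
            WriteOperation op;
            {
                std::unique_lock lock(queue_mutex);
                queue_cv.wait(lock, [&] { return finished || !operations.empty(); });
                if (operations.empty())
                    return;
                op = std::move(operations.front());
                operations.pop();
            }

            if (auto * append = std::get_if<AppendLog>(&op))
            {
                std::cout << "write entry " << append->index << '\n';  // appendRecord(...) in the real code
            }
            else
            {
                const auto & flush_op = std::get<Flush>(op);
                // An fsync would happen here; then the durable index is published.
                { std::lock_guard lock(durable_mutex); last_durable_idx = flush_op.index; }
                durable_cv.notify_all();
            }
        }
    }

    std::queue<WriteOperation> operations;
    std::mutex queue_mutex;
    std::condition_variable queue_cv;
    bool finished = false;

    std::mutex durable_mutex;
    std::condition_variable durable_cv;
    uint64_t last_durable_idx = 0;
    uint64_t max_log_id = 0;

    std::thread worker;  // declared last so every other member is ready when the thread starts
};

int main()
{
    MiniChangelog changelog;
    changelog.append(1, "hello");
    changelog.append(2, "world");
    changelog.flush();  // returns only after the worker has processed the Flush{2} marker
}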
@ -597,6 +650,7 @@ void Changelog::writeAt(uint64_t index, const LogEntryPtr & log_entry)
|
||||
|
||||
void Changelog::compact(uint64_t up_to_log_index)
|
||||
{
|
||||
std::lock_guard lock(writer_mutex);
|
||||
LOG_INFO(log, "Compact logs up to log index {}, our max log id is {}", up_to_log_index, max_log_id);
|
||||
|
||||
bool remove_all_logs = false;
|
||||
@ -643,7 +697,7 @@ void Changelog::compact(uint64_t up_to_log_index)
|
||||
std::erase_if(logs, [up_to_log_index] (const auto & item) { return item.first <= up_to_log_index; });
|
||||
|
||||
if (need_rotate)
|
||||
rotate(up_to_log_index + 1);
|
||||
rotate(up_to_log_index + 1, lock);
|
||||
|
||||
LOG_INFO(log, "Compaction up to {} finished new min index {}, new max index {}", up_to_log_index, min_log_id, max_log_id);
|
||||
}
|
||||
@ -747,8 +801,19 @@ void Changelog::applyEntriesFromBuffer(uint64_t index, nuraft::buffer & buffer)
|
||||
|
||||
void Changelog::flush()
|
||||
{
|
||||
if (current_writer)
|
||||
current_writer->flush(force_sync);
|
||||
if (flushAsync())
|
||||
{
|
||||
std::unique_lock lock{durable_idx_mutex};
|
||||
durable_idx_cv.wait(lock, [&] { return last_durable_idx == max_log_id; });
|
||||
}
|
||||
}
|
||||
|
||||
bool Changelog::flushAsync()
|
||||
{
|
||||
bool pushed = write_operations.push(Flush{max_log_id});
|
||||
if (!pushed)
|
||||
LOG_WARNING(log, "Changelog is shut down");
|
||||
return pushed;
|
||||
}
|
||||
|
||||
void Changelog::shutdown()
|
||||
@ -758,6 +823,12 @@ void Changelog::shutdown()
|
||||
|
||||
if (clean_log_thread.joinable())
|
||||
clean_log_thread.join();
|
||||
|
||||
if (!write_operations.isFinished())
|
||||
write_operations.finish();
|
||||
|
||||
if (write_thread.joinable())
|
||||
write_thread.join();
|
||||
}
|
||||
|
||||
Changelog::~Changelog()
|
||||
@ -789,4 +860,10 @@ void Changelog::cleanLogThread()
|
||||
}
|
||||
}
|
||||
|
||||
void Changelog::setRaftServer(const nuraft::ptr<nuraft::raft_server> & raft_server_)
|
||||
{
|
||||
assert(raft_server_);
|
||||
raft_server = raft_server_;
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -1,8 +1,10 @@
|
||||
#pragma once
|
||||
|
||||
#include <libnuraft/nuraft.hxx>
|
||||
#include <libnuraft/raft_server.hxx>
|
||||
#include <city.h>
|
||||
#include <optional>
|
||||
#include <base/defines.h>
|
||||
#include <IO/WriteBufferFromFile.h>
|
||||
#include <IO/HashingWriteBuffer.h>
|
||||
#include <IO/CompressionMethod.h>
|
||||
@ -121,6 +123,8 @@ public:
|
||||
/// Fsync latest log to disk and flush buffer
|
||||
void flush();
|
||||
|
||||
bool flushAsync();
|
||||
|
||||
void shutdown();
|
||||
|
||||
uint64_t size() const
|
||||
@ -128,6 +132,14 @@ public:
|
||||
return logs.size();
|
||||
}
|
||||
|
||||
uint64_t lastDurableIndex() const
|
||||
{
|
||||
std::lock_guard lock{durable_idx_mutex};
|
||||
return last_durable_idx;
|
||||
}
|
||||
|
||||
void setRaftServer(const nuraft::ptr<nuraft::raft_server> & raft_server_);
|
||||
|
||||
/// Fsync log to disk
|
||||
~Changelog();
|
||||
|
||||
@ -136,7 +148,7 @@ private:
|
||||
static ChangelogRecord buildRecord(uint64_t index, const LogEntryPtr & log_entry);
|
||||
|
||||
/// Starts new file [new_start_log_index, new_start_log_index + rotate_interval]
|
||||
void rotate(uint64_t new_start_log_index);
|
||||
void rotate(uint64_t new_start_log_index, std::lock_guard<std::mutex> & writer_lock);
|
||||
|
||||
/// Currently existing changelogs
|
||||
std::map<uint64_t, ChangelogFileDescription> existing_changelogs;
|
||||
@ -162,7 +174,7 @@ private:
|
||||
Poco::Logger * log;
|
||||
bool compress_logs;
|
||||
|
||||
|
||||
std::mutex writer_mutex;
|
||||
/// Current writer for changelog file
|
||||
std::unique_ptr<ChangelogWriter> current_writer;
|
||||
/// Mapping log_id -> log_entry
|
||||
@ -175,6 +187,33 @@ private:
|
||||
/// 128 is enough, even if log is not removed, it's not a problem
|
||||
ConcurrentBoundedQueue<std::string> log_files_to_delete_queue{128};
|
||||
ThreadFromGlobalPool clean_log_thread;
|
||||
|
||||
struct AppendLog
|
||||
{
|
||||
uint64_t index;
|
||||
nuraft::ptr<nuraft::log_entry> log_entry;
|
||||
};
|
||||
|
||||
struct Flush
|
||||
{
|
||||
uint64_t index;
|
||||
};
|
||||
|
||||
using WriteOperation = std::variant<AppendLog, Flush>;
|
||||
|
||||
void writeThread();
|
||||
|
||||
ThreadFromGlobalPool write_thread;
|
||||
ConcurrentBoundedQueue<WriteOperation> write_operations;
|
||||
|
||||
// last_durable_index needs to be exposed through const getter so we make mutex mutable
|
||||
mutable std::mutex durable_idx_mutex;
|
||||
std::condition_variable durable_idx_cv;
|
||||
uint64_t last_durable_idx{0};
|
||||
|
||||
nuraft::wptr<nuraft::raft_server> raft_server;
|
||||
|
||||
bool initialized = false;
|
||||
};
|
||||
|
||||
}
|
||||
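As the comment in the members above notes, lastDurableIndex() is a const getter that still has to lock, which is why durable_idx_mutex is declared mutable. A minimal illustration of that idiom:

#include <cstdint>
#include <mutex>

class DurableIndex
{
public:
    uint64_t last() const
    {
        std::lock_guard lock(mutex);  // locking mutates the mutex, hence `mutable`
        return value;
    }

    void set(uint64_t v)
    {
        std::lock_guard lock(mutex);
        value = v;
    }

private:
    mutable std::mutex mutex;  // not part of the object's observable state
    uint64_t value = 0;
};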
|
127
src/Coordination/KeeperAsynchronousMetrics.cpp
Normal file
@ -0,0 +1,127 @@
|
||||
#include <Coordination/KeeperAsynchronousMetrics.h>
|
||||
|
||||
#include <Coordination/KeeperDispatcher.h>
|
||||
|
||||
#include <Common/getCurrentProcessFDCount.h>
|
||||
#include <Common/getMaxFileDescriptorCount.h>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
void updateKeeperInformation(KeeperDispatcher & keeper_dispatcher, AsynchronousMetricValues & new_values)
|
||||
{
|
||||
#if USE_NURAFT
|
||||
size_t is_leader = 0;
|
||||
size_t is_follower = 0;
|
||||
size_t is_observer = 0;
|
||||
size_t is_standalone = 0;
|
||||
size_t znode_count = 0;
|
||||
size_t watch_count = 0;
|
||||
size_t ephemerals_count = 0;
|
||||
size_t approximate_data_size = 0;
|
||||
size_t key_arena_size = 0;
|
||||
size_t latest_snapshot_size = 0;
|
||||
size_t open_file_descriptor_count = 0;
|
||||
size_t max_file_descriptor_count = 0;
|
||||
size_t followers = 0;
|
||||
size_t synced_followers = 0;
|
||||
size_t zxid = 0;
|
||||
size_t session_with_watches = 0;
|
||||
size_t paths_watched = 0;
|
||||
size_t snapshot_dir_size = 0;
|
||||
size_t log_dir_size = 0;
|
||||
|
||||
if (keeper_dispatcher.isServerActive())
|
||||
{
|
||||
auto keeper_info = keeper_dispatcher.getKeeper4LWInfo();
|
||||
is_standalone = static_cast<size_t>(keeper_info.is_standalone);
|
||||
is_leader = static_cast<size_t>(keeper_info.is_leader);
|
||||
is_observer = static_cast<size_t>(keeper_info.is_observer);
|
||||
is_follower = static_cast<size_t>(keeper_info.is_follower);
|
||||
|
||||
zxid = keeper_info.last_zxid;
|
||||
const auto & state_machine = keeper_dispatcher.getStateMachine();
|
||||
znode_count = state_machine.getNodesCount();
|
||||
watch_count = state_machine.getTotalWatchesCount();
|
||||
ephemerals_count = state_machine.getTotalEphemeralNodesCount();
|
||||
approximate_data_size = state_machine.getApproximateDataSize();
|
||||
key_arena_size = state_machine.getKeyArenaSize();
|
||||
latest_snapshot_size = state_machine.getLatestSnapshotBufSize();
|
||||
session_with_watches = state_machine.getSessionsWithWatchesCount();
|
||||
paths_watched = state_machine.getWatchedPathsCount();
|
||||
snapshot_dir_size = keeper_dispatcher.getSnapDirSize();
|
||||
log_dir_size = keeper_dispatcher.getLogDirSize();
|
||||
|
||||
# if defined(__linux__) || defined(__APPLE__)
|
||||
open_file_descriptor_count = getCurrentProcessFDCount();
|
||||
max_file_descriptor_count = getMaxFileDescriptorCount();
|
||||
# endif
|
||||
|
||||
if (keeper_info.is_leader)
|
||||
{
|
||||
followers = keeper_info.follower_count;
|
||||
synced_followers = keeper_info.synced_follower_count;
|
||||
}
|
||||
}
|
||||
|
||||
new_values["KeeperIsLeader"] = { is_leader, "1 if ClickHouse Keeper is a leader, 0 otherwise." };
|
||||
new_values["KeeperIsFollower"] = { is_follower, "1 if ClickHouse Keeper is a follower, 0 otherwise." };
|
||||
new_values["KeeperIsObserver"] = { is_observer, "1 if ClickHouse Keeper is an observer, 0 otherwise." };
|
||||
new_values["KeeperIsStandalone"] = { is_standalone, "1 if ClickHouse Keeper is in a standalone mode, 0 otherwise." };
|
||||
|
||||
new_values["KeeperZnodeCount"] = { znode_count, "The number of nodes (data entries) in ClickHouse Keeper." };
|
||||
new_values["KeeperWatchCount"] = { watch_count, "The number of watches in ClickHouse Keeper." };
|
||||
new_values["KeeperEphemeralsCount"] = { ephemerals_count, "The number of ephemeral nodes in ClickHouse Keeper." };
|
||||
|
||||
new_values["KeeperApproximateDataSize"] = { approximate_data_size, "The approximate data size of ClickHouse Keeper, in bytes." };
|
||||
new_values["KeeperKeyArenaSize"] = { key_arena_size, "The size in bytes of the memory arena for keys in ClickHouse Keeper." };
|
||||
new_values["KeeperLatestSnapshotSize"] = { latest_snapshot_size, "The uncompressed size in bytes of the latest snapshot created by ClickHouse Keeper." };
|
||||
|
||||
new_values["KeeperOpenFileDescriptorCount"] = { open_file_descriptor_count, "The number of open file descriptors in ClickHouse Keeper." };
|
||||
new_values["KeeperMaxFileDescriptorCount"] = { max_file_descriptor_count, "The maximum number of open file descriptors in ClickHouse Keeper." };
|
||||
|
||||
new_values["KeeperFollowers"] = { followers, "The number of followers of ClickHouse Keeper." };
|
||||
new_values["KeeperSyncedFollowers"] = { synced_followers, "The number of followers of ClickHouse Keeper who are also in-sync." };
|
||||
new_values["KeeperZxid"] = { zxid, "The current transaction id number (zxid) in ClickHouse Keeper." };
|
||||
new_values["KeeperSessionWithWatches"] = { session_with_watches, "The number of client sessions of ClickHouse Keeper having watches." };
|
||||
new_values["KeeperPathsWatched"] = { paths_watched, "The number of different paths watched by the clients of ClickHouse Keeper." };
|
||||
new_values["KeeperSnapshotDirSize"] = { snapshot_dir_size, "The size of the snapshots directory of ClickHouse Keeper, in bytes." };
|
||||
new_values["KeeperLogDirSize"] = { log_dir_size, "The size of the logs directory of ClickHouse Keeper, in bytes." };
|
||||
|
||||
auto keeper_log_info = keeper_dispatcher.getKeeperLogInfo();
|
||||
|
||||
new_values["KeeperLastLogIdx"] = { keeper_log_info.last_log_idx, "Index of the last log stored in ClickHouse Keeper." };
|
||||
new_values["KeeperLastLogTerm"] = { keeper_log_info.last_log_term, "Raft term of the last log stored in ClickHouse Keeper." };
|
||||
|
||||
new_values["KeeperLastCommittedLogIdx"] = { keeper_log_info.last_committed_log_idx, "Index of the last committed log in ClickHouse Keeper." };
|
||||
new_values["KeeperTargetCommitLogIdx"] = { keeper_log_info.target_committed_log_idx, "Index until which logs can be committed in ClickHouse Keeper." };
|
||||
new_values["KeeperLastSnapshotIdx"] = { keeper_log_info.last_snapshot_idx, "Index of the last log present in the last created snapshot." };
|
||||
|
||||
auto & keeper_connection_stats = keeper_dispatcher.getKeeperConnectionStats();
|
||||
|
||||
new_values["KeeperMinLatency"] = { keeper_connection_stats.getMinLatency(), "Minimal request latency of ClickHouse Keeper." };
|
||||
new_values["KeeperMaxLatency"] = { keeper_connection_stats.getMaxLatency(), "Maximum request latency of ClickHouse Keeper." };
|
||||
new_values["KeeperAvgLatency"] = { keeper_connection_stats.getAvgLatency(), "Average request latency of ClickHouse Keeper." };
|
||||
new_values["KeeperPacketsReceived"] = { keeper_connection_stats.getPacketsReceived(), "Number of packets received by ClickHouse Keeper." };
|
||||
new_values["KeeperPacketsSent"] = { keeper_connection_stats.getPacketsSent(), "Number of packets sent by ClickHouse Keeper." };
|
||||
#endif
|
||||
}
|
||||
|
||||
KeeperAsynchronousMetrics::KeeperAsynchronousMetrics(
|
||||
TinyContextPtr tiny_context_, int update_period_seconds, const ProtocolServerMetricsFunc & protocol_server_metrics_func_)
|
||||
: AsynchronousMetrics(update_period_seconds, protocol_server_metrics_func_), tiny_context(std::move(tiny_context_))
|
||||
{
|
||||
}
|
||||
|
||||
void KeeperAsynchronousMetrics::updateImpl(AsynchronousMetricValues & new_values, TimePoint /*update_time*/, TimePoint /*current_time*/)
|
||||
{
|
||||
#if USE_NURAFT
|
||||
{
|
||||
auto keeper_dispatcher = tiny_context->tryGetKeeperDispatcher();
|
||||
if (keeper_dispatcher)
|
||||
updateKeeperInformation(*keeper_dispatcher, new_values);
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
}
|
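updateKeeperInformation above fills new_values with name -> {value, documentation} entries pulled from the dispatcher and the state machine. A small standalone model of that "collect into a named metrics map" shape, with simplified stand-ins for AsynchronousMetricValues and the dispatcher state:

#include <cstdint>
#include <iostream>
#include <map>
#include <string>

/// Stand-in for AsynchronousMetricValues: metric name -> (value, documentation).
struct MetricValue
{
    double value = 0;
    const char * documentation = "";
};
using MetricValues = std::map<std::string, MetricValue>;

/// Stand-in for the few fields the update function reads from the dispatcher.
struct KeeperSnapshotInfo
{
    bool is_leader = false;
    uint64_t znode_count = 0;
    uint64_t watch_count = 0;
};

void updateKeeperMetrics(const KeeperSnapshotInfo & info, MetricValues & new_values)
{
    new_values["KeeperIsLeader"]   = { info.is_leader ? 1.0 : 0.0, "1 if this node is the leader, 0 otherwise." };
    new_values["KeeperZnodeCount"] = { double(info.znode_count),   "The number of nodes (data entries)." };
    new_values["KeeperWatchCount"] = { double(info.watch_count),   "The number of watches." };
}

int main()
{
    MetricValues values;
    updateKeeperMetrics({.is_leader = true, .znode_count = 12, .watch_count = 3}, values);
    for (const auto & [name, metric] : values)
        std::cout << name << " = " << metric.value << "  (" << metric.documentation << ")\n";
}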
25
src/Coordination/KeeperAsynchronousMetrics.h
Normal file
@ -0,0 +1,25 @@
|
||||
#pragma once
|
||||
|
||||
#include <Coordination/TinyContext.h>
|
||||
#include <Common/AsynchronousMetrics.h>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
class KeeperDispatcher;
|
||||
void updateKeeperInformation(KeeperDispatcher & keeper_dispatcher, AsynchronousMetricValues & new_values);
|
||||
|
||||
class KeeperAsynchronousMetrics : public AsynchronousMetrics
|
||||
{
|
||||
public:
|
||||
KeeperAsynchronousMetrics(
|
||||
TinyContextPtr tiny_context_, int update_period_seconds, const ProtocolServerMetricsFunc & protocol_server_metrics_func_);
|
||||
|
||||
private:
|
||||
TinyContextPtr tiny_context;
|
||||
|
||||
void updateImpl(AsynchronousMetricValues & new_values, TimePoint update_time, TimePoint current_time) override;
|
||||
};
|
||||
|
||||
|
||||
}
|
@ -109,7 +109,7 @@ uint64_t KeeperLogStore::size() const
|
||||
void KeeperLogStore::end_of_append_batch(uint64_t /*start_index*/, uint64_t /*count*/)
|
||||
{
|
||||
std::lock_guard lock(changelog_lock);
|
||||
changelog.flush();
|
||||
changelog.flushAsync();
|
||||
}
|
||||
|
||||
nuraft::ptr<nuraft::log_entry> KeeperLogStore::getLatestConfigChange() const
|
||||
@ -132,4 +132,16 @@ bool KeeperLogStore::flushChangelogAndShutdown()
|
||||
return true;
|
||||
}
|
||||
|
||||
uint64_t KeeperLogStore::last_durable_index()
|
||||
{
|
||||
std::lock_guard lock(changelog_lock);
|
||||
return changelog.lastDurableIndex();
|
||||
}
|
||||
|
||||
void KeeperLogStore::setRaftServer(const nuraft::ptr<nuraft::raft_server> & raft_server)
|
||||
{
|
||||
std::lock_guard lock(changelog_lock);
|
||||
return changelog.setRaftServer(raft_server);
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -62,12 +62,16 @@ public:
|
||||
/// Current log storage size
|
||||
uint64_t size() const;
|
||||
|
||||
uint64_t last_durable_index() override;
|
||||
|
||||
/// Flush batch of appended entries
|
||||
void end_of_append_batch(uint64_t start_index, uint64_t count) override;
|
||||
|
||||
/// Get entry with latest config in logstore
|
||||
nuraft::ptr<nuraft::log_entry> getLatestConfigChange() const;
|
||||
|
||||
void setRaftServer(const nuraft::ptr<nuraft::raft_server> & raft_server);
|
||||
|
||||
private:
|
||||
mutable std::mutex changelog_lock;
|
||||
Poco::Logger * log;
|
||||
|
@ -266,6 +266,7 @@ void KeeperServer::forceRecovery()
|
||||
void KeeperServer::launchRaftServer(const Poco::Util::AbstractConfiguration & config, bool enable_ipv6)
|
||||
{
|
||||
nuraft::raft_params params;
|
||||
params.parallel_log_appending_ = true;
|
||||
params.heart_beat_interval_
|
||||
= getValueOrMaxInt32AndLogWarning(coordination_settings->heart_beat_interval_ms.totalMilliseconds(), "heart_beat_interval_ms", log);
|
||||
params.election_timeout_lower_bound_ = getValueOrMaxInt32AndLogWarning(
|
||||
@ -352,6 +353,8 @@ void KeeperServer::launchRaftServer(const Poco::Util::AbstractConfiguration & co
|
||||
if (!raft_instance)
|
||||
throw Exception(ErrorCodes::RAFT_ERROR, "Cannot allocate RAFT instance");
|
||||
|
||||
state_manager->getLogStore()->setRaftServer(raft_instance);
|
||||
|
||||
raft_instance->start_server(init_options.skip_initial_election_timeout_);
|
||||
|
||||
nuraft::ptr<nuraft::raft_server> casted_raft_server = raft_instance;
|
||||
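Parallel log appending only works if NuRaft can later be told when entries actually became durable, which is why the diff both sets params.parallel_log_appending_ = true and hands the constructed raft_server back to the log store (setRaftServer) before start_server is called; the changelog's write thread then calls notify_log_append_completion after each flush. A rough sketch of that wiring order, condensed from the server and test changes in this diff (not a complete program; launcher, state_machine, state_manager, logger and port are assumed to exist):

nuraft::raft_params params;
params.parallel_log_appending_ = true;        // appends are acknowledged before the fsync completes

nuraft::raft_server::init_options opts;
opts.start_server_in_constructor_ = false;    // so the log store can learn about the server first

auto raft_instance = launcher.init(state_machine, state_manager, logger, port,
                                   nuraft::asio_service::options{}, params, opts);

// The write thread needs the server to call notify_log_append_completion()
// after each flush, so wire it up before the server starts serving.
state_manager->getLogStore()->setRaftServer(raft_instance);
raft_instance->start_server(/*skip_initial_election_timeout=*/false);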
@ -446,8 +449,8 @@ void KeeperServer::shutdownRaftServer()
|
||||
|
||||
void KeeperServer::shutdown()
|
||||
{
|
||||
state_manager->flushAndShutDownLogStore();
|
||||
shutdownRaftServer();
|
||||
state_manager->flushAndShutDownLogStore();
|
||||
state_machine->shutdownStorage();
|
||||
}
|
||||
|
||||
|
@ -15,6 +15,7 @@
|
||||
#include <Common/logger_useful.h>
|
||||
#include <Common/setThreadName.h>
|
||||
#include <Common/LockMemoryExceptionInThread.h>
|
||||
#include <Common/ProfileEvents.h>
|
||||
|
||||
#include <Coordination/pathUtils.h>
|
||||
#include <Coordination/KeeperConstants.h>
|
||||
@ -27,6 +28,19 @@
|
||||
#include <base/defines.h>
|
||||
#include <filesystem>
|
||||
|
||||
namespace ProfileEvents
|
||||
{
|
||||
extern const Event KeeperCreateRequest;
|
||||
extern const Event KeeperRemoveRequest;
|
||||
extern const Event KeeperSetRequest;
|
||||
extern const Event KeeperCheckRequest;
|
||||
extern const Event KeeperMultiRequest;
|
||||
extern const Event KeeperMultiReadRequest;
|
||||
extern const Event KeeperGetRequest;
|
||||
extern const Event KeeperListRequest;
|
||||
extern const Event KeeperExistsRequest;
|
||||
}
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
@ -865,6 +879,7 @@ struct KeeperStorageCreateRequestProcessor final : public KeeperStorageRequestPr
|
||||
std::vector<KeeperStorage::Delta>
|
||||
preprocess(KeeperStorage & storage, int64_t zxid, int64_t session_id, int64_t time, uint64_t & digest, const KeeperContext & keeper_context) const override
|
||||
{
|
||||
ProfileEvents::increment(ProfileEvents::KeeperCreateRequest);
|
||||
Coordination::ZooKeeperCreateRequest & request = dynamic_cast<Coordination::ZooKeeperCreateRequest &>(*zk_request);
|
||||
|
||||
std::vector<KeeperStorage::Delta> new_deltas;
|
||||
@ -986,6 +1001,7 @@ struct KeeperStorageGetRequestProcessor final : public KeeperStorageRequestProce
|
||||
std::vector<KeeperStorage::Delta>
|
||||
preprocess(KeeperStorage & storage, int64_t zxid, int64_t /*session_id*/, int64_t /*time*/, uint64_t & /*digest*/, const KeeperContext & /*keeper_context*/) const override
|
||||
{
|
||||
ProfileEvents::increment(ProfileEvents::KeeperGetRequest);
|
||||
Coordination::ZooKeeperGetRequest & request = dynamic_cast<Coordination::ZooKeeperGetRequest &>(*zk_request);
|
||||
|
||||
if (request.path == Coordination::keeper_api_version_path)
|
||||
@ -1040,6 +1056,7 @@ struct KeeperStorageGetRequestProcessor final : public KeeperStorageRequestProce
|
||||
|
||||
Coordination::ZooKeeperResponsePtr processLocal(KeeperStorage & storage, int64_t zxid) const override
|
||||
{
|
||||
ProfileEvents::increment(ProfileEvents::KeeperGetRequest);
|
||||
return processImpl<true>(storage, zxid);
|
||||
}
|
||||
};
|
||||
@ -1055,6 +1072,7 @@ struct KeeperStorageRemoveRequestProcessor final : public KeeperStorageRequestPr
|
||||
std::vector<KeeperStorage::Delta>
|
||||
preprocess(KeeperStorage & storage, int64_t zxid, int64_t /*session_id*/, int64_t /*time*/, uint64_t & digest, const KeeperContext & keeper_context) const override
|
||||
{
|
||||
ProfileEvents::increment(ProfileEvents::KeeperRemoveRequest);
|
||||
Coordination::ZooKeeperRemoveRequest & request = dynamic_cast<Coordination::ZooKeeperRemoveRequest &>(*zk_request);
|
||||
|
||||
std::vector<KeeperStorage::Delta> new_deltas;
|
||||
@ -1145,6 +1163,7 @@ struct KeeperStorageExistsRequestProcessor final : public KeeperStorageRequestPr
|
||||
std::vector<KeeperStorage::Delta>
|
||||
preprocess(KeeperStorage & storage, int64_t zxid, int64_t /*session_id*/, int64_t /*time*/, uint64_t & /*digest*/, const KeeperContext & /*keeper_context*/) const override
|
||||
{
|
||||
ProfileEvents::increment(ProfileEvents::KeeperExistsRequest);
|
||||
Coordination::ZooKeeperExistsRequest & request = dynamic_cast<Coordination::ZooKeeperExistsRequest &>(*zk_request);
|
||||
|
||||
if (!storage.uncommitted_state.getNode(request.path))
|
||||
@ -1194,6 +1213,7 @@ struct KeeperStorageExistsRequestProcessor final : public KeeperStorageRequestPr
|
||||
|
||||
Coordination::ZooKeeperResponsePtr processLocal(KeeperStorage & storage, int64_t zxid) const override
|
||||
{
|
||||
ProfileEvents::increment(ProfileEvents::KeeperExistsRequest);
|
||||
return processImpl<true>(storage, zxid);
|
||||
}
|
||||
};
|
||||
@ -1209,6 +1229,7 @@ struct KeeperStorageSetRequestProcessor final : public KeeperStorageRequestProce
|
||||
std::vector<KeeperStorage::Delta>
|
||||
preprocess(KeeperStorage & storage, int64_t zxid, int64_t /*session_id*/, int64_t time, uint64_t & digest, const KeeperContext & keeper_context) const override
|
||||
{
|
||||
ProfileEvents::increment(ProfileEvents::KeeperSetRequest);
|
||||
Coordination::ZooKeeperSetRequest & request = dynamic_cast<Coordination::ZooKeeperSetRequest &>(*zk_request);
|
||||
|
||||
std::vector<KeeperStorage::Delta> new_deltas;
|
||||
@ -1301,6 +1322,7 @@ struct KeeperStorageListRequestProcessor final : public KeeperStorageRequestProc
|
||||
std::vector<KeeperStorage::Delta>
|
||||
preprocess(KeeperStorage & storage, int64_t zxid, int64_t /*session_id*/, int64_t /*time*/, uint64_t & /*digest*/, const KeeperContext & /*keeper_context*/) const override
|
||||
{
|
||||
ProfileEvents::increment(ProfileEvents::KeeperListRequest);
|
||||
Coordination::ZooKeeperListRequest & request = dynamic_cast<Coordination::ZooKeeperListRequest &>(*zk_request);
|
||||
|
||||
if (!storage.uncommitted_state.getNode(request.path))
|
||||
@ -1387,6 +1409,7 @@ struct KeeperStorageListRequestProcessor final : public KeeperStorageRequestProc
|
||||
|
||||
Coordination::ZooKeeperResponsePtr processLocal(KeeperStorage & storage, int64_t zxid) const override
|
||||
{
|
||||
ProfileEvents::increment(ProfileEvents::KeeperListRequest);
|
||||
return processImpl<true>(storage, zxid);
|
||||
}
|
||||
};
|
||||
@ -1402,6 +1425,7 @@ struct KeeperStorageCheckRequestProcessor final : public KeeperStorageRequestPro
|
||||
std::vector<KeeperStorage::Delta>
|
||||
preprocess(KeeperStorage & storage, int64_t zxid, int64_t /*session_id*/, int64_t /*time*/, uint64_t & /*digest*/, const KeeperContext & /*keeper_context*/) const override
|
||||
{
|
||||
ProfileEvents::increment(ProfileEvents::KeeperCheckRequest);
|
||||
Coordination::ZooKeeperCheckRequest & request = dynamic_cast<Coordination::ZooKeeperCheckRequest &>(*zk_request);
|
||||
|
||||
if (!storage.uncommitted_state.getNode(request.path))
|
||||
@ -1463,6 +1487,7 @@ struct KeeperStorageCheckRequestProcessor final : public KeeperStorageRequestPro
|
||||
|
||||
Coordination::ZooKeeperResponsePtr processLocal(KeeperStorage & storage, int64_t zxid) const override
|
||||
{
|
||||
ProfileEvents::increment(ProfileEvents::KeeperCheckRequest);
|
||||
return processImpl<true>(storage, zxid);
|
||||
}
|
||||
};
|
||||
@ -1689,6 +1714,7 @@ struct KeeperStorageMultiRequestProcessor final : public KeeperStorageRequestPro
|
||||
std::vector<KeeperStorage::Delta>
|
||||
preprocess(KeeperStorage & storage, int64_t zxid, int64_t session_id, int64_t time, uint64_t & digest, const KeeperContext & keeper_context) const override
|
||||
{
|
||||
ProfileEvents::increment(ProfileEvents::KeeperMultiRequest);
|
||||
std::vector<Coordination::Error> response_errors;
|
||||
response_errors.reserve(concrete_requests.size());
|
||||
uint64_t current_digest = digest;
|
||||
@ -1756,6 +1782,7 @@ struct KeeperStorageMultiRequestProcessor final : public KeeperStorageRequestPro
|
||||
|
||||
Coordination::ZooKeeperResponsePtr processLocal(KeeperStorage & storage, int64_t zxid) const override
|
||||
{
|
||||
ProfileEvents::increment(ProfileEvents::KeeperMultiReadRequest);
|
||||
Coordination::ZooKeeperResponsePtr response_ptr = zk_request->makeResponse();
|
||||
Coordination::ZooKeeperMultiResponse & response = dynamic_cast<Coordination::ZooKeeperMultiResponse &>(*response_ptr);
|
||||
|
||||
|
@ -1,4 +1,4 @@
|
||||
#include "TinyContext.h"
|
||||
#include <Coordination/TinyContext.h>
|
||||
|
||||
#include <Common/Exception.h>
|
||||
#include <Coordination/KeeperDispatcher.h>
|
@ -10,7 +10,7 @@ namespace DB
|
||||
|
||||
class KeeperDispatcher;
|
||||
|
||||
class TinyContext: public std::enable_shared_from_this<TinyContext>
|
||||
class TinyContext : public std::enable_shared_from_this<TinyContext>
|
||||
{
|
||||
public:
|
||||
std::shared_ptr<KeeperDispatcher> getKeeperDispatcher() const;
|
||||
@ -31,4 +31,6 @@ private:
|
||||
ConfigurationPtr config TSA_GUARDED_BY(keeper_dispatcher_mutex);
|
||||
};
|
||||
|
||||
using TinyContextPtr = std::shared_ptr<TinyContext>;
|
||||
|
||||
}
|
@ -67,6 +67,7 @@ class CoordinationTest : public ::testing::TestWithParam<CompressionParam>
|
||||
{
|
||||
protected:
|
||||
DB::KeeperContextPtr keeper_context = std::make_shared<DB::KeeperContext>();
|
||||
Poco::Logger * log{&Poco::Logger::get("CoordinationTest")};
|
||||
};
|
||||
|
||||
TEST_P(CoordinationTest, BuildTest)
|
||||
@ -129,10 +130,13 @@ struct SimpliestRaftServer
|
||||
params.snapshot_distance_ = 1; /// forcefully send snapshots
|
||||
params.client_req_timeout_ = 3000;
|
||||
params.return_method_ = nuraft::raft_params::blocking;
|
||||
params.parallel_log_appending_ = true;
|
||||
|
||||
nuraft::raft_server::init_options opts;
|
||||
opts.start_server_in_constructor_ = false;
|
||||
raft_instance = launcher.init(
|
||||
state_machine, state_manager, nuraft::cs_new<DB::LoggerWrapper>("ToyRaftLogger", DB::LogsLevel::trace), port,
|
||||
nuraft::asio_service::options{}, params);
|
||||
nuraft::asio_service::options{}, params, opts);
|
||||
|
||||
if (!raft_instance)
|
||||
{
|
||||
@ -140,6 +144,10 @@ struct SimpliestRaftServer
|
||||
_exit(1);
|
||||
}
|
||||
|
||||
state_manager->getLogStore()->setRaftServer(raft_instance);
|
||||
|
||||
raft_instance->start_server(false);
|
||||
|
||||
std::cout << "init Raft instance " << server_id;
|
||||
for (size_t ii = 0; ii < 20; ++ii)
|
||||
{
|
||||
@ -207,7 +215,7 @@ TEST_P(CoordinationTest, TestSummingRaft1)
|
||||
|
||||
while (s1.state_machine->getValue() != 143)
|
||||
{
|
||||
std::cout << "Waiting s1 to apply entry\n";
|
||||
LOG_INFO(log, "Waiting s1 to apply entry");
|
||||
std::this_thread::sleep_for(std::chrono::milliseconds(100));
|
||||
}
|
||||
|
||||
@ -240,6 +248,15 @@ TEST_P(CoordinationTest, ChangelogTestSimple)
|
||||
EXPECT_EQ(changelog.log_entries(1, 2)->size(), 1);
|
||||
}
|
||||
|
||||
namespace
|
||||
{
|
||||
void waitDurableLogs(nuraft::log_store & log_store)
|
||||
{
|
||||
while (log_store.last_durable_index() != log_store.next_slot() - 1)
|
||||
std::this_thread::sleep_for(std::chrono::milliseconds(200));
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
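Because appends are now acknowledged before they are fsynced, the tests add the waitDurableLogs helper above and call it after end_of_append_batch wherever they are about to assert on files on disk. The typical test shape after this change, condensed from ChangelogTestFile below:

// Append, close the batch, then wait for the background write thread
// before inspecting the changelog files on disk.
auto entry = getLogEntry("hello world", 77);
changelog.append(entry);
changelog.end_of_append_batch(0, 0);

waitDurableLogs(changelog);   // spins until last_durable_index() == next_slot() - 1

EXPECT_TRUE(fs::exists("./logs/changelog_1_5.bin" + params.extension));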
TEST_P(CoordinationTest, ChangelogTestFile)
|
||||
{
|
||||
@ -250,6 +267,9 @@ TEST_P(CoordinationTest, ChangelogTestFile)
|
||||
auto entry = getLogEntry("hello world", 77);
|
||||
changelog.append(entry);
|
||||
changelog.end_of_append_batch(0, 0);
|
||||
|
||||
waitDurableLogs(changelog);
|
||||
|
||||
EXPECT_TRUE(fs::exists("./logs/changelog_1_5.bin" + params.extension));
|
||||
for (const auto & p : fs::directory_iterator("./logs"))
|
||||
EXPECT_EQ(p.path(), "./logs/changelog_1_5.bin" + params.extension);
|
||||
@ -261,6 +281,8 @@ TEST_P(CoordinationTest, ChangelogTestFile)
|
||||
changelog.append(entry);
|
||||
changelog.end_of_append_batch(0, 0);
|
||||
|
||||
waitDurableLogs(changelog);
|
||||
|
||||
EXPECT_TRUE(fs::exists("./logs/changelog_1_5.bin" + params.extension));
|
||||
EXPECT_TRUE(fs::exists("./logs/changelog_6_10.bin" + params.extension));
|
||||
}
|
||||
@ -271,6 +293,7 @@ TEST_P(CoordinationTest, ChangelogReadWrite)
|
||||
ChangelogDirTest test("./logs");
|
||||
DB::KeeperLogStore changelog("./logs", 1000, true, params.enable_compression);
|
||||
changelog.init(1, 0);
|
||||
|
||||
for (size_t i = 0; i < 10; ++i)
|
||||
{
|
||||
auto entry = getLogEntry("hello world", i * 10);
|
||||
@ -280,6 +303,8 @@ TEST_P(CoordinationTest, ChangelogReadWrite)
|
||||
|
||||
EXPECT_EQ(changelog.size(), 10);
|
||||
|
||||
waitDurableLogs(changelog);
|
||||
|
||||
DB::KeeperLogStore changelog_reader("./logs", 1000, true, params.enable_compression);
|
||||
changelog_reader.init(1, 0);
|
||||
EXPECT_EQ(changelog_reader.size(), 10);
|
||||
@ -315,6 +340,8 @@ TEST_P(CoordinationTest, ChangelogWriteAt)
|
||||
changelog.write_at(7, entry);
|
||||
changelog.end_of_append_batch(0, 0);
|
||||
|
||||
waitDurableLogs(changelog);
|
||||
|
||||
EXPECT_EQ(changelog.size(), 7);
|
||||
EXPECT_EQ(changelog.last_entry()->get_term(), 77);
|
||||
EXPECT_EQ(changelog.entry_at(7)->get_term(), 77);
|
||||
@ -344,6 +371,9 @@ TEST_P(CoordinationTest, ChangelogTestAppendAfterRead)
|
||||
changelog.end_of_append_batch(0, 0);
|
||||
|
||||
EXPECT_EQ(changelog.size(), 7);
|
||||
|
||||
waitDurableLogs(changelog);
|
||||
|
||||
EXPECT_TRUE(fs::exists("./logs/changelog_1_5.bin" + params.extension));
|
||||
EXPECT_TRUE(fs::exists("./logs/changelog_6_10.bin" + params.extension));
|
||||
|
||||
@ -358,6 +388,8 @@ TEST_P(CoordinationTest, ChangelogTestAppendAfterRead)
|
||||
}
|
||||
changelog_reader.end_of_append_batch(0, 0);
|
||||
EXPECT_EQ(changelog_reader.size(), 10);
|
||||
|
||||
waitDurableLogs(changelog_reader);
|
||||
EXPECT_TRUE(fs::exists("./logs/changelog_1_5.bin" + params.extension));
|
||||
EXPECT_TRUE(fs::exists("./logs/changelog_6_10.bin" + params.extension));
|
||||
|
||||
@ -371,6 +403,8 @@ TEST_P(CoordinationTest, ChangelogTestAppendAfterRead)
|
||||
changelog_reader.append(entry);
|
||||
changelog_reader.end_of_append_batch(0, 0);
|
||||
EXPECT_EQ(changelog_reader.size(), 11);
|
||||
|
||||
waitDurableLogs(changelog_reader);
|
||||
EXPECT_TRUE(fs::exists("./logs/changelog_1_5.bin" + params.extension));
|
||||
EXPECT_TRUE(fs::exists("./logs/changelog_6_10.bin" + params.extension));
|
||||
EXPECT_TRUE(fs::exists("./logs/changelog_11_15.bin" + params.extension));
|
||||
@ -396,6 +430,8 @@ TEST_P(CoordinationTest, ChangelogTestCompaction)
|
||||
}
|
||||
changelog.end_of_append_batch(0, 0);
|
||||
|
||||
waitDurableLogs(changelog);
|
||||
|
||||
EXPECT_EQ(changelog.size(), 3);
|
||||
|
||||
changelog.compact(2);
|
||||
@ -416,6 +452,8 @@ TEST_P(CoordinationTest, ChangelogTestCompaction)
|
||||
changelog.append(e4);
|
||||
changelog.end_of_append_batch(0, 0);
|
||||
|
||||
waitDurableLogs(changelog);
|
||||
|
||||
EXPECT_TRUE(fs::exists("./logs/changelog_1_5.bin" + params.extension));
|
||||
EXPECT_TRUE(fs::exists("./logs/changelog_6_10.bin" + params.extension));
|
||||
|
||||
@ -454,6 +492,8 @@ TEST_P(CoordinationTest, ChangelogTestBatchOperations)
|
||||
|
||||
EXPECT_EQ(changelog.size(), 10);
|
||||
|
||||
waitDurableLogs(changelog);
|
||||
|
||||
auto entries = changelog.pack(1, 5);
|
||||
|
||||
DB::KeeperLogStore apply_changelog("./logs", 100, true, params.enable_compression);
|
||||
@ -499,6 +539,8 @@ TEST_P(CoordinationTest, ChangelogTestBatchOperationsEmpty)
|
||||
|
||||
EXPECT_EQ(changelog.size(), 10);
|
||||
|
||||
waitDurableLogs(changelog);
|
||||
|
||||
auto entries = changelog.pack(5, 5);
|
||||
|
||||
ChangelogDirTest test1("./logs1");
|
||||
@ -543,6 +585,8 @@ TEST_P(CoordinationTest, ChangelogTestWriteAtPreviousFile)
|
||||
}
|
||||
changelog.end_of_append_batch(0, 0);
|
||||
|
||||
waitDurableLogs(changelog);
|
||||
|
||||
EXPECT_TRUE(fs::exists("./logs/changelog_1_5.bin" + params.extension));
|
||||
EXPECT_TRUE(fs::exists("./logs/changelog_6_10.bin" + params.extension));
|
||||
EXPECT_TRUE(fs::exists("./logs/changelog_11_15.bin" + params.extension));
|
||||
@ -561,6 +605,8 @@ TEST_P(CoordinationTest, ChangelogTestWriteAtPreviousFile)
|
||||
EXPECT_EQ(changelog.next_slot(), 8);
|
||||
EXPECT_EQ(changelog.last_entry()->get_term(), 5555);
|
||||
|
||||
waitDurableLogs(changelog);
|
||||
|
||||
EXPECT_TRUE(fs::exists("./logs/changelog_1_5.bin" + params.extension));
|
||||
EXPECT_TRUE(fs::exists("./logs/changelog_6_10.bin" + params.extension));
|
||||
|
||||
@ -592,6 +638,8 @@ TEST_P(CoordinationTest, ChangelogTestWriteAtFileBorder)
|
||||
}
|
||||
changelog.end_of_append_batch(0, 0);
|
||||
|
||||
waitDurableLogs(changelog);
|
||||
|
||||
EXPECT_TRUE(fs::exists("./logs/changelog_1_5.bin" + params.extension));
|
||||
EXPECT_TRUE(fs::exists("./logs/changelog_6_10.bin" + params.extension));
|
||||
EXPECT_TRUE(fs::exists("./logs/changelog_11_15.bin" + params.extension));
|
||||
@ -610,6 +658,8 @@ TEST_P(CoordinationTest, ChangelogTestWriteAtFileBorder)
|
||||
EXPECT_EQ(changelog.next_slot(), 12);
|
||||
EXPECT_EQ(changelog.last_entry()->get_term(), 5555);
|
||||
|
||||
waitDurableLogs(changelog);
|
||||
|
||||
EXPECT_TRUE(fs::exists("./logs/changelog_1_5.bin" + params.extension));
|
||||
EXPECT_TRUE(fs::exists("./logs/changelog_6_10.bin" + params.extension));
|
||||
EXPECT_TRUE(fs::exists("./logs/changelog_11_15.bin" + params.extension));
|
||||
@ -633,7 +683,6 @@ TEST_P(CoordinationTest, ChangelogTestWriteAtAllFiles)
|
||||
ChangelogDirTest test("./logs");
|
||||
DB::KeeperLogStore changelog("./logs", 5, true, params.enable_compression);
|
||||
changelog.init(1, 0);
|
||||
|
||||
for (size_t i = 0; i < 33; ++i)
|
||||
{
|
||||
auto entry = getLogEntry(std::to_string(i) + "_hello_world", i * 10);
|
||||
@ -641,6 +690,8 @@ TEST_P(CoordinationTest, ChangelogTestWriteAtAllFiles)
|
||||
}
|
||||
changelog.end_of_append_batch(0, 0);
|
||||
|
||||
waitDurableLogs(changelog);
|
||||
|
||||
EXPECT_TRUE(fs::exists("./logs/changelog_1_5.bin" + params.extension));
|
||||
EXPECT_TRUE(fs::exists("./logs/changelog_6_10.bin" + params.extension));
|
||||
EXPECT_TRUE(fs::exists("./logs/changelog_11_15.bin" + params.extension));
|
||||
@ -659,6 +710,8 @@ TEST_P(CoordinationTest, ChangelogTestWriteAtAllFiles)
|
||||
EXPECT_EQ(changelog.next_slot(), 2);
|
||||
EXPECT_EQ(changelog.last_entry()->get_term(), 5555);
|
||||
|
||||
waitDurableLogs(changelog);
|
||||
|
||||
EXPECT_TRUE(fs::exists("./logs/changelog_1_5.bin" + params.extension));
|
||||
|
EXPECT_FALSE(fs::exists("./logs/changelog_6_10.bin" + params.extension));
@ -683,6 +736,8 @@ TEST_P(CoordinationTest, ChangelogTestStartNewLogAfterRead)
}
changelog.end_of_append_batch(0, 0);
EXPECT_EQ(changelog.size(), 35);

waitDurableLogs(changelog);
EXPECT_TRUE(fs::exists("./logs/changelog_1_5.bin" + params.extension));
EXPECT_TRUE(fs::exists("./logs/changelog_6_10.bin" + params.extension));
EXPECT_TRUE(fs::exists("./logs/changelog_11_15.bin" + params.extension));
@ -692,7 +747,6 @@ TEST_P(CoordinationTest, ChangelogTestStartNewLogAfterRead)
EXPECT_TRUE(fs::exists("./logs/changelog_31_35.bin" + params.extension));
EXPECT_FALSE(fs::exists("./logs/changelog_36_40.bin" + params.extension));


DB::KeeperLogStore changelog_reader("./logs", 5, true, params.enable_compression);
changelog_reader.init(1, 0);

@ -701,6 +755,8 @@ TEST_P(CoordinationTest, ChangelogTestStartNewLogAfterRead)
changelog_reader.end_of_append_batch(0, 0);

EXPECT_EQ(changelog_reader.size(), 36);

waitDurableLogs(changelog_reader);
EXPECT_TRUE(fs::exists("./logs/changelog_1_5.bin" + params.extension));
EXPECT_TRUE(fs::exists("./logs/changelog_6_10.bin" + params.extension));
EXPECT_TRUE(fs::exists("./logs/changelog_11_15.bin" + params.extension));
@ -746,6 +802,8 @@ TEST_P(CoordinationTest, ChangelogTestReadAfterBrokenTruncate)
}
changelog.end_of_append_batch(0, 0);
EXPECT_EQ(changelog.size(), 35);

waitDurableLogs(changelog);
EXPECT_TRUE(fs::exists("./logs/changelog_1_5.bin" + params.extension));
EXPECT_TRUE(fs::exists("./logs/changelog_6_10.bin" + params.extension));
EXPECT_TRUE(fs::exists("./logs/changelog_11_15.bin" + params.extension));
@ -779,6 +837,8 @@ TEST_P(CoordinationTest, ChangelogTestReadAfterBrokenTruncate)
EXPECT_EQ(changelog_reader.size(), 11);
EXPECT_EQ(changelog_reader.last_entry()->get_term(), 7777);

waitDurableLogs(changelog_reader);

EXPECT_TRUE(fs::exists("./logs/changelog_1_5.bin" + params.extension));
EXPECT_TRUE(fs::exists("./logs/changelog_6_10.bin" + params.extension));
EXPECT_TRUE(fs::exists("./logs/changelog_11_15.bin" + params.extension));
@ -809,6 +869,7 @@ TEST_P(CoordinationTest, ChangelogTestReadAfterBrokenTruncate2)
}
changelog.end_of_append_batch(0, 0);

waitDurableLogs(changelog);
EXPECT_TRUE(fs::exists("./logs/changelog_1_20.bin" + params.extension));
EXPECT_TRUE(fs::exists("./logs/changelog_21_40.bin" + params.extension));

@ -824,6 +885,9 @@ TEST_P(CoordinationTest, ChangelogTestReadAfterBrokenTruncate2)
auto entry = getLogEntry("hello_world", 7777);
changelog_reader.append(entry);
changelog_reader.end_of_append_batch(0, 0);

waitDurableLogs(changelog_reader);

EXPECT_EQ(changelog_reader.size(), 1);
EXPECT_EQ(changelog_reader.last_entry()->get_term(), 7777);

@ -848,6 +912,7 @@ TEST_P(CoordinationTest, ChangelogTestLostFiles)
}
changelog.end_of_append_batch(0, 0);

waitDurableLogs(changelog);
EXPECT_TRUE(fs::exists("./logs/changelog_1_20.bin" + params.extension));
EXPECT_TRUE(fs::exists("./logs/changelog_21_40.bin" + params.extension));

@ -874,6 +939,8 @@ TEST_P(CoordinationTest, ChangelogTestLostFiles2)
}
changelog.end_of_append_batch(0, 0);

waitDurableLogs(changelog);

EXPECT_TRUE(fs::exists("./logs/changelog_1_10.bin" + params.extension));
EXPECT_TRUE(fs::exists("./logs/changelog_11_20.bin" + params.extension));
EXPECT_TRUE(fs::exists("./logs/changelog_21_30.bin" + params.extension));
@ -1330,6 +1397,8 @@ void testLogAndStateMachine(Coordination::CoordinationSettingsPtr settings, uint
changelog.append(entry);
changelog.end_of_append_batch(0, 0);

waitDurableLogs(changelog);

state_machine->pre_commit(i, changelog.entry_at(i)->get_buf());
state_machine->commit(i, changelog.entry_at(i)->get_buf());
bool snapshot_created = false;
@ -1339,7 +1408,7 @@ void testLogAndStateMachine(Coordination::CoordinationSettingsPtr settings, uint
nuraft::async_result<bool>::handler_type when_done = [&snapshot_created] (bool & ret, nuraft::ptr<std::exception> &/*exception*/)
{
snapshot_created = ret;
std::cerr << "Snapshot finished\n";
LOG_INFO(&Poco::Logger::get("CoordinationTest"), "Snapshot finished");
};

state_machine->create_snapshot(s, when_done);
@ -1511,6 +1580,8 @@ TEST_P(CoordinationTest, TestRotateIntervalChanges)
changelog.append(entry);
changelog.end_of_append_batch(0, 0);
}

waitDurableLogs(changelog);
}


@ -1527,6 +1598,8 @@ TEST_P(CoordinationTest, TestRotateIntervalChanges)
changelog_1.end_of_append_batch(0, 0);
}

waitDurableLogs(changelog_1);

EXPECT_TRUE(fs::exists("./logs/changelog_1_100.bin" + params.extension));
EXPECT_TRUE(fs::exists("./logs/changelog_101_110.bin" + params.extension));

@ -1542,6 +1615,8 @@ TEST_P(CoordinationTest, TestRotateIntervalChanges)
changelog_2.end_of_append_batch(0, 0);
}

waitDurableLogs(changelog_2);

changelog_2.compact(105);
std::this_thread::sleep_for(std::chrono::microseconds(1000));

@ -1562,6 +1637,8 @@ TEST_P(CoordinationTest, TestRotateIntervalChanges)
changelog_3.end_of_append_batch(0, 0);
}

waitDurableLogs(changelog_3);

changelog_3.compact(125);
std::this_thread::sleep_for(std::chrono::microseconds(1000));
EXPECT_FALSE(fs::exists("./logs/changelog_101_110.bin" + params.extension));
@ -1609,6 +1686,7 @@ TEST_P(CoordinationTest, TestCompressedLogsMultipleRewrite)
changelog.end_of_append_batch(0, 0);
}

waitDurableLogs(changelog);

DB::KeeperLogStore changelog1("./logs", 100, true, test_params.enable_compression);
changelog1.init(0, 3);
@ -1683,43 +1761,47 @@ TEST_P(CoordinationTest, ChangelogInsertThreeTimesSmooth)
auto params = GetParam();
ChangelogDirTest test("./logs");
{
std::cerr << "================First time=====================\n";
LOG_INFO(log, "================First time=====================");
DB::KeeperLogStore changelog("./logs", 100, true, params.enable_compression);
changelog.init(1, 0);
auto entry = getLogEntry("hello_world", 1000);
changelog.append(entry);
changelog.end_of_append_batch(0, 0);
EXPECT_EQ(changelog.next_slot(), 2);
waitDurableLogs(changelog);
}

{
std::cerr << "================Second time=====================\n";
LOG_INFO(log, "================Second time=====================");
DB::KeeperLogStore changelog("./logs", 100, true, params.enable_compression);
changelog.init(1, 0);
auto entry = getLogEntry("hello_world", 1000);
changelog.append(entry);
changelog.end_of_append_batch(0, 0);
EXPECT_EQ(changelog.next_slot(), 3);
waitDurableLogs(changelog);
}

{
std::cerr << "================Third time=====================\n";
LOG_INFO(log, "================Third time=====================");
DB::KeeperLogStore changelog("./logs", 100, true, params.enable_compression);
changelog.init(1, 0);
auto entry = getLogEntry("hello_world", 1000);
changelog.append(entry);
changelog.end_of_append_batch(0, 0);
EXPECT_EQ(changelog.next_slot(), 4);
waitDurableLogs(changelog);
}

{
std::cerr << "================Fourth time=====================\n";
LOG_INFO(log, "================Fourth time=====================");
DB::KeeperLogStore changelog("./logs", 100, true, params.enable_compression);
changelog.init(1, 0);
auto entry = getLogEntry("hello_world", 1000);
changelog.append(entry);
changelog.end_of_append_batch(0, 0);
EXPECT_EQ(changelog.next_slot(), 5);
waitDurableLogs(changelog);
}
}

@ -1730,7 +1812,7 @@ TEST_P(CoordinationTest, ChangelogInsertMultipleTimesSmooth)
ChangelogDirTest test("./logs");
for (size_t i = 0; i < 36; ++i)
{
std::cerr << "================First time=====================\n";
LOG_INFO(log, "================First time=====================");
DB::KeeperLogStore changelog("./logs", 100, true, params.enable_compression);
changelog.init(1, 0);
for (size_t j = 0; j < 7; ++j)
@ -1739,6 +1821,7 @@ TEST_P(CoordinationTest, ChangelogInsertMultipleTimesSmooth)
changelog.append(entry);
}
changelog.end_of_append_batch(0, 0);
waitDurableLogs(changelog);
}

DB::KeeperLogStore changelog("./logs", 100, true, params.enable_compression);
@ -1750,37 +1833,49 @@ TEST_P(CoordinationTest, ChangelogInsertThreeTimesHard)
{
auto params = GetParam();
ChangelogDirTest test("./logs");
std::cerr << "================First time=====================\n";
DB::KeeperLogStore changelog1("./logs", 100, true, params.enable_compression);
changelog1.init(1, 0);
auto entry = getLogEntry("hello_world", 1000);
changelog1.append(entry);
changelog1.end_of_append_batch(0, 0);
EXPECT_EQ(changelog1.next_slot(), 2);
{
LOG_INFO(log, "================First time=====================");
DB::KeeperLogStore changelog1("./logs", 100, true, params.enable_compression);
changelog1.init(1, 0);
auto entry = getLogEntry("hello_world", 1000);
changelog1.append(entry);
changelog1.end_of_append_batch(0, 0);
EXPECT_EQ(changelog1.next_slot(), 2);
waitDurableLogs(changelog1);
}

std::cerr << "================Second time=====================\n";
DB::KeeperLogStore changelog2("./logs", 100, true, params.enable_compression);
changelog2.init(1, 0);
entry = getLogEntry("hello_world", 1000);
changelog2.append(entry);
changelog2.end_of_append_batch(0, 0);
EXPECT_EQ(changelog2.next_slot(), 3);
{
LOG_INFO(log, "================Second time=====================");
DB::KeeperLogStore changelog2("./logs", 100, true, params.enable_compression);
changelog2.init(1, 0);
auto entry = getLogEntry("hello_world", 1000);
changelog2.append(entry);
changelog2.end_of_append_batch(0, 0);
EXPECT_EQ(changelog2.next_slot(), 3);
waitDurableLogs(changelog2);
}

std::cerr << "================Third time=====================\n";
DB::KeeperLogStore changelog3("./logs", 100, true, params.enable_compression);
changelog3.init(1, 0);
entry = getLogEntry("hello_world", 1000);
changelog3.append(entry);
changelog3.end_of_append_batch(0, 0);
EXPECT_EQ(changelog3.next_slot(), 4);
{
LOG_INFO(log, "================Third time=====================");
DB::KeeperLogStore changelog3("./logs", 100, true, params.enable_compression);
changelog3.init(1, 0);
auto entry = getLogEntry("hello_world", 1000);
changelog3.append(entry);
changelog3.end_of_append_batch(0, 0);
EXPECT_EQ(changelog3.next_slot(), 4);
waitDurableLogs(changelog3);
}

std::cerr << "================Fourth time=====================\n";
DB::KeeperLogStore changelog4("./logs", 100, true, params.enable_compression);
changelog4.init(1, 0);
entry = getLogEntry("hello_world", 1000);
changelog4.append(entry);
changelog4.end_of_append_batch(0, 0);
EXPECT_EQ(changelog4.next_slot(), 5);
{
LOG_INFO(log, "================Fourth time=====================");
DB::KeeperLogStore changelog4("./logs", 100, true, params.enable_compression);
changelog4.init(1, 0);
auto entry = getLogEntry("hello_world", 1000);
changelog4.append(entry);
changelog4.end_of_append_batch(0, 0);
EXPECT_EQ(changelog4.next_slot(), 5);
waitDurableLogs(changelog4);
}
}

TEST_P(CoordinationTest, TestStorageSnapshotEqual)

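The recurring change across the hunks above is an added waitDurableLogs(...) call between end_of_append_batch(...) and the file-existence assertions, so the tests no longer race the asynchronous flush of the changelog files. A minimal sketch of what such a wait helper could look like follows; it is an illustration only, and last_durable_index() is an assumed accessor name, not necessarily the interface the real helper in gtest_coordination.cpp uses.

    #include <chrono>
    #include <thread>

    // Hypothetical sketch: poll the log store until everything appended so far
    // is reported durable, so later fs::exists() checks see the rotated files.
    // last_durable_index() and next_slot() are assumed accessors.
    template <typename LogStore>
    void wait_durable_logs_sketch(LogStore & store)
    {
        while (store.last_durable_index() + 1 < store.next_slot())
            std::this_thread::sleep_for(std::chrono::milliseconds(1));
    }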
@ -398,6 +398,7 @@ static constexpr UInt64 operator""_GiB(unsigned long long value)
M(UInt64, max_untracked_memory, (4 * 1024 * 1024), "Small allocations and deallocations are grouped in thread local variable and tracked or profiled only when amount (in absolute value) becomes larger than specified value. If the value is higher than 'memory_profiler_step' it will be effectively lowered to 'memory_profiler_step'.", 0) \
M(UInt64, memory_profiler_step, (4 * 1024 * 1024), "Whenever query memory usage becomes larger than every next step in number of bytes the memory profiler will collect the allocating stack trace. Zero means disabled memory profiler. Values lower than a few megabytes will slow down query processing.", 0) \
M(Float, memory_profiler_sample_probability, 0., "Collect random allocations and deallocations and write them into system.trace_log with 'MemorySample' trace_type. The probability is for every alloc/free regardless to the size of the allocation. Note that sampling happens only when the amount of untracked memory exceeds 'max_untracked_memory'. You may want to set 'max_untracked_memory' to 0 for extra fine grained sampling.", 0) \
M(Bool, trace_profile_events, false, "Send to system.trace_log profile event and value of increment on each increment with 'ProfileEvent' trace_type", 0) \
\
M(UInt64, memory_usage_overcommit_max_wait_microseconds, 5'000'000, "Maximum time thread will wait for memory to be freed in the case of memory overcommit. If timeout is reached and memory is not freed, exception is thrown.", 0) \
\
@ -582,6 +583,7 @@ static constexpr UInt64 operator""_GiB(unsigned long long value)
M(Bool, query_plan_filter_push_down, true, "Allow to push down filter by predicate query plan step", 0) \
M(Bool, query_plan_optimize_primary_key, true, "Analyze primary key using query plan (instead of AST)", 0) \
M(Bool, query_plan_read_in_order, true, "Use query plan for read-in-order optimisation", 0) \
M(Bool, query_plan_aggregation_in_order, true, "Use query plan for aggregation-in-order optimisation", 0) \
M(UInt64, regexp_max_matches_per_row, 1000, "Max matches of any single regexp per row, used to safeguard 'extractAllGroupsHorizontal' against consuming too much memory with greedy RE.", 0) \
\
M(UInt64, limit, 0, "Limit on read rows from the most 'end' result for select query, default 0 means no limit length", 0) \
@ -782,6 +784,7 @@ static constexpr UInt64 operator""_GiB(unsigned long long value)
M(Bool, input_format_values_accurate_types_of_literals, true, "For Values format: when parsing and interpreting expressions using template, check actual type of literal to avoid possible overflow and precision issues.", 0) \
M(Bool, input_format_avro_allow_missing_fields, false, "For Avro/AvroConfluent format: when field is not found in schema use default value instead of error", 0) \
M(Bool, input_format_avro_null_as_default, false, "For Avro/AvroConfluent format: insert default in case of null and non Nullable column", 0) \
M(UInt64, format_binary_max_string_size, 1_GiB, "The maximum allowed size for String in RowBinary format. It prevents allocating large amount of memory in case of corrupted data. 0 means there is no limit", 0) \
M(URI, format_avro_schema_registry_url, "", "For AvroConfluent format: Confluent Schema Registry URL.", 0) \
\
M(Bool, output_format_json_quote_64bit_integers, true, "Controls quoting of 64-bit integers in JSON output format.", 0) \

@ -78,6 +78,7 @@ namespace SettingsChangesHistory
/// It's used to implement `compatibility` setting (see https://github.com/ClickHouse/ClickHouse/issues/35972)
static std::map<ClickHouseVersion, SettingsChangesHistory::SettingsChanges> settings_changes_history =
{
{"22.12", {{"format_binary_max_string_size", 0, 1_GiB, "Prevent allocating large amount of memory"}}},
{"22.11", {{"use_structure_from_insertion_table_in_table_functions", 0, 2, "Improve using structure from insertion table in table functions"}}},
{"22.9", {{"force_grouping_standard_compatibility", false, true, "Make GROUPING function output the same as in SQL standard and other DBMS"}}},
{"22.7", {{"cross_to_inner_join_rewrite", 1, 2, "Force rewrite comma join to inner"},

@ -51,13 +51,13 @@ struct SortColumnDescription
SortColumnDescription() = default;

explicit SortColumnDescription(
const std::string & column_name_,
std::string column_name_,
int direction_ = 1,
int nulls_direction_ = 1,
const std::shared_ptr<Collator> & collator_ = nullptr,
bool with_fill_ = false,
const FillColumnDescription & fill_description_ = {})
: column_name(column_name_)
: column_name(std::move(column_name_))
, direction(direction_)
, nulls_direction(nulls_direction_)
, collator(collator_)

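The constructor hunk above replaces a const std::string & parameter with a by-value std::string that is then moved into the member. A small standalone illustration of this sink-parameter idiom follows; the Named type is made up for the example and is not part of the commit.

    #include <string>
    #include <utility>

    // Sink-parameter idiom: take the string by value and move it into the member.
    // A caller's temporary reaches the member via moves only, while an lvalue costs
    // one copy plus a move, which is no worse than the const-reference version.
    struct Named
    {
        explicit Named(std::string name_) : name(std::move(name_)) {}
        std::string name;
    };

    int main()
    {
        Named a{"alpha"};      // rvalue argument: no deep copy
        std::string s = "beta";
        Named b{s};            // lvalue argument: one copy into the parameter, then a move
        return 0;
    }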
@ -303,17 +303,17 @@ public:
*/

/// There is two variants for binary serde. First variant work with Field.
virtual void serializeBinary(const Field & field, WriteBuffer & ostr) const = 0;
virtual void deserializeBinary(Field & field, ReadBuffer & istr) const = 0;
virtual void serializeBinary(const Field & field, WriteBuffer & ostr, const FormatSettings &) const = 0;
virtual void deserializeBinary(Field & field, ReadBuffer & istr, const FormatSettings &) const = 0;

/// Other variants takes a column, to avoid creating temporary Field object.
/// Column must be non-constant.

/// Serialize one value of a column at specified row number.
virtual void serializeBinary(const IColumn & column, size_t row_num, WriteBuffer & ostr) const = 0;
virtual void serializeBinary(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings &) const = 0;
/// Deserialize one value and insert into a column.
/// If method will throw an exception, then column will be in same state as before call to method.
virtual void deserializeBinary(IColumn & column, ReadBuffer & istr) const = 0;
virtual void deserializeBinary(IColumn & column, ReadBuffer & istr, const FormatSettings &) const = 0;

/** Text serialization with escaping but without quoting.
*/

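The interface change above means every per-value binary serialize/deserialize call now has to be given a FormatSettings; the serialization hunks that follow update their overrides accordingly. A hedged caller-side sketch is below: writeSingleValue is invented for illustration, and passing a default-constructed FormatSettings is an assumption about typical call sites rather than something taken from this commit.

    #include <DataTypes/Serializations/ISerialization.h>
    #include <Formats/FormatSettings.h>

    // Illustrative only: adapt a call site to the new signature by threading a
    // FormatSettings object through. writeSingleValue is not a ClickHouse function.
    void writeSingleValue(const DB::ISerialization & serialization,
                          const DB::IColumn & column, size_t row_num,
                          DB::WriteBuffer & ostr)
    {
        DB::FormatSettings settings;   // defaults; real callers may pass format-specific settings
        serialization.serializeBinary(column, row_num, ostr, settings);
    }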
@ -17,13 +17,13 @@
namespace DB
{

void SerializationAggregateFunction::serializeBinary(const Field & field, WriteBuffer & ostr) const
void SerializationAggregateFunction::serializeBinary(const Field & field, WriteBuffer & ostr, const FormatSettings &) const
{
const AggregateFunctionStateData & state = field.get<const AggregateFunctionStateData &>();
writeBinary(state.data, ostr);
}

void SerializationAggregateFunction::deserializeBinary(Field & field, ReadBuffer & istr) const
void SerializationAggregateFunction::deserializeBinary(Field & field, ReadBuffer & istr, const FormatSettings &) const
{
field = AggregateFunctionStateData();
AggregateFunctionStateData & s = field.get<AggregateFunctionStateData &>();
@ -31,12 +31,12 @@ void SerializationAggregateFunction::deserializeBinary(Field & field, ReadBuffer
s.name = type_name;
}

void SerializationAggregateFunction::serializeBinary(const IColumn & column, size_t row_num, WriteBuffer & ostr) const
void SerializationAggregateFunction::serializeBinary(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings &) const
{
function->serialize(assert_cast<const ColumnAggregateFunction &>(column).getData()[row_num], ostr, version);
}

void SerializationAggregateFunction::deserializeBinary(IColumn & column, ReadBuffer & istr) const
void SerializationAggregateFunction::deserializeBinary(IColumn & column, ReadBuffer & istr, const FormatSettings &) const
{
ColumnAggregateFunction & column_concrete = assert_cast<ColumnAggregateFunction &>(column);


@ -22,11 +22,11 @@ public:
: function(function_), type_name(std::move(type_name_)), version(version_) {}

/// NOTE These two functions for serializing single values are incompatible with the functions below.
void serializeBinary(const Field & field, WriteBuffer & ostr) const override;
void deserializeBinary(Field & field, ReadBuffer & istr) const override;
void serializeBinary(const Field & field, WriteBuffer & ostr, const FormatSettings &) const override;
void deserializeBinary(Field & field, ReadBuffer & istr, const FormatSettings &) const override;

void serializeBinary(const IColumn & column, size_t row_num, WriteBuffer & ostr) const override;
void deserializeBinary(IColumn & column, ReadBuffer & istr) const override;
void serializeBinary(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings &) const override;
void deserializeBinary(IColumn & column, ReadBuffer & istr, const FormatSettings &) const override;
void serializeBinaryBulk(const IColumn & column, WriteBuffer & ostr, size_t offset, size_t limit) const override;
void deserializeBinaryBulk(IColumn & column, ReadBuffer & istr, size_t limit, double avg_value_size_hint) const override;
void serializeText(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings &) const override;

@ -27,18 +27,18 @@ static constexpr size_t MAX_ARRAY_SIZE = 1ULL << 30;
static constexpr size_t MAX_ARRAYS_SIZE = 1ULL << 40;


void SerializationArray::serializeBinary(const Field & field, WriteBuffer & ostr) const
void SerializationArray::serializeBinary(const Field & field, WriteBuffer & ostr, const FormatSettings & settings) const
{
const Array & a = field.get<const Array &>();
writeVarUInt(a.size(), ostr);
for (size_t i = 0; i < a.size(); ++i)
{
nested->serializeBinary(a[i], ostr);
nested->serializeBinary(a[i], ostr, settings);
}
}


void SerializationArray::deserializeBinary(Field & field, ReadBuffer & istr) const
void SerializationArray::deserializeBinary(Field & field, ReadBuffer & istr, const FormatSettings & settings) const
{
size_t size;
readVarUInt(size, istr);
@ -46,11 +46,11 @@ void SerializationArray::deserializeBinary(Field & field, ReadBuffer & istr) con
Array & arr = field.get<Array &>();
arr.reserve(size);
for (size_t i = 0; i < size; ++i)
nested->deserializeBinary(arr.emplace_back(), istr);
nested->deserializeBinary(arr.emplace_back(), istr, settings);
}


void SerializationArray::serializeBinary(const IColumn & column, size_t row_num, WriteBuffer & ostr) const
void SerializationArray::serializeBinary(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const
{
const ColumnArray & column_array = assert_cast<const ColumnArray &>(column);
const ColumnArray::Offsets & offsets = column_array.getOffsets();
@ -63,11 +63,11 @@ void SerializationArray::serializeBinary(const IColumn & column, size_t row_num,

const IColumn & nested_column = column_array.getData();
for (size_t i = offset; i < next_offset; ++i)
nested->serializeBinary(nested_column, i, ostr);
nested->serializeBinary(nested_column, i, ostr, settings);
}


void SerializationArray::deserializeBinary(IColumn & column, ReadBuffer & istr) const
void SerializationArray::deserializeBinary(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const
{
ColumnArray & column_array = assert_cast<ColumnArray &>(column);
ColumnArray::Offsets & offsets = column_array.getOffsets();
@ -81,7 +81,7 @@ void SerializationArray::deserializeBinary(IColumn & column, ReadBuffer & istr)
try
{
for (; i < size; ++i)
nested->deserializeBinary(nested_column, istr);
nested->deserializeBinary(nested_column, istr, settings);
}
catch (...)
{

@ -13,10 +13,10 @@ private:
public:
explicit SerializationArray(const SerializationPtr & nested_) : nested(nested_) {}

void serializeBinary(const Field & field, WriteBuffer & ostr) const override;
void deserializeBinary(Field & field, ReadBuffer & istr) const override;
void serializeBinary(const IColumn & column, size_t row_num, WriteBuffer & ostr) const override;
void deserializeBinary(IColumn & column, ReadBuffer & istr) const override;
void serializeBinary(const Field & field, WriteBuffer & ostr, const FormatSettings & settings) const override;
void deserializeBinary(Field & field, ReadBuffer & istr, const FormatSettings & settings) const override;
void serializeBinary(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const override;
void deserializeBinary(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const override;

void serializeText(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings &) const override;
void deserializeText(IColumn & column, ReadBuffer & istr, const FormatSettings &, bool whole) const override;

@ -12,14 +12,14 @@ namespace DB
{

template <typename T>
void SerializationDecimalBase<T>::serializeBinary(const Field & field, WriteBuffer & ostr) const
void SerializationDecimalBase<T>::serializeBinary(const Field & field, WriteBuffer & ostr, const FormatSettings &) const
{
FieldType x = field.get<DecimalField<T>>();
writeBinary(x, ostr);
}

template <typename T>
void SerializationDecimalBase<T>::serializeBinary(const IColumn & column, size_t row_num, WriteBuffer & ostr) const
void SerializationDecimalBase<T>::serializeBinary(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings &) const
{
const FieldType & x = assert_cast<const ColumnType &>(column).getElement(row_num);
writeBinary(x, ostr);
@ -39,7 +39,7 @@ void SerializationDecimalBase<T>::serializeBinaryBulk(const IColumn & column, Wr
}

template <typename T>
void SerializationDecimalBase<T>::deserializeBinary(Field & field, ReadBuffer & istr) const
void SerializationDecimalBase<T>::deserializeBinary(Field & field, ReadBuffer & istr, const FormatSettings &) const
{
typename FieldType::NativeType x;
readBinary(x, istr);
@ -47,7 +47,7 @@ void SerializationDecimalBase<T>::deserializeBinary(Field & field, ReadBuffer &
}

template <typename T>
void SerializationDecimalBase<T>::deserializeBinary(IColumn & column, ReadBuffer & istr) const
void SerializationDecimalBase<T>::deserializeBinary(IColumn & column, ReadBuffer & istr, const FormatSettings &) const
{
typename FieldType::NativeType x;
readBinary(x, istr);

@ -20,12 +20,12 @@ public:
SerializationDecimalBase(UInt32 precision_, UInt32 scale_)
: precision(precision_), scale(scale_) {}

void serializeBinary(const Field & field, WriteBuffer & ostr) const override;
void serializeBinary(const IColumn & column, size_t row_num, WriteBuffer & ostr) const override;
void serializeBinary(const Field & field, WriteBuffer & ostr, const FormatSettings &) const override;
void serializeBinary(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings &) const override;
void serializeBinaryBulk(const IColumn & column, WriteBuffer & ostr, size_t offset, size_t limit) const override;

void deserializeBinary(Field & field, ReadBuffer & istr) const override;
void deserializeBinary(IColumn & column, ReadBuffer & istr) const override;
void deserializeBinary(Field & field, ReadBuffer & istr, const FormatSettings &) const override;
void deserializeBinary(IColumn & column, ReadBuffer & istr, const FormatSettings &) const override;
void deserializeBinaryBulk(IColumn & column, ReadBuffer & istr, size_t limit, double avg_value_size_hint) const override;
};


@ -26,7 +26,7 @@ namespace ErrorCodes

static constexpr size_t MAX_STRINGS_SIZE = 1ULL << 30;

void SerializationFixedString::serializeBinary(const Field & field, WriteBuffer & ostr) const
void SerializationFixedString::serializeBinary(const Field & field, WriteBuffer & ostr, const FormatSettings &) const
{
const String & s = field.get<const String &>();
ostr.write(s.data(), std::min(s.size(), n));
@ -36,7 +36,7 @@ void SerializationFixedString::serializeBinary(const Field & field, WriteBuffer
}


void SerializationFixedString::deserializeBinary(Field & field, ReadBuffer & istr) const
void SerializationFixedString::deserializeBinary(Field & field, ReadBuffer & istr, const FormatSettings &) const
{
field = String();
String & s = field.get<String &>();
@ -45,13 +45,13 @@ void SerializationFixedString::deserializeBinary(Field & field, ReadBuffer & ist
}


void SerializationFixedString::serializeBinary(const IColumn & column, size_t row_num, WriteBuffer & ostr) const
void SerializationFixedString::serializeBinary(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings &) const
{
ostr.write(reinterpret_cast<const char *>(&assert_cast<const ColumnFixedString &>(column).getChars()[n * row_num]), n);
}


void SerializationFixedString::deserializeBinary(IColumn & column, ReadBuffer & istr) const
void SerializationFixedString::deserializeBinary(IColumn & column, ReadBuffer & istr, const FormatSettings &) const
{
ColumnFixedString::Chars & data = assert_cast<ColumnFixedString &>(column).getChars();
size_t old_size = data.size();

Some files were not shown because too many files have changed in this diff.