Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-09-19 16:20:50 +00:00)

Commit 91f8fe1af6: Merge branch 'master' into vdimir/full_text_index_packed_1
.github/actions/debug/action.yml (vendored, 34 changes)

@@ -4,15 +4,31 @@ description: Prints workflow debug info
 runs:
   using: "composite"
   steps:
-    - name: Print envs
+    - name: Envs, event.json and contexts
       shell: bash
       run: |
-        echo "::group::Envs"
-        env
-        echo "::endgroup::"
-    - name: Print Event.json
-      shell: bash
-      run: |
-        echo "::group::Event.json"
+        echo '::group::Environment variables'
+        env | sort
+        echo '::endgroup::'
+
+        echo '::group::event.json'
         python3 -m json.tool "$GITHUB_EVENT_PATH"
-        echo "::endgroup::"
+        echo '::endgroup::'
+
+        cat << 'EOF'
+        ::group::github context
+        ${{ toJSON(github) }}
+        ::endgroup::
+
+        ::group::env context
+        ${{ toJSON(env) }}
+        ::endgroup::
+
+        ::group::runner context
+        ${{ toJSON(runner) }}
+        ::endgroup::
+
+        ::group::job context
+        ${{ toJSON(job) }}
+        ::endgroup::
+        EOF
.github/workflows/backport_branches.yml (vendored, 2 changes)

@@ -27,6 +27,8 @@ jobs:
           clear-repository: true # to ensure correct digests
           fetch-depth: 0 # to get version
           filter: tree:0
+      - name: Debug Info
+        uses: ./.github/actions/debug
       - name: Labels check
         run: |
           cd "$GITHUB_WORKSPACE/tests/ci"
.github/workflows/cherry_pick.yml (vendored, 2 changes)

@@ -33,6 +33,8 @@ jobs:
           clear-repository: true
           token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
           fetch-depth: 0
+      - name: Debug Info
+        uses: ./.github/actions/debug
       - name: Cherry pick
         run: |
           cd "$GITHUB_WORKSPACE/tests/ci"
.github/workflows/create_release.yml (vendored, 4 changes)

@@ -56,13 +56,13 @@ jobs:
       GH_TOKEN: ${{ secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN }}
     runs-on: [self-hosted, release-maker]
     steps:
-      - name: DebugInfo
-        uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6
       - name: Check out repository code
         uses: ClickHouse/checkout@v1
         with:
           token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
           fetch-depth: 0
+      - name: Debug Info
+        uses: ./.github/actions/debug
       - name: Prepare Release Info
         shell: bash
         run: |
.github/workflows/docker_test_images.yml (vendored, 1 change)

@@ -11,6 +11,7 @@ name: Build docker images
       required: false
       type: boolean
       default: false
+
 jobs:
   DockerBuildAarch64:
     runs-on: [self-hosted, style-checker-aarch64]
.github/workflows/jepsen.yml (vendored, 5 changes)

@@ -8,20 +8,21 @@ on:  # yamllint disable-line rule:truthy
   schedule:
     - cron: '0 */6 * * *'
   workflow_dispatch:
+
 jobs:
   RunConfig:
     runs-on: [self-hosted, style-checker-aarch64]
     outputs:
       data: ${{ steps.runconfig.outputs.CI_DATA }}
     steps:
-      - name: DebugInfo
-        uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6
       - name: Check out repository code
         uses: ClickHouse/checkout@v1
         with:
           clear-repository: true # to ensure correct digests
           fetch-depth: 0 # to get version
           filter: tree:0
+      - name: Debug Info
+        uses: ./.github/actions/debug
       - name: PrepareRunConfig
         id: runconfig
         run: |
.github/workflows/master.yml (vendored, 4 changes)

@@ -15,14 +15,14 @@ jobs:
     outputs:
       data: ${{ steps.runconfig.outputs.CI_DATA }}
     steps:
-      - name: DebugInfo
-        uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6
       - name: Check out repository code
         uses: ClickHouse/checkout@v1
         with:
           clear-repository: true # to ensure correct digests
           fetch-depth: 0 # to get version
           filter: tree:0
+      - name: Debug Info
+        uses: ./.github/actions/debug
       - name: Merge sync PR
         run: |
           cd "$GITHUB_WORKSPACE/tests/ci"
.github/workflows/merge_queue.yml (vendored, 4 changes)

@@ -14,14 +14,14 @@ jobs:
     outputs:
       data: ${{ steps.runconfig.outputs.CI_DATA }}
     steps:
-      - name: DebugInfo
-        uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6
       - name: Check out repository code
         uses: ClickHouse/checkout@v1
         with:
           clear-repository: true # to ensure correct digests
           fetch-depth: 0 # to get a version
           filter: tree:0
+      - name: Debug Info
+        uses: ./.github/actions/debug
       - name: Cancel PR workflow
         run: |
           python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --cancel-previous-run
.github/workflows/nightly.yml (vendored, 4 changes)

@@ -15,14 +15,14 @@ jobs:
     outputs:
       data: ${{ steps.runconfig.outputs.CI_DATA }}
     steps:
-      - name: DebugInfo
-        uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6
       - name: Check out repository code
         uses: ClickHouse/checkout@v1
         with:
           clear-repository: true # to ensure correct digests
           fetch-depth: 0 # to get version
           filter: tree:0
+      - name: Debug Info
+        uses: ./.github/actions/debug
       - name: PrepareRunConfig
         id: runconfig
         run: |
.github/workflows/pull_request.yml (vendored, 4 changes)

@@ -25,14 +25,14 @@ jobs:
     outputs:
       data: ${{ steps.runconfig.outputs.CI_DATA }}
     steps:
-      - name: DebugInfo
-        uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6
       - name: Check out repository code
         uses: ClickHouse/checkout@v1
         with:
           clear-repository: true # to ensure correct digests
           fetch-depth: 0 # to get a version
           filter: tree:0
+      - name: Debug Info
+        uses: ./.github/actions/debug
       - name: Cancel previous Sync PR workflow
         run: |
           python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --cancel-previous-run
.github/workflows/release_branches.yml (vendored, 2 changes)

@@ -24,6 +24,8 @@ jobs:
           clear-repository: true # to ensure correct digests
           fetch-depth: 0 # to get version
           filter: tree:0
+      - name: Debug Info
+        uses: ./.github/actions/debug
       - name: Labels check
         run: |
           cd "$GITHUB_WORKSPACE/tests/ci"
.github/workflows/reusable_simple_job.yml (vendored, 4 changes)

@@ -62,8 +62,6 @@ jobs:
     env:
       GITHUB_JOB_OVERRIDDEN: ${{inputs.test_name}}
     steps:
-      - name: DebugInfo
-        uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6
       - name: Check out repository code
         uses: ClickHouse/checkout@v1
         with:
@@ -72,6 +70,8 @@ jobs:
           submodules: ${{inputs.submodules}}
           fetch-depth: ${{inputs.checkout_depth}}
           filter: tree:0
+      - name: Debug Info
+        uses: ./.github/actions/debug
       - name: Set build envs
         run: |
           cat >> "$GITHUB_ENV" << 'EOF'
CITATION.cff (new file, 31 lines)

@@ -0,0 +1,31 @@
+# This CITATION.cff file was generated with cffinit.
+
+cff-version: 1.2.0
+title: "ClickHouse"
+message: "If you use this software, please cite it as below."
+type: software
+authors:
+  - family-names: "Milovidov"
+    given-names: "Alexey"
+repository-code: 'https://github.com/ClickHouse/ClickHouse'
+url: 'https://clickhouse.com'
+license: Apache-2.0
+preferred-citation:
+  type: article
+  authors:
+    - family-names: "Schulze"
+      given-names: "Robert"
+    - family-names: "Schreiber"
+      given-names: "Tom"
+    - family-names: "Yatsishin"
+      given-names: "Ilya"
+    - family-names: "Dahimene"
+      given-names: "Ryadh"
+    - family-names: "Milovidov"
+      given-names: "Alexey"
+  journal: "Proceedings of the VLDB Endowment"
+  title: "ClickHouse - Lightning Fast Analytics for Everyone"
+  year: 2024
+  volume: 17
+  issue: 12
+  doi: 10.14778/3685800.3685802
README.md (18 changes)

@@ -42,21 +42,19 @@ Keep an eye out for upcoming meetups and events around the world. Somewhere else
 
 The following upcoming meetups are featuring creator of ClickHouse & CTO, Alexey Milovidov:
 
-* [ClickHouse Guangzhou User Group Meetup](https://mp.weixin.qq.com/s/GSvo-7xUoVzCsuUvlLTpCw) - August 25
-* [San Francisco Meetup (Cloudflare)](https://www.meetup.com/clickhouse-silicon-valley-meetup-group/events/302540575) - September 5
 * [Raleigh Meetup (Deutsche Bank)](https://www.meetup.com/triangletechtalks/events/302723486/) - September 9
 * [New York Meetup (Rokt)](https://www.meetup.com/clickhouse-new-york-user-group/events/302575342) - September 10
 * [Chicago Meetup (Jump Capital)](https://lu.ma/43tvmrfw) - September 12
 
 Other upcoming meetups
-* [Seattle Meetup (Statsig)](https://www.meetup.com/clickhouse-seattle-user-group/events/302518075/) - August 27
-* [Melbourne Meetup](https://www.meetup.com/clickhouse-australia-user-group/events/302732666/) - August 27
-* [Sydney Meetup](https://www.meetup.com/clickhouse-australia-user-group/events/302862966/) - September 5
-* [Zurich Meetup](https://www.meetup.com/clickhouse-switzerland-meetup-group/events/302267429/) - September 5
 * [Toronto Meetup (Shopify)](https://www.meetup.com/clickhouse-toronto-user-group/events/301490855/) - September 10
 * [Austin Meetup](https://www.meetup.com/clickhouse-austin-user-group/events/302558689/) - September 17
 * [London Meetup](https://www.meetup.com/clickhouse-london-user-group/events/302977267) - September 17
+* [Bangalore Meetup](https://www.meetup.com/clickhouse-bangalore-user-group/events/303208274/) - September 18
 * [Tel Aviv Meetup](https://www.meetup.com/clickhouse-meetup-israel/events/303095121) - September 22
+* [Jakarta Meetup](https://www.meetup.com/clickhouse-indonesia-user-group/events/303191359/) - October 1
+* [Singapore Meetup](https://www.meetup.com/clickhouse-singapore-meetup-group/events/303212064/) - October 3
 * [Madrid Meetup](https://www.meetup.com/clickhouse-spain-user-group/events/303096564/) - October 22
 * [Barcelona Meetup](https://www.meetup.com/clickhouse-spain-user-group/events/303096876/) - October 29
 * [Oslo Meetup](https://www.meetup.com/open-source-real-time-data-warehouse-real-time-analytics/events/302938622) - October 31
@@ -64,7 +62,13 @@ Other upcoming meetups
 * [Dubai Meetup](https://www.meetup.com/clickhouse-dubai-meetup-group/events/303096989/) - November 21
 * [Paris Meetup](https://www.meetup.com/clickhouse-france-user-group/events/303096434) - November 26
 
+Recently completed events
+* [ClickHouse Guangzhou User Group Meetup](https://mp.weixin.qq.com/s/GSvo-7xUoVzCsuUvlLTpCw) - August 25
+* [Seattle Meetup (Statsig)](https://www.meetup.com/clickhouse-seattle-user-group/events/302518075/) - August 27
+* [Melbourne Meetup](https://www.meetup.com/clickhouse-australia-user-group/events/302732666/) - August 27
+* [Sydney Meetup](https://www.meetup.com/clickhouse-australia-user-group/events/302862966/) - September 5
+* [Zurich Meetup](https://www.meetup.com/clickhouse-switzerland-meetup-group/events/302267429/) - September 5
+* [San Francisco Meetup (Cloudflare)](https://www.meetup.com/clickhouse-silicon-valley-meetup-group/events/302540575) - September 5
 
 ## Recent Recordings
 * **Recent Meetup Videos**: [Meetup Playlist](https://www.youtube.com/playlist?list=PL0Z2YDlm0b3iNDUzpY1S3L_iV4nARda_U) Whenever possible recordings of the ClickHouse Community Meetups are edited and presented as individual talks. Current featuring "Modern SQL in 2023", "Fast, Concurrent, and Consistent Asynchronous INSERTS in ClickHouse", and "Full-Text Indices: Design and Experiments"
base/harmful/harmful.c

@@ -66,13 +66,11 @@ TRAP(gethostbyname)
 TRAP(gethostbyname2)
 TRAP(gethostent)
 TRAP(getlogin)
-TRAP(getmntent)
 TRAP(getnetbyaddr)
 TRAP(getnetbyname)
 TRAP(getnetent)
 TRAP(getnetgrent)
 TRAP(getnetgrent_r)
-TRAP(getopt)
 TRAP(getopt_long)
 TRAP(getopt_long_only)
 TRAP(getpass)
@@ -133,7 +131,6 @@ TRAP(nrand48)
 TRAP(__ppc_get_timebase_freq)
 TRAP(ptsname)
 TRAP(putchar_unlocked)
-TRAP(putenv)
 TRAP(pututline)
 TRAP(pututxline)
 TRAP(putwchar_unlocked)
@@ -148,7 +145,6 @@ TRAP(sethostent)
 TRAP(sethostid)
 TRAP(setkey)
 //TRAP(setlocale) // Used by replxx at startup
-TRAP(setlogmask)
 TRAP(setnetent)
 TRAP(setnetgrent)
 TRAP(setprotoent)
@@ -203,7 +199,6 @@ TRAP(lgammal)
 TRAP(nftw)
 TRAP(nl_langinfo)
 TRAP(putc_unlocked)
-TRAP(rand)
 /** In the current POSIX.1 specification (POSIX.1-2008), readdir() is not required to be thread-safe. However, in modern
  * implementations (including the glibc implementation), concurrent calls to readdir() that specify different directory streams
  * are thread-safe. In cases where multiple threads must read from the same directory stream, using readdir() with external
@@ -288,4 +283,14 @@ TRAP(tss_get)
 TRAP(tss_set)
 TRAP(tss_delete)
 
+#ifndef USE_MUSL
+/// These produce duplicate symbol errors when statically linking with musl.
+/// Maybe we can remove them from the musl fork.
+TRAP(getopt)
+TRAP(putenv)
+TRAP(setlogmask)
+TRAP(rand)
+TRAP(getmntent)
+#endif
+
 #endif
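The last hunk moves five traps behind #ifndef USE_MUSL because defining these symbols collides with musl's own definitions when linking statically. Each TRAP(func) entry defines a stub that shadows the libc symbol and aborts on use; the actual macro definition lies outside the captured hunks, so the following is only a hedged sketch of the mechanism (message text is illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    /// Sketch only: in the real harmful.c (C code, no name mangling) the
    /// defined symbol replaces the libc one at static link time.
    #define TRAP(func) \
        void func() \
        { \
            fprintf(stderr, "Aborting: attempt to call harmful function %s\n", #func); \
            abort(); \
        }

    TRAP(getmntent) /* any call to getmntent() now aborts */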
base/poco/Foundation/src/Path_UNIX.cpp

@@ -48,25 +48,17 @@ std::string PathImpl::currentImpl()
 std::string PathImpl::homeImpl()
 {
     std::string path;
-#if defined(_POSIX_C_SOURCE) || defined(_BSD_SOURCE) || defined(_POSIX_C_SOURCE)
     size_t buf_size = 1024; // Same as glibc use for getpwuid
     std::vector<char> buf(buf_size);
     struct passwd res;
     struct passwd* pwd = nullptr;
 
     getpwuid_r(getuid(), &res, buf.data(), buf_size, &pwd);
-#else
-    struct passwd* pwd = getpwuid(getuid());
-#endif
     if (pwd)
         path = pwd->pw_dir;
     else
     {
-#if defined(_POSIX_C_SOURCE) || defined(_BSD_SOURCE) || defined(_POSIX_C_SOURCE)
         getpwuid_r(getuid(), &res, buf.data(), buf_size, &pwd);
-#else
-        pwd = getpwuid(geteuid());
-#endif
         if (pwd)
             path = pwd->pw_dir;
         else
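The patch drops the getpwuid() fallback and uses the reentrant getpwuid_r() unconditionally, which is safe when several threads resolve the home directory at once. One caveat: with a fixed 1024-byte buffer, getpwuid_r() can fail with ERANGE for unusually large passwd entries. A hedged sketch of the standard grow-and-retry idiom (not what the patch does, just the defensive variant):

    #include <pwd.h>
    #include <unistd.h>
    #include <cerrno>
    #include <string>
    #include <vector>

    // Resolve the home directory with getpwuid_r(), retrying with a
    // larger buffer when the passwd entry does not fit.
    std::string homeDirectory()
    {
        std::vector<char> buf(1024);
        struct passwd res;
        struct passwd * pwd = nullptr;
        while (getpwuid_r(getuid(), &res, buf.data(), buf.size(), &pwd) == ERANGE)
            buf.resize(buf.size() * 2); // entry larger than buffer: grow and retry
        return pwd ? pwd->pw_dir : std::string();
    }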
base/poco/Net/include/Poco/Net/HTTPSession.h

@@ -19,6 +19,8 @@
 
 
 #include <ios>
+#include <memory>
+#include <functional>
 #include "Poco/Any.h"
 #include "Poco/Buffer.h"
 #include "Poco/Exception.h"
@@ -33,6 +35,27 @@ namespace Net
 {
 
 
+    class IHTTPSessionDataHooks
+    /// Interface to control the stream of data bytes being sent or received through the socket by HTTPSession.
+    /// It allows to monitor, throttle and schedule data streams with syscall granularity.
+    {
+    public:
+        virtual ~IHTTPSessionDataHooks() = default;
+
+        virtual void atStart(int bytes) = 0;
+        /// Called before sending/receiving data `bytes` to/from the socket.
+
+        virtual void atFinish(int bytes) = 0;
+        /// Called when sending/receiving of data `bytes` is successfully finished.
+
+        virtual void atFail() = 0;
+        /// If an error occurred during send/receive, `atFail()` is called instead of `atFinish()`.
+    };
+
+
+    using HTTPSessionDataHooksPtr = std::shared_ptr<IHTTPSessionDataHooks>;
+
+
     class Net_API HTTPSession
     /// HTTPSession implements basic HTTP session management
     /// for both HTTP clients and HTTP servers.
@@ -73,6 +96,12 @@ namespace Net
         Poco::Timespan getReceiveTimeout() const;
         /// Returns receive timeout for the HTTP session.
 
+        void setSendDataHooks(const HTTPSessionDataHooksPtr & sendDataHooks = {});
+        /// Sets data hooks that will be called on every send to the socket.
+
+        void setReceiveDataHooks(const HTTPSessionDataHooksPtr & receiveDataHooks = {});
+        /// Sets data hooks that will be called on every receive from the socket.
+
         bool connected() const;
         /// Returns true if the underlying socket is connected.
 
@@ -211,6 +240,10 @@ namespace Net
         Poco::Exception * _pException;
         Poco::Any _data;
 
+        // Data hooks
+        HTTPSessionDataHooksPtr _sendDataHooks;
+        HTTPSessionDataHooksPtr _receiveDataHooks;
+
         friend class HTTPStreamBuf;
         friend class HTTPHeaderStreamBuf;
         friend class HTTPFixedLengthStreamBuf;
@@ -246,6 +279,16 @@ namespace Net
         return _receiveTimeout;
     }
 
+    inline void HTTPSession::setSendDataHooks(const HTTPSessionDataHooksPtr & sendDataHooks)
+    {
+        _sendDataHooks = sendDataHooks;
+    }
+
+    inline void HTTPSession::setReceiveDataHooks(const HTTPSessionDataHooksPtr & receiveDataHooks)
+    {
+        _receiveDataHooks = receiveDataHooks;
+    }
+
     inline StreamSocket & HTTPSession::socket()
     {
         return _socket;
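The new hook interface makes it possible to observe or throttle every socket syscall the session performs. As a hedged illustration (the class name and counters are ours, not part of the patch; only IHTTPSessionDataHooks and its three methods come from the diff), a hook that counts transferred bytes could look like:

    #include <atomic>
    #include <cstddef>

    #include "Poco/Net/HTTPSession.h"

    // Illustrative only: counts bytes that were actually sent or received.
    class ByteCountingHooks : public Poco::Net::IHTTPSessionDataHooks
    {
    public:
        void atStart(int) override {}                         // a throttler could block here
        void atFinish(int bytes) override { total += bytes; } // bytes actually transferred
        void atFail() override { ++failures; }

        std::atomic<size_t> total{0};
        std::atomic<size_t> failures{0};
    };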
base/poco/Net/src/HTTPSession.cpp

@@ -166,10 +166,17 @@ int HTTPSession::write(const char* buffer, std::streamsize length)
 {
     try
     {
-        return _socket.sendBytes(buffer, (int) length);
+        if (_sendDataHooks)
+            _sendDataHooks->atStart((int) length);
+        int result = _socket.sendBytes(buffer, (int) length);
+        if (_sendDataHooks)
+            _sendDataHooks->atFinish(result);
+        return result;
     }
     catch (Poco::Exception& exc)
     {
+        if (_sendDataHooks)
+            _sendDataHooks->atFail();
         setException(exc);
         throw;
     }
@@ -180,10 +187,17 @@ int HTTPSession::receive(char* buffer, int length)
 {
     try
     {
-        return _socket.receiveBytes(buffer, length);
+        if (_receiveDataHooks)
+            _receiveDataHooks->atStart(length);
+        int result = _socket.receiveBytes(buffer, length);
+        if (_receiveDataHooks)
+            _receiveDataHooks->atFinish(result);
+        return result;
     }
     catch (Poco::Exception& exc)
     {
+        if (_receiveDataHooks)
+            _receiveDataHooks->atFail();
         setException(exc);
         throw;
     }
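As the two hunks above show, the contract is: atStart() right before the syscall, atFinish() with the syscall's result on success, atFail() when the syscall throws. A hedged usage sketch (HTTPClientSession is a standard Poco subclass of HTTPSession; ByteCountingHooks is the sketch from the header section above):

    #include <memory>

    #include "Poco/Net/HTTPClientSession.h"

    void attachByteCounting(Poco::Net::HTTPClientSession & session)
    {
        auto hooks = std::make_shared<ByteCountingHooks>();
        session.setSendDataHooks(hooks);    // wraps every sendBytes() call
        session.setReceiveDataHooks(hooks); // wraps every receiveBytes() call
    }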
base/poco/Util/CMakeLists.txt

@@ -18,4 +18,4 @@ target_compile_options (_poco_util
     -Wno-zero-as-null-pointer-constant
 )
 target_include_directories (_poco_util SYSTEM PUBLIC "include")
-target_link_libraries (_poco_util PUBLIC Poco::JSON Poco::XML)
+target_link_libraries (_poco_util PUBLIC Poco::JSON Poco::XML Poco::Net)
base/poco/Util/include/Poco/Util/AbstractConfiguration.h

@@ -241,6 +241,20 @@ namespace Util
         /// If the value contains references to other properties (${<property>}), these
         /// are expanded.
 
+        std::string getHost(const std::string & key) const;
+        /// Returns the string value of the host property with the given name.
+        /// Throws a NotFoundException if the key does not exist.
+        /// Throws a SyntaxException if the property is not a valid host (IP address or domain).
+        /// If the value contains references to other properties (${<property>}), these
+        /// are expanded.
+
+        std::string getHost(const std::string & key, const std::string & defaultValue) const;
+        /// If a property with the given key exists, returns the host property's string value,
+        /// otherwise returns the given default value.
+        /// Throws a SyntaxException if the property is not a valid host (IP address or domain).
+        /// If the value contains references to other properties (${<property>}), these
+        /// are expanded.
+
         virtual void setString(const std::string & key, const std::string & value);
         /// Sets the property with the given key to the given value.
         /// An already existing value for the key is overwritten.
@@ -339,12 +353,35 @@ namespace Util
         static bool parseBool(const std::string & value);
         void setRawWithEvent(const std::string & key, std::string value);
 
+        static void checkHostValidity(const std::string & value);
+        /// Throws a SyntaxException if the value is not a valid host (IP address or domain).
+
         virtual ~AbstractConfiguration();
 
     private:
         std::string internalExpand(const std::string & value) const;
         std::string uncheckedExpand(const std::string & value) const;
 
+        static bool isValidIPv4Address(const std::string & value);
+        /// An IPv4 address is considered valid if it is "0.0.0.0" or one of those
+        /// defined by inet_aton() or inet_addr().
+
+        static bool isValidIPv6Address(const std::string & value);
+        /// An IPv6 address is considered valid if it is "::" or one of those
+        /// defined by inet_pton() with the AF_INET6 flag
+        /// (in this case it may have a scope id and may be surrounded by '[', ']').
+
+        static bool isValidDomainName(const std::string & value);
+        /// <domain> ::= <subdomain> [ "." ]
+        /// <subdomain> ::= <label> | <subdomain> "." <label>
+        /// <label> ::= <letter> [ [ <ldh-str> ] <let-dig> ]
+        /// <ldh-str> ::= <let-dig-hyp> | <let-dig-hyp> <ldh-str>
+        /// <let-dig-hyp> ::= <let-dig> | "-"
+        /// <let-dig> ::= <letter> | <digit>
+        /// <letter> ::= any one of the 52 alphabetic characters A through Z in
+        /// upper case and a through z in lower case
+        /// <digit> ::= any one of the ten digits 0 through 9
+
         AbstractConfiguration(const AbstractConfiguration &);
         AbstractConfiguration & operator=(const AbstractConfiguration &);
 
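A hedged usage sketch of the new accessors (MapConfiguration is a stock in-memory AbstractConfiguration; the keys and values are illustrative):

    #include <iostream>

    #include "Poco/AutoPtr.h"
    #include "Poco/Util/MapConfiguration.h"

    void hostConfigExample()
    {
        Poco::AutoPtr<Poco::Util::MapConfiguration> config = new Poco::Util::MapConfiguration;
        config->setString("server.host", "clickhouse.com");

        std::cout << config->getHost("server.host") << "\n";          // "clickhouse.com"
        std::cout << config->getHost("missing", "127.0.0.1") << "\n"; // default value, also validated
        // getHost("missing") without a default would throw NotFoundException;
        // a value like "not a host!" would throw SyntaxException.
    }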
base/poco/Util/src/AbstractConfiguration.cpp

@@ -18,6 +18,7 @@
 #include "Poco/NumberParser.h"
 #include "Poco/NumberFormatter.h"
 #include "Poco/String.h"
+#include "Poco/Net/IPAddressImpl.h"
 
 
 using Poco::Mutex;
@@ -263,6 +264,41 @@ bool AbstractConfiguration::getBool(const std::string& key, bool defaultValue) c
 }
 
 
+std::string AbstractConfiguration::getHost(const std::string& key) const
+{
+    Mutex::ScopedLock lock(_mutex);
+
+    std::string value;
+    if (getRaw(key, value))
+    {
+        std::string expandedValue = internalExpand(value);
+        checkHostValidity(expandedValue);
+        return expandedValue;
+    }
+    else
+        throw NotFoundException(key);
+}
+
+
+std::string AbstractConfiguration::getHost(const std::string& key, const std::string& defaultValue) const
+{
+    Mutex::ScopedLock lock(_mutex);
+
+    std::string value;
+    if (getRaw(key, value))
+    {
+        std::string expandedValue = internalExpand(value);
+        checkHostValidity(expandedValue);
+        return expandedValue;
+    }
+    else
+    {
+        checkHostValidity(defaultValue);
+        return defaultValue;
+    }
+}
+
+
 void AbstractConfiguration::setString(const std::string& key, const std::string& value)
 {
     setRawWithEvent(key, value);
@@ -529,4 +565,68 @@ void AbstractConfiguration::setRawWithEvent(const std::string& key, std::string
 }
 
 
+void AbstractConfiguration::checkHostValidity(const std::string& value)
+{
+    if (!isValidIPv4Address(value) && !isValidIPv6Address(value) && !isValidDomainName(value))
+    {
+        throw SyntaxException("Property is not a valid host name", value);
+    }
+}
+
+
+bool AbstractConfiguration::isValidIPv4Address(const std::string& value)
+{
+    using Poco::Net::Impl::IPv4AddressImpl;
+    IPv4AddressImpl empty4 = IPv4AddressImpl();
+
+    IPv4AddressImpl ipAddress = IPv4AddressImpl::parse(value);
+    return ipAddress != empty4 || value == "0.0.0.0";
+}
+
+
+bool AbstractConfiguration::isValidIPv6Address(const std::string& value)
+{
+#if defined(POCO_HAVE_IPv6)
+    using Poco::Net::Impl::IPv6AddressImpl;
+    IPv6AddressImpl empty6 = IPv6AddressImpl();
+
+    IPv6AddressImpl ipAddress = IPv6AddressImpl::parse(value);
+    return ipAddress != empty6 || value == "::";
+#else
+    return false;
+#endif
+}
+
+
+bool AbstractConfiguration::isValidDomainName(const std::string& value)
+{
+    if (value.empty() || value == "." || value.length() > 253)
+        return false;
+    int labelLength = 0;
+    char oldChar = 0;
+
+    for (char ch : value)
+    {
+        if (ch == '.')
+        {
+            if (labelLength == 0 || labelLength > 63 || oldChar == '-')
+                return false;
+            labelLength = 0;
+        }
+        else if (isalnum(ch) || ch == '-')
+        {
+            if (labelLength == 0 && (ch == '-' || isdigit(ch)))
+                return false;
+            ++labelLength;
+        }
+        else
+        {
+            return false;
+        }
+        oldChar = ch;
+    }
+    return oldChar == '.' || (labelLength > 0 && labelLength <= 63 && oldChar != '-');
+}
+
+
 } } // namespace Poco::Util
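The domain check is a single pass enforcing the RFC 1034 grammar quoted in the header: labels of 1-63 characters built from letters, digits and hyphens, starting with a letter, not ending with a hyphen, at most 253 characters total, with an optional trailing dot. A self-contained restatement with a few test cases (looksLikeDomain is our illustrative name; the real logic is the private isValidDomainName above):

    #include <cassert>
    #include <cctype>
    #include <string>

    // Standalone restatement of the patch's domain rule, for illustration.
    static bool looksLikeDomain(const std::string & value)
    {
        if (value.empty() || value == "." || value.length() > 253)
            return false;
        int labelLength = 0;
        char prev = 0;
        for (char ch : value)
        {
            if (ch == '.')
            {
                if (labelLength == 0 || labelLength > 63 || prev == '-')
                    return false; // empty/overlong label, or label ending in '-'
                labelLength = 0;
            }
            else if (isalnum(static_cast<unsigned char>(ch)) || ch == '-')
            {
                if (labelLength == 0 && (ch == '-' || isdigit(static_cast<unsigned char>(ch))))
                    return false; // a label must start with a letter
                ++labelLength;
            }
            else
                return false;
            prev = ch;
        }
        return prev == '.' || (labelLength > 0 && labelLength <= 63 && prev != '-');
    }

    int main()
    {
        assert(looksLikeDomain("clickhouse.com"));
        assert(looksLikeDomain("clickhouse.com.")); // trailing dot is allowed
        assert(!looksLikeDomain("-bad.example"));   // label cannot start with '-'
        assert(!looksLikeDomain("a..b"));           // empty label between dots
    }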
contrib/curl (vendored submodule, 2 changes)

@@ -1 +1 @@
-Subproject commit de7b3e89218467159a7af72d58cea8425946e97d
+Subproject commit 83bedbd730d62b83744cc26fa0433d3f6e2e4cd6
contrib/grpc (vendored submodule, 2 changes)

@@ -1 +1 @@
-Subproject commit 1716359d2e28d304a250f9df0e6c0ccad03de8db
+Subproject commit 7bc3abe952aba1dc7bce7f2f790dc781cb51a41e
@ -15,162 +15,64 @@ set(ICUDATA_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/icudata/")
|
|||||||
# These lists of sources were generated from build log of the original ICU build system (configure + make).
|
# These lists of sources were generated from build log of the original ICU build system (configure + make).
|
||||||
|
|
||||||
set(ICUUC_SOURCES
|
set(ICUUC_SOURCES
|
||||||
"${ICU_SOURCE_DIR}/common/errorcode.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/putil.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/umath.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/utypes.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/uinvchar.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/umutex.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/ucln_cmn.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/uinit.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/uobject.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/cmemory.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/charstr.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/cstr.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/udata.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/ucmndata.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/udatamem.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/umapfile.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/udataswp.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/utrie_swap.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/ucol_swp.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/utrace.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/uhash.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/uhash_us.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/uenum.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/ustrenum.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/uvector.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/ustack.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/uvectr32.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/uvectr64.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/ucnv.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/ucnv_bld.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/ucnv_cnv.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/ucnv_io.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/ucnv_cb.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/ucnv_err.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/ucnvlat1.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/ucnv_u7.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/ucnv_u8.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/ucnv_u16.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/ucnv_u32.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/ucnvscsu.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/ucnvbocu.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/ucnv_ext.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/ucnvmbcs.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/ucnv2022.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/ucnvhz.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/ucnv_lmb.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/ucnvisci.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/ucnvdisp.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/ucnv_set.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/ucnv_ct.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/resource.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/uresbund.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/ures_cnv.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/uresdata.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/resbund.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/resbund_cnv.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/ucurr.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/localebuilder.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/localeprioritylist.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/messagepattern.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/ucat.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/locmap.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/uloc.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/locid.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/locutil.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/locavailable.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/locdispnames.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/locdspnm.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/loclikely.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/locresdata.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/lsr.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/loclikelysubtags.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/locdistance.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/localematcher.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/bytestream.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/stringpiece.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/bytesinkutil.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/stringtriebuilder.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/bytestriebuilder.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/bytestrie.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/bytestrieiterator.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/ucharstrie.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/ucharstriebuilder.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/ucharstrieiterator.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/dictionarydata.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/edits.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/appendable.cpp"
|
"${ICU_SOURCE_DIR}/common/appendable.cpp"
|
||||||
"${ICU_SOURCE_DIR}/common/ustr_cnv.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/unistr_cnv.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/unistr.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/unistr_case.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/unistr_props.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/utf_impl.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/ustring.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/ustrcase.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/ucasemap.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/ucasemap_titlecase_brkiter.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/cstring.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/ustrfmt.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/ustrtrns.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/ustr_wcs.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/utext.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/unistr_case_locale.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/ustrcase_locale.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/unistr_titlecase_brkiter.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/ustr_titlecase_brkiter.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/normalizer2impl.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/normalizer2.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/filterednormalizer2.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/normlzr.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/unorm.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/unormcmp.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/loadednormalizer2impl.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/chariter.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/schriter.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/uchriter.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/uiter.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/patternprops.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/uchar.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/uprops.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/ucase.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/propname.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/ubidi_props.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/characterproperties.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/ubidi.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/ubidiwrt.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/ubidiln.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/ushape.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/uscript.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/uscript_props.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/usc_impl.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/unames.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/utrie.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/utrie2.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/utrie2_builder.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/ucptrie.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/umutablecptrie.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/bmpset.cpp"
|
"${ICU_SOURCE_DIR}/common/bmpset.cpp"
|
||||||
"${ICU_SOURCE_DIR}/common/unisetspan.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/uset_props.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/uniset_props.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/uniset_closure.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/uset.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/uniset.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/usetiter.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/ruleiter.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/caniter.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/unifilt.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/unifunct.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/uarrsort.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/brkiter.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/ubrk.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/brkeng.cpp"
|
"${ICU_SOURCE_DIR}/common/brkeng.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/brkiter.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/bytesinkutil.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/bytestream.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/bytestrie.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/bytestriebuilder.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/bytestrieiterator.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/caniter.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/characterproperties.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/chariter.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/charstr.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/cmemory.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/cstr.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/cstring.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/cwchar.cpp"
|
||||||
"${ICU_SOURCE_DIR}/common/dictbe.cpp"
|
"${ICU_SOURCE_DIR}/common/dictbe.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/dictionarydata.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/dtintrv.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/edits.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/emojiprops.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/errorcode.cpp"
|
||||||
"${ICU_SOURCE_DIR}/common/filteredbrk.cpp"
|
"${ICU_SOURCE_DIR}/common/filteredbrk.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/filterednormalizer2.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/icudataver.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/icuplug.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/loadednormalizer2impl.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/localebuilder.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/localematcher.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/localeprioritylist.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/locavailable.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/locbased.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/locdispnames.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/locdistance.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/locdspnm.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/locid.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/loclikely.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/loclikelysubtags.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/locmap.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/locresdata.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/locutil.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/lsr.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/lstmbe.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/messagepattern.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/mlbe.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/normalizer2.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/normalizer2impl.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/normlzr.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/parsepos.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/patternprops.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/pluralmap.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/propname.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/propsvec.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/punycode.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/putil.cpp"
|
||||||
"${ICU_SOURCE_DIR}/common/rbbi.cpp"
|
"${ICU_SOURCE_DIR}/common/rbbi.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/rbbi_cache.cpp"
|
||||||
"${ICU_SOURCE_DIR}/common/rbbidata.cpp"
|
"${ICU_SOURCE_DIR}/common/rbbidata.cpp"
|
||||||
"${ICU_SOURCE_DIR}/common/rbbinode.cpp"
|
"${ICU_SOURCE_DIR}/common/rbbinode.cpp"
|
||||||
"${ICU_SOURCE_DIR}/common/rbbirb.cpp"
|
"${ICU_SOURCE_DIR}/common/rbbirb.cpp"
|
||||||
@ -178,166 +80,180 @@ set(ICUUC_SOURCES
|
|||||||
"${ICU_SOURCE_DIR}/common/rbbisetb.cpp"
|
"${ICU_SOURCE_DIR}/common/rbbisetb.cpp"
|
||||||
"${ICU_SOURCE_DIR}/common/rbbistbl.cpp"
|
"${ICU_SOURCE_DIR}/common/rbbistbl.cpp"
|
||||||
"${ICU_SOURCE_DIR}/common/rbbitblb.cpp"
|
"${ICU_SOURCE_DIR}/common/rbbitblb.cpp"
|
||||||
"${ICU_SOURCE_DIR}/common/rbbi_cache.cpp"
|
"${ICU_SOURCE_DIR}/common/resbund.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/resbund_cnv.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/resource.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/restrace.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/ruleiter.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/schriter.cpp"
|
||||||
"${ICU_SOURCE_DIR}/common/serv.cpp"
|
"${ICU_SOURCE_DIR}/common/serv.cpp"
|
||||||
"${ICU_SOURCE_DIR}/common/servnotf.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/servls.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/servlk.cpp"
|
"${ICU_SOURCE_DIR}/common/servlk.cpp"
|
||||||
"${ICU_SOURCE_DIR}/common/servlkf.cpp"
|
"${ICU_SOURCE_DIR}/common/servlkf.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/servls.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/servnotf.cpp"
|
||||||
"${ICU_SOURCE_DIR}/common/servrbf.cpp"
|
"${ICU_SOURCE_DIR}/common/servrbf.cpp"
|
||||||
"${ICU_SOURCE_DIR}/common/servslkf.cpp"
|
"${ICU_SOURCE_DIR}/common/servslkf.cpp"
|
||||||
"${ICU_SOURCE_DIR}/common/uidna.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/usprep.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/uts46.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/punycode.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/util.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/util_props.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/parsepos.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/locbased.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/cwchar.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/wintz.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/dtintrv.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/ucnvsel.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/propsvec.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/ulist.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/uloc_tag.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/icudataver.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/icuplug.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/sharedobject.cpp"
|
"${ICU_SOURCE_DIR}/common/sharedobject.cpp"
|
||||||
"${ICU_SOURCE_DIR}/common/simpleformatter.cpp"
|
"${ICU_SOURCE_DIR}/common/simpleformatter.cpp"
|
||||||
"${ICU_SOURCE_DIR}/common/unifiedcache.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/uloc_keytype.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/ubiditransform.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/pluralmap.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/common/static_unicode_sets.cpp"
|
"${ICU_SOURCE_DIR}/common/static_unicode_sets.cpp"
|
||||||
"${ICU_SOURCE_DIR}/common/restrace.cpp"
|
"${ICU_SOURCE_DIR}/common/stringpiece.cpp"
|
||||||
"${ICU_SOURCE_DIR}/common/emojiprops.cpp"
|
"${ICU_SOURCE_DIR}/common/stringtriebuilder.cpp"
|
||||||
"${ICU_SOURCE_DIR}/common/lstmbe.cpp")
|
"${ICU_SOURCE_DIR}/common/uarrsort.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/ubidi.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/ubidi_props.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/ubidiln.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/ubiditransform.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/ubidiwrt.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/ubrk.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/ucase.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/ucasemap.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/ucasemap_titlecase_brkiter.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/ucat.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/uchar.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/ucharstrie.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/ucharstriebuilder.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/ucharstrieiterator.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/uchriter.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/ucln_cmn.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/ucmndata.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/ucnv.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/ucnv2022.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/ucnv_bld.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/ucnv_cb.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/ucnv_cnv.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/ucnv_ct.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/ucnv_err.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/ucnv_ext.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/ucnv_io.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/ucnv_lmb.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/ucnv_set.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/ucnv_u16.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/ucnv_u32.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/ucnv_u7.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/ucnv_u8.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/ucnvbocu.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/ucnvdisp.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/ucnvhz.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/ucnvisci.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/ucnvlat1.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/ucnvmbcs.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/ucnvscsu.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/ucnvsel.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/ucol_swp.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/ucptrie.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/ucurr.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/udata.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/udatamem.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/udataswp.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/uenum.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/uhash.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/uhash_us.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/uidna.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/uinit.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/uinvchar.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/uiter.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/ulist.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/uloc.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/uloc_keytype.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/uloc_tag.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/ulocale.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/ulocbuilder.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/umapfile.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/umath.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/umutablecptrie.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/umutex.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/unames.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/unifiedcache.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/unifilt.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/unifunct.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/uniset.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/uniset_closure.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/uniset_props.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/unisetspan.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/unistr.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/unistr_case.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/unistr_case_locale.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/unistr_cnv.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/unistr_props.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/unistr_titlecase_brkiter.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/unorm.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/unormcmp.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/uobject.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/uprops.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/ures_cnv.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/uresbund.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/uresdata.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/usc_impl.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/uscript.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/uscript_props.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/uset.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/uset_props.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/usetiter.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/ushape.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/usprep.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/ustack.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/ustr_cnv.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/ustr_titlecase_brkiter.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/ustr_wcs.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/ustrcase.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/ustrcase_locale.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/ustrenum.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/ustrfmt.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/ustring.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/ustrtrns.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/utext.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/utf_impl.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/util.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/util_props.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/utrace.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/utrie.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/utrie2.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/utrie2_builder.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/utrie_swap.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/uts46.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/utypes.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/uvector.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/uvectr32.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/uvectr64.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/common/wintz.cpp")
|
||||||
|
|
||||||
set(ICUI18N_SOURCES
|
set(ICUI18N_SOURCES
|
||||||
"${ICU_SOURCE_DIR}/i18n/ucln_in.cpp"
|
"${ICU_SOURCE_DIR}/i18n/alphaindex.cpp"
|
||||||
"${ICU_SOURCE_DIR}/i18n/fmtable.cpp"
|
"${ICU_SOURCE_DIR}/i18n/anytrans.cpp"
|
||||||
"${ICU_SOURCE_DIR}/i18n/format.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/msgfmt.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/umsg.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/numfmt.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/unum.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/decimfmt.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/dcfmtsym.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/fmtable_cnv.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/choicfmt.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/datefmt.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/smpdtfmt.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/reldtfmt.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/dtfmtsym.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/udat.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/dtptngen.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/udatpg.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/nfrs.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/nfrule.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/nfsubs.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/rbnf.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/numsys.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/unumsys.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/ucsdet.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/ucal.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/calendar.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/gregocal.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/timezone.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/simpletz.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/olsontz.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/astro.cpp"
|
"${ICU_SOURCE_DIR}/i18n/astro.cpp"
|
||||||
"${ICU_SOURCE_DIR}/i18n/taiwncal.cpp"
|
"${ICU_SOURCE_DIR}/i18n/basictz.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/i18n/bocsu.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/i18n/brktrans.cpp"
|
||||||
"${ICU_SOURCE_DIR}/i18n/buddhcal.cpp"
|
"${ICU_SOURCE_DIR}/i18n/buddhcal.cpp"
|
||||||
"${ICU_SOURCE_DIR}/i18n/persncal.cpp"
|
"${ICU_SOURCE_DIR}/i18n/calendar.cpp"
|
||||||
"${ICU_SOURCE_DIR}/i18n/islamcal.cpp"
|
"${ICU_SOURCE_DIR}/i18n/casetrn.cpp"
|
||||||
"${ICU_SOURCE_DIR}/i18n/japancal.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/gregoimp.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/hebrwcal.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/indiancal.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/chnsecal.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/cecal.cpp"
|
"${ICU_SOURCE_DIR}/i18n/cecal.cpp"
|
||||||
"${ICU_SOURCE_DIR}/i18n/coptccal.cpp"
|
"${ICU_SOURCE_DIR}/i18n/chnsecal.cpp"
|
||||||
"${ICU_SOURCE_DIR}/i18n/dangical.cpp"
|
"${ICU_SOURCE_DIR}/i18n/choicfmt.cpp"
|
||||||
"${ICU_SOURCE_DIR}/i18n/ethpccal.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/coleitr.cpp"
|
"${ICU_SOURCE_DIR}/i18n/coleitr.cpp"
|
||||||
"${ICU_SOURCE_DIR}/i18n/coll.cpp"
|
"${ICU_SOURCE_DIR}/i18n/coll.cpp"
|
||||||
"${ICU_SOURCE_DIR}/i18n/sortkey.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/bocsu.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/ucoleitr.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/ucol.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/ucol_res.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/ucol_sit.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/collation.cpp"
|
"${ICU_SOURCE_DIR}/i18n/collation.cpp"
|
||||||
"${ICU_SOURCE_DIR}/i18n/collationsettings.cpp"
|
"${ICU_SOURCE_DIR}/i18n/collationbuilder.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/i18n/collationcompare.cpp"
|
||||||
"${ICU_SOURCE_DIR}/i18n/collationdata.cpp"
|
"${ICU_SOURCE_DIR}/i18n/collationdata.cpp"
|
||||||
"${ICU_SOURCE_DIR}/i18n/collationtailoring.cpp"
|
"${ICU_SOURCE_DIR}/i18n/collationdatabuilder.cpp"
|
||||||
"${ICU_SOURCE_DIR}/i18n/collationdatareader.cpp"
|
"${ICU_SOURCE_DIR}/i18n/collationdatareader.cpp"
|
||||||
"${ICU_SOURCE_DIR}/i18n/collationdatawriter.cpp"
|
"${ICU_SOURCE_DIR}/i18n/collationdatawriter.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/i18n/collationfastlatin.cpp"
|
||||||
|
"${ICU_SOURCE_DIR}/i18n/collationfastlatinbuilder.cpp"
|
||||||
"${ICU_SOURCE_DIR}/i18n/collationfcd.cpp"
|
"${ICU_SOURCE_DIR}/i18n/collationfcd.cpp"
|
||||||
"${ICU_SOURCE_DIR}/i18n/collationiterator.cpp"
|
"${ICU_SOURCE_DIR}/i18n/collationiterator.cpp"
|
||||||
"${ICU_SOURCE_DIR}/i18n/utf16collationiterator.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/utf8collationiterator.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/uitercollationiterator.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/collationsets.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/collationcompare.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/collationfastlatin.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/collationkeys.cpp"
|
"${ICU_SOURCE_DIR}/i18n/collationkeys.cpp"
|
||||||
"${ICU_SOURCE_DIR}/i18n/rulebasedcollator.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/collationroot.cpp"
|
"${ICU_SOURCE_DIR}/i18n/collationroot.cpp"
|
||||||
"${ICU_SOURCE_DIR}/i18n/collationrootelements.cpp"
|
"${ICU_SOURCE_DIR}/i18n/collationrootelements.cpp"
|
||||||
"${ICU_SOURCE_DIR}/i18n/collationdatabuilder.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/collationweights.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/collationruleparser.cpp"
|
"${ICU_SOURCE_DIR}/i18n/collationruleparser.cpp"
|
||||||
"${ICU_SOURCE_DIR}/i18n/collationbuilder.cpp"
|
"${ICU_SOURCE_DIR}/i18n/collationsets.cpp"
|
||||||
"${ICU_SOURCE_DIR}/i18n/collationfastlatinbuilder.cpp"
|
"${ICU_SOURCE_DIR}/i18n/collationsettings.cpp"
|
||||||
"${ICU_SOURCE_DIR}/i18n/listformatter.cpp"
|
"${ICU_SOURCE_DIR}/i18n/collationtailoring.cpp"
|
||||||
"${ICU_SOURCE_DIR}/i18n/ulistformatter.cpp"
|
"${ICU_SOURCE_DIR}/i18n/collationweights.cpp"
|
||||||
"${ICU_SOURCE_DIR}/i18n/strmatch.cpp"
|
"${ICU_SOURCE_DIR}/i18n/compactdecimalformat.cpp"
|
||||||
"${ICU_SOURCE_DIR}/i18n/usearch.cpp"
|
"${ICU_SOURCE_DIR}/i18n/coptccal.cpp"
|
||||||
"${ICU_SOURCE_DIR}/i18n/search.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/stsearch.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/translit.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/utrans.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/esctrn.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/unesctrn.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/funcrepl.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/strrepl.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/tridpars.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/cpdtrans.cpp"
|
"${ICU_SOURCE_DIR}/i18n/cpdtrans.cpp"
|
||||||
"${ICU_SOURCE_DIR}/i18n/rbt.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/rbt_data.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/rbt_pars.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/rbt_rule.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/rbt_set.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/nultrans.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/remtrans.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/casetrn.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/titletrn.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/tolowtrn.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/toupptrn.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/anytrans.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/name2uni.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/uni2name.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/nortrans.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/quant.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/transreg.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/brktrans.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/regexcmp.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/rematch.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/repattrn.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/regexst.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/regextxt.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/regeximp.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/uregex.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/uregexc.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/ulocdata.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/measfmt.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/currfmt.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/curramt.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/currunit.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/measure.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/utmscale.cpp"
|
|
||||||
"${ICU_SOURCE_DIR}/i18n/csdetect.cpp"
|
"${ICU_SOURCE_DIR}/i18n/csdetect.cpp"
|
||||||
"${ICU_SOURCE_DIR}/i18n/csmatch.cpp"
|
"${ICU_SOURCE_DIR}/i18n/csmatch.cpp"
|
||||||
"${ICU_SOURCE_DIR}/i18n/csr2022.cpp"
|
"${ICU_SOURCE_DIR}/i18n/csr2022.cpp"
|
||||||
@@ -346,60 +262,80 @@ set(ICUI18N_SOURCES
     "${ICU_SOURCE_DIR}/i18n/csrsbcs.cpp"
     "${ICU_SOURCE_DIR}/i18n/csrucode.cpp"
     "${ICU_SOURCE_DIR}/i18n/csrutf8.cpp"
-    "${ICU_SOURCE_DIR}/i18n/inputext.cpp"
-    "${ICU_SOURCE_DIR}/i18n/wintzimpl.cpp"
-    "${ICU_SOURCE_DIR}/i18n/windtfmt.cpp"
-    "${ICU_SOURCE_DIR}/i18n/winnmfmt.cpp"
-    "${ICU_SOURCE_DIR}/i18n/basictz.cpp"
-    "${ICU_SOURCE_DIR}/i18n/dtrule.cpp"
-    "${ICU_SOURCE_DIR}/i18n/rbtz.cpp"
-    "${ICU_SOURCE_DIR}/i18n/tzrule.cpp"
-    "${ICU_SOURCE_DIR}/i18n/tztrans.cpp"
-    "${ICU_SOURCE_DIR}/i18n/vtzone.cpp"
-    "${ICU_SOURCE_DIR}/i18n/zonemeta.cpp"
-    "${ICU_SOURCE_DIR}/i18n/standardplural.cpp"
-    "${ICU_SOURCE_DIR}/i18n/upluralrules.cpp"
-    "${ICU_SOURCE_DIR}/i18n/plurrule.cpp"
-    "${ICU_SOURCE_DIR}/i18n/plurfmt.cpp"
-    "${ICU_SOURCE_DIR}/i18n/selfmt.cpp"
+    "${ICU_SOURCE_DIR}/i18n/curramt.cpp"
+    "${ICU_SOURCE_DIR}/i18n/currfmt.cpp"
+    "${ICU_SOURCE_DIR}/i18n/currpinf.cpp"
+    "${ICU_SOURCE_DIR}/i18n/currunit.cpp"
+    "${ICU_SOURCE_DIR}/i18n/dangical.cpp"
+    "${ICU_SOURCE_DIR}/i18n/datefmt.cpp"
+    "${ICU_SOURCE_DIR}/i18n/dayperiodrules.cpp"
+    "${ICU_SOURCE_DIR}/i18n/dcfmtsym.cpp"
+    "${ICU_SOURCE_DIR}/i18n/decContext.cpp"
+    "${ICU_SOURCE_DIR}/i18n/decNumber.cpp"
+    "${ICU_SOURCE_DIR}/i18n/decimfmt.cpp"
+    "${ICU_SOURCE_DIR}/i18n/displayoptions.cpp"
+    "${ICU_SOURCE_DIR}/i18n/double-conversion-bignum-dtoa.cpp"
+    "${ICU_SOURCE_DIR}/i18n/double-conversion-bignum.cpp"
+    "${ICU_SOURCE_DIR}/i18n/double-conversion-cached-powers.cpp"
+    "${ICU_SOURCE_DIR}/i18n/double-conversion-double-to-string.cpp"
+    "${ICU_SOURCE_DIR}/i18n/double-conversion-fast-dtoa.cpp"
+    "${ICU_SOURCE_DIR}/i18n/double-conversion-string-to-double.cpp"
+    "${ICU_SOURCE_DIR}/i18n/double-conversion-strtod.cpp"
+    "${ICU_SOURCE_DIR}/i18n/dtfmtsym.cpp"
     "${ICU_SOURCE_DIR}/i18n/dtitvfmt.cpp"
     "${ICU_SOURCE_DIR}/i18n/dtitvinf.cpp"
-    "${ICU_SOURCE_DIR}/i18n/udateintervalformat.cpp"
-    "${ICU_SOURCE_DIR}/i18n/tmunit.cpp"
-    "${ICU_SOURCE_DIR}/i18n/tmutamt.cpp"
-    "${ICU_SOURCE_DIR}/i18n/tmutfmt.cpp"
-    "${ICU_SOURCE_DIR}/i18n/currpinf.cpp"
-    "${ICU_SOURCE_DIR}/i18n/uspoof.cpp"
-    "${ICU_SOURCE_DIR}/i18n/uspoof_impl.cpp"
-    "${ICU_SOURCE_DIR}/i18n/uspoof_build.cpp"
-    "${ICU_SOURCE_DIR}/i18n/uspoof_conf.cpp"
-    "${ICU_SOURCE_DIR}/i18n/smpdtfst.cpp"
-    "${ICU_SOURCE_DIR}/i18n/ztrans.cpp"
-    "${ICU_SOURCE_DIR}/i18n/zrule.cpp"
-    "${ICU_SOURCE_DIR}/i18n/vzone.cpp"
+    "${ICU_SOURCE_DIR}/i18n/dtptngen.cpp"
+    "${ICU_SOURCE_DIR}/i18n/dtrule.cpp"
+    "${ICU_SOURCE_DIR}/i18n/erarules.cpp"
+    "${ICU_SOURCE_DIR}/i18n/esctrn.cpp"
+    "${ICU_SOURCE_DIR}/i18n/ethpccal.cpp"
+    "${ICU_SOURCE_DIR}/i18n/fmtable.cpp"
+    "${ICU_SOURCE_DIR}/i18n/fmtable_cnv.cpp"
+    "${ICU_SOURCE_DIR}/i18n/format.cpp"
+    "${ICU_SOURCE_DIR}/i18n/formatted_string_builder.cpp"
+    "${ICU_SOURCE_DIR}/i18n/formattedval_iterimpl.cpp"
+    "${ICU_SOURCE_DIR}/i18n/formattedval_sbimpl.cpp"
+    "${ICU_SOURCE_DIR}/i18n/formattedvalue.cpp"
     "${ICU_SOURCE_DIR}/i18n/fphdlimp.cpp"
     "${ICU_SOURCE_DIR}/i18n/fpositer.cpp"
-    "${ICU_SOURCE_DIR}/i18n/ufieldpositer.cpp"
-    "${ICU_SOURCE_DIR}/i18n/decNumber.cpp"
-    "${ICU_SOURCE_DIR}/i18n/decContext.cpp"
-    "${ICU_SOURCE_DIR}/i18n/alphaindex.cpp"
-    "${ICU_SOURCE_DIR}/i18n/tznames.cpp"
-    "${ICU_SOURCE_DIR}/i18n/tznames_impl.cpp"
-    "${ICU_SOURCE_DIR}/i18n/tzgnames.cpp"
-    "${ICU_SOURCE_DIR}/i18n/tzfmt.cpp"
-    "${ICU_SOURCE_DIR}/i18n/compactdecimalformat.cpp"
+    "${ICU_SOURCE_DIR}/i18n/funcrepl.cpp"
     "${ICU_SOURCE_DIR}/i18n/gender.cpp"
-    "${ICU_SOURCE_DIR}/i18n/region.cpp"
-    "${ICU_SOURCE_DIR}/i18n/scriptset.cpp"
-    "${ICU_SOURCE_DIR}/i18n/uregion.cpp"
-    "${ICU_SOURCE_DIR}/i18n/reldatefmt.cpp"
-    "${ICU_SOURCE_DIR}/i18n/quantityformatter.cpp"
+    "${ICU_SOURCE_DIR}/i18n/gregocal.cpp"
+    "${ICU_SOURCE_DIR}/i18n/gregoimp.cpp"
+    "${ICU_SOURCE_DIR}/i18n/hebrwcal.cpp"
+    "${ICU_SOURCE_DIR}/i18n/indiancal.cpp"
+    "${ICU_SOURCE_DIR}/i18n/inputext.cpp"
+    "${ICU_SOURCE_DIR}/i18n/islamcal.cpp"
+    "${ICU_SOURCE_DIR}/i18n/iso8601cal.cpp"
+    "${ICU_SOURCE_DIR}/i18n/japancal.cpp"
+    "${ICU_SOURCE_DIR}/i18n/listformatter.cpp"
+    "${ICU_SOURCE_DIR}/i18n/measfmt.cpp"
     "${ICU_SOURCE_DIR}/i18n/measunit.cpp"
-    "${ICU_SOURCE_DIR}/i18n/sharedbreakiterator.cpp"
-    "${ICU_SOURCE_DIR}/i18n/scientificnumberformatter.cpp"
-    "${ICU_SOURCE_DIR}/i18n/dayperiodrules.cpp"
+    "${ICU_SOURCE_DIR}/i18n/measunit_extra.cpp"
+    "${ICU_SOURCE_DIR}/i18n/measure.cpp"
+    "${ICU_SOURCE_DIR}/i18n/messageformat2.cpp"
+    "${ICU_SOURCE_DIR}/i18n/messageformat2_arguments.cpp"
+    "${ICU_SOURCE_DIR}/i18n/messageformat2_checker.cpp"
+    "${ICU_SOURCE_DIR}/i18n/messageformat2_data_model.cpp"
+    "${ICU_SOURCE_DIR}/i18n/messageformat2_errors.cpp"
+    "${ICU_SOURCE_DIR}/i18n/messageformat2_evaluation.cpp"
+    "${ICU_SOURCE_DIR}/i18n/messageformat2_formattable.cpp"
+    "${ICU_SOURCE_DIR}/i18n/messageformat2_formatter.cpp"
+    "${ICU_SOURCE_DIR}/i18n/messageformat2_function_registry.cpp"
+    "${ICU_SOURCE_DIR}/i18n/messageformat2_parser.cpp"
+    "${ICU_SOURCE_DIR}/i18n/messageformat2_serializer.cpp"
+    "${ICU_SOURCE_DIR}/i18n/msgfmt.cpp"
+    "${ICU_SOURCE_DIR}/i18n/name2uni.cpp"
+    "${ICU_SOURCE_DIR}/i18n/nfrs.cpp"
+    "${ICU_SOURCE_DIR}/i18n/nfrule.cpp"
+    "${ICU_SOURCE_DIR}/i18n/nfsubs.cpp"
+    "${ICU_SOURCE_DIR}/i18n/nortrans.cpp"
+    "${ICU_SOURCE_DIR}/i18n/nultrans.cpp"
     "${ICU_SOURCE_DIR}/i18n/number_affixutils.cpp"
+    "${ICU_SOURCE_DIR}/i18n/number_asformat.cpp"
+    "${ICU_SOURCE_DIR}/i18n/number_capi.cpp"
     "${ICU_SOURCE_DIR}/i18n/number_compact.cpp"
+    "${ICU_SOURCE_DIR}/i18n/number_currencysymbols.cpp"
     "${ICU_SOURCE_DIR}/i18n/number_decimalquantity.cpp"
     "${ICU_SOURCE_DIR}/i18n/number_decimfmtprops.cpp"
     "${ICU_SOURCE_DIR}/i18n/number_fluent.cpp"
@@ -407,7 +343,9 @@ set(ICUI18N_SOURCES
     "${ICU_SOURCE_DIR}/i18n/number_grouping.cpp"
     "${ICU_SOURCE_DIR}/i18n/number_integerwidth.cpp"
     "${ICU_SOURCE_DIR}/i18n/number_longnames.cpp"
+    "${ICU_SOURCE_DIR}/i18n/number_mapper.cpp"
     "${ICU_SOURCE_DIR}/i18n/number_modifiers.cpp"
+    "${ICU_SOURCE_DIR}/i18n/number_multiplier.cpp"
     "${ICU_SOURCE_DIR}/i18n/number_notation.cpp"
     "${ICU_SOURCE_DIR}/i18n/number_output.cpp"
     "${ICU_SOURCE_DIR}/i18n/number_padding.cpp"
@@ -415,46 +353,125 @@ set(ICUI18N_SOURCES
     "${ICU_SOURCE_DIR}/i18n/number_patternstring.cpp"
     "${ICU_SOURCE_DIR}/i18n/number_rounding.cpp"
     "${ICU_SOURCE_DIR}/i18n/number_scientific.cpp"
-    "${ICU_SOURCE_DIR}/i18n/number_utils.cpp"
-    "${ICU_SOURCE_DIR}/i18n/number_asformat.cpp"
-    "${ICU_SOURCE_DIR}/i18n/number_mapper.cpp"
-    "${ICU_SOURCE_DIR}/i18n/number_multiplier.cpp"
-    "${ICU_SOURCE_DIR}/i18n/number_currencysymbols.cpp"
+    "${ICU_SOURCE_DIR}/i18n/number_simple.cpp"
     "${ICU_SOURCE_DIR}/i18n/number_skeletons.cpp"
-    "${ICU_SOURCE_DIR}/i18n/number_capi.cpp"
-    "${ICU_SOURCE_DIR}/i18n/double-conversion-string-to-double.cpp"
-    "${ICU_SOURCE_DIR}/i18n/double-conversion-double-to-string.cpp"
-    "${ICU_SOURCE_DIR}/i18n/double-conversion-bignum-dtoa.cpp"
-    "${ICU_SOURCE_DIR}/i18n/double-conversion-bignum.cpp"
-    "${ICU_SOURCE_DIR}/i18n/double-conversion-cached-powers.cpp"
-    "${ICU_SOURCE_DIR}/i18n/double-conversion-fast-dtoa.cpp"
-    "${ICU_SOURCE_DIR}/i18n/double-conversion-strtod.cpp"
-    "${ICU_SOURCE_DIR}/i18n/string_segment.cpp"
-    "${ICU_SOURCE_DIR}/i18n/numparse_parsednumber.cpp"
-    "${ICU_SOURCE_DIR}/i18n/numparse_impl.cpp"
-    "${ICU_SOURCE_DIR}/i18n/numparse_symbols.cpp"
-    "${ICU_SOURCE_DIR}/i18n/numparse_decimal.cpp"
-    "${ICU_SOURCE_DIR}/i18n/numparse_scientific.cpp"
-    "${ICU_SOURCE_DIR}/i18n/numparse_currency.cpp"
-    "${ICU_SOURCE_DIR}/i18n/numparse_affixes.cpp"
-    "${ICU_SOURCE_DIR}/i18n/numparse_compositions.cpp"
-    "${ICU_SOURCE_DIR}/i18n/numparse_validators.cpp"
-    "${ICU_SOURCE_DIR}/i18n/numrange_fluent.cpp"
-    "${ICU_SOURCE_DIR}/i18n/numrange_impl.cpp"
-    "${ICU_SOURCE_DIR}/i18n/erarules.cpp"
-    "${ICU_SOURCE_DIR}/i18n/formattedvalue.cpp"
-    "${ICU_SOURCE_DIR}/i18n/formattedval_iterimpl.cpp"
-    "${ICU_SOURCE_DIR}/i18n/formattedval_sbimpl.cpp"
-    "${ICU_SOURCE_DIR}/i18n/formatted_string_builder.cpp"
-    "${ICU_SOURCE_DIR}/i18n/measunit_extra.cpp"
     "${ICU_SOURCE_DIR}/i18n/number_symbolswrapper.cpp"
     "${ICU_SOURCE_DIR}/i18n/number_usageprefs.cpp"
+    "${ICU_SOURCE_DIR}/i18n/number_utils.cpp"
+    "${ICU_SOURCE_DIR}/i18n/numfmt.cpp"
+    "${ICU_SOURCE_DIR}/i18n/numparse_affixes.cpp"
+    "${ICU_SOURCE_DIR}/i18n/numparse_compositions.cpp"
+    "${ICU_SOURCE_DIR}/i18n/numparse_currency.cpp"
+    "${ICU_SOURCE_DIR}/i18n/numparse_decimal.cpp"
+    "${ICU_SOURCE_DIR}/i18n/numparse_impl.cpp"
+    "${ICU_SOURCE_DIR}/i18n/numparse_parsednumber.cpp"
+    "${ICU_SOURCE_DIR}/i18n/numparse_scientific.cpp"
+    "${ICU_SOURCE_DIR}/i18n/numparse_symbols.cpp"
+    "${ICU_SOURCE_DIR}/i18n/numparse_validators.cpp"
     "${ICU_SOURCE_DIR}/i18n/numrange_capi.cpp"
+    "${ICU_SOURCE_DIR}/i18n/numrange_fluent.cpp"
+    "${ICU_SOURCE_DIR}/i18n/numrange_impl.cpp"
+    "${ICU_SOURCE_DIR}/i18n/numsys.cpp"
+    "${ICU_SOURCE_DIR}/i18n/olsontz.cpp"
+    "${ICU_SOURCE_DIR}/i18n/persncal.cpp"
     "${ICU_SOURCE_DIR}/i18n/pluralranges.cpp"
+    "${ICU_SOURCE_DIR}/i18n/plurfmt.cpp"
+    "${ICU_SOURCE_DIR}/i18n/plurrule.cpp"
+    "${ICU_SOURCE_DIR}/i18n/quant.cpp"
+    "${ICU_SOURCE_DIR}/i18n/quantityformatter.cpp"
+    "${ICU_SOURCE_DIR}/i18n/rbnf.cpp"
+    "${ICU_SOURCE_DIR}/i18n/rbt.cpp"
+    "${ICU_SOURCE_DIR}/i18n/rbt_data.cpp"
+    "${ICU_SOURCE_DIR}/i18n/rbt_pars.cpp"
+    "${ICU_SOURCE_DIR}/i18n/rbt_rule.cpp"
+    "${ICU_SOURCE_DIR}/i18n/rbt_set.cpp"
+    "${ICU_SOURCE_DIR}/i18n/rbtz.cpp"
+    "${ICU_SOURCE_DIR}/i18n/regexcmp.cpp"
+    "${ICU_SOURCE_DIR}/i18n/regeximp.cpp"
+    "${ICU_SOURCE_DIR}/i18n/regexst.cpp"
+    "${ICU_SOURCE_DIR}/i18n/regextxt.cpp"
+    "${ICU_SOURCE_DIR}/i18n/region.cpp"
+    "${ICU_SOURCE_DIR}/i18n/reldatefmt.cpp"
+    "${ICU_SOURCE_DIR}/i18n/reldtfmt.cpp"
+    "${ICU_SOURCE_DIR}/i18n/rematch.cpp"
+    "${ICU_SOURCE_DIR}/i18n/remtrans.cpp"
+    "${ICU_SOURCE_DIR}/i18n/repattrn.cpp"
+    "${ICU_SOURCE_DIR}/i18n/rulebasedcollator.cpp"
+    "${ICU_SOURCE_DIR}/i18n/scientificnumberformatter.cpp"
+    "${ICU_SOURCE_DIR}/i18n/scriptset.cpp"
+    "${ICU_SOURCE_DIR}/i18n/search.cpp"
+    "${ICU_SOURCE_DIR}/i18n/selfmt.cpp"
+    "${ICU_SOURCE_DIR}/i18n/sharedbreakiterator.cpp"
+    "${ICU_SOURCE_DIR}/i18n/simpletz.cpp"
+    "${ICU_SOURCE_DIR}/i18n/smpdtfmt.cpp"
+    "${ICU_SOURCE_DIR}/i18n/smpdtfst.cpp"
+    "${ICU_SOURCE_DIR}/i18n/sortkey.cpp"
+    "${ICU_SOURCE_DIR}/i18n/standardplural.cpp"
+    "${ICU_SOURCE_DIR}/i18n/string_segment.cpp"
+    "${ICU_SOURCE_DIR}/i18n/strmatch.cpp"
+    "${ICU_SOURCE_DIR}/i18n/strrepl.cpp"
+    "${ICU_SOURCE_DIR}/i18n/stsearch.cpp"
+    "${ICU_SOURCE_DIR}/i18n/taiwncal.cpp"
+    "${ICU_SOURCE_DIR}/i18n/timezone.cpp"
+    "${ICU_SOURCE_DIR}/i18n/titletrn.cpp"
+    "${ICU_SOURCE_DIR}/i18n/tmunit.cpp"
+    "${ICU_SOURCE_DIR}/i18n/tmutamt.cpp"
+    "${ICU_SOURCE_DIR}/i18n/tmutfmt.cpp"
+    "${ICU_SOURCE_DIR}/i18n/tolowtrn.cpp"
+    "${ICU_SOURCE_DIR}/i18n/toupptrn.cpp"
+    "${ICU_SOURCE_DIR}/i18n/translit.cpp"
+    "${ICU_SOURCE_DIR}/i18n/transreg.cpp"
+    "${ICU_SOURCE_DIR}/i18n/tridpars.cpp"
+    "${ICU_SOURCE_DIR}/i18n/tzfmt.cpp"
+    "${ICU_SOURCE_DIR}/i18n/tzgnames.cpp"
+    "${ICU_SOURCE_DIR}/i18n/tznames.cpp"
+    "${ICU_SOURCE_DIR}/i18n/tznames_impl.cpp"
+    "${ICU_SOURCE_DIR}/i18n/tzrule.cpp"
+    "${ICU_SOURCE_DIR}/i18n/tztrans.cpp"
+    "${ICU_SOURCE_DIR}/i18n/ucal.cpp"
+    "${ICU_SOURCE_DIR}/i18n/ucln_in.cpp"
+    "${ICU_SOURCE_DIR}/i18n/ucol.cpp"
+    "${ICU_SOURCE_DIR}/i18n/ucol_res.cpp"
+    "${ICU_SOURCE_DIR}/i18n/ucol_sit.cpp"
+    "${ICU_SOURCE_DIR}/i18n/ucoleitr.cpp"
+    "${ICU_SOURCE_DIR}/i18n/ucsdet.cpp"
+    "${ICU_SOURCE_DIR}/i18n/udat.cpp"
+    "${ICU_SOURCE_DIR}/i18n/udateintervalformat.cpp"
+    "${ICU_SOURCE_DIR}/i18n/udatpg.cpp"
+    "${ICU_SOURCE_DIR}/i18n/ufieldpositer.cpp"
+    "${ICU_SOURCE_DIR}/i18n/uitercollationiterator.cpp"
+    "${ICU_SOURCE_DIR}/i18n/ulistformatter.cpp"
+    "${ICU_SOURCE_DIR}/i18n/ulocdata.cpp"
+    "${ICU_SOURCE_DIR}/i18n/umsg.cpp"
+    "${ICU_SOURCE_DIR}/i18n/unesctrn.cpp"
+    "${ICU_SOURCE_DIR}/i18n/uni2name.cpp"
     "${ICU_SOURCE_DIR}/i18n/units_complexconverter.cpp"
     "${ICU_SOURCE_DIR}/i18n/units_converter.cpp"
     "${ICU_SOURCE_DIR}/i18n/units_data.cpp"
-    "${ICU_SOURCE_DIR}/i18n/units_router.cpp")
+    "${ICU_SOURCE_DIR}/i18n/units_router.cpp"
+    "${ICU_SOURCE_DIR}/i18n/unum.cpp"
+    "${ICU_SOURCE_DIR}/i18n/unumsys.cpp"
+    "${ICU_SOURCE_DIR}/i18n/upluralrules.cpp"
+    "${ICU_SOURCE_DIR}/i18n/uregex.cpp"
+    "${ICU_SOURCE_DIR}/i18n/uregexc.cpp"
+    "${ICU_SOURCE_DIR}/i18n/uregion.cpp"
+    "${ICU_SOURCE_DIR}/i18n/usearch.cpp"
+    "${ICU_SOURCE_DIR}/i18n/uspoof.cpp"
+    "${ICU_SOURCE_DIR}/i18n/uspoof_build.cpp"
+    "${ICU_SOURCE_DIR}/i18n/uspoof_conf.cpp"
+    "${ICU_SOURCE_DIR}/i18n/uspoof_impl.cpp"
+    "${ICU_SOURCE_DIR}/i18n/utf16collationiterator.cpp"
+    "${ICU_SOURCE_DIR}/i18n/utf8collationiterator.cpp"
+    "${ICU_SOURCE_DIR}/i18n/utmscale.cpp"
+    "${ICU_SOURCE_DIR}/i18n/utrans.cpp"
+    "${ICU_SOURCE_DIR}/i18n/vtzone.cpp"
+    "${ICU_SOURCE_DIR}/i18n/vzone.cpp"
+    "${ICU_SOURCE_DIR}/i18n/windtfmt.cpp"
+    "${ICU_SOURCE_DIR}/i18n/winnmfmt.cpp"
+    "${ICU_SOURCE_DIR}/i18n/wintzimpl.cpp"
+    "${ICU_SOURCE_DIR}/i18n/zonemeta.cpp"
+    "${ICU_SOURCE_DIR}/i18n/zrule.cpp"
+    "${ICU_SOURCE_DIR}/i18n/ztrans.cpp")
 
 file(GENERATE OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/empty.cpp" CONTENT " ")
 enable_language(ASM)
@@ -464,6 +481,11 @@ if (ARCH_S390X)
 else()
     set(ICUDATA_SOURCE_FILE "${ICUDATA_SOURCE_DIR}/icudt75l_dat.S" )
 endif()
+# ^^ you might be confused how for different little endian platforms (x86, ARM) the same assembly files can be used.
+# These files are indeed assembly but they only contain data ('.long' directive), which makes them portable accross CPUs.
+# Only the endianness and the character set (ASCII, EBCDIC) makes a difference, also see
+# https://unicode-org.github.io/icu/userguide/icu_data/#sharing-icu-data-between-platforms, 'Sharing ICU Data Between Platforms')
+# (and as an experiment, try re-generating the data files on x86 vs. ARM, ... you'll get exactly the same files)
 
 set(ICUDATA_SOURCES
     "${ICUDATA_SOURCE_FILE}"
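Editor's note on the hunk above: the generated `icudt75l_dat.S` is pure data expressed through assembler directives, which is why one little-endian file can be assembled for x86 and ARM alike. A hedged way to convince yourself, assuming the file is available in the checked-out sources (the path below is illustrative):

    # Illustrative inspection: a data-only "assembly" file has no instructions,
    # just labels and directives such as .globl/.align/.long.
    grep -cE '^[[:space:]]*\.long' icudt75l_dat.S                        # counts the raw data words
    grep -vE '^[[:space:]]*(\.|[A-Za-z0-9_]+:)' icudt75l_dat.S | head    # should print nothing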
2 contrib/libarchive vendored
@@ -1 +1 @@
-Subproject commit ee45796171324519f0c0bfd012018dd099296336
+Subproject commit 313aa1fa10b657de791e3202c168a6c833bc3543
@@ -157,7 +157,7 @@ if (TARGET ch_contrib::zlib)
 endif()
 
 if (TARGET ch_contrib::zstd)
-    target_compile_definitions(_libarchive PUBLIC HAVE_ZSTD_H=1 HAVE_LIBZSTD=1 HAVE_LIBZSTD_COMPRESSOR=1)
+    target_compile_definitions(_libarchive PUBLIC HAVE_ZSTD_H=1 HAVE_LIBZSTD=1 HAVE_ZSTD_compressStream=1)
     target_link_libraries(_libarchive PRIVATE ch_contrib::zstd)
 endif()
 
@@ -334,13 +334,16 @@ typedef uint64_t uintmax_t;
 /* #undef ARCHIVE_XATTR_LINUX */
 
 /* Version number of bsdcpio */
-#define BSDCPIO_VERSION_STRING "3.7.0"
+#define BSDCPIO_VERSION_STRING "3.7.4"
 
 /* Version number of bsdtar */
-#define BSDTAR_VERSION_STRING "3.7.0"
+#define BSDTAR_VERSION_STRING "3.7.4"
 
 /* Version number of bsdcat */
-#define BSDCAT_VERSION_STRING "3.7.0"
+#define BSDCAT_VERSION_STRING "3.7.4"
+
+/* Version number of bsdunzip */
+#define BSDUNZIP_VERSION_STRING "3.7.4"
 
 /* Define to 1 if you have the `acl_create_entry' function. */
 /* #undef HAVE_ACL_CREATE_ENTRY */
@@ -642,8 +645,8 @@ typedef uint64_t uintmax_t;
 /* Define to 1 if you have the `getgrnam_r' function. */
 #define HAVE_GETGRNAM_R 1
 
-/* Define to 1 if platform uses `optreset` to reset `getopt` */
-#define HAVE_GETOPT_OPTRESET 1
+/* Define to 1 if you have the `getline' function. */
+#define HAVE_GETLINE 1
 
 /* Define to 1 if you have the `getpid' function. */
 #define HAVE_GETPID 1
@@ -750,6 +753,12 @@ typedef uint64_t uintmax_t;
 /* Define to 1 if you have the `pcreposix' library (-lpcreposix). */
 /* #undef HAVE_LIBPCREPOSIX */
 
+/* Define to 1 if you have the `pcre2-8' library (-lpcre2-8). */
+/* #undef HAVE_LIBPCRE2 */
+
+/* Define to 1 if you have the `pcreposix' library (-lpcre2posix). */
+/* #undef HAVE_LIBPCRE2POSIX */
+
 /* Define to 1 if you have the `xml2' library (-lxml2). */
 #define HAVE_LIBXML2 1
 
@@ -765,9 +774,8 @@ typedef uint64_t uintmax_t;
 /* Define to 1 if you have the `zstd' library (-lzstd). */
 /* #undef HAVE_LIBZSTD */
 
-/* Define to 1 if you have the `zstd' library (-lzstd) with compression
-   support. */
-/* #undef HAVE_LIBZSTD_COMPRESSOR */
+/* Define to 1 if you have the ZSTD_compressStream function. */
+/* #undef HAVE_ZSTD_compressStream */
 
 /* Define to 1 if you have the <limits.h> header file. */
 #define HAVE_LIMITS_H 1
@@ -923,6 +931,9 @@ typedef uint64_t uintmax_t;
 /* Define to 1 if you have the <pcreposix.h> header file. */
 /* #undef HAVE_PCREPOSIX_H */
 
+/* Define to 1 if you have the <pcre2posix.h> header file. */
+/* #undef HAVE_PCRE2POSIX_H */
+
 /* Define to 1 if you have the `pipe' function. */
 #define HAVE_PIPE 1
 
@@ -1029,6 +1040,12 @@ typedef uint64_t uintmax_t;
 /* Define to 1 if you have the `strrchr' function. */
 #define HAVE_STRRCHR 1
 
+/* Define to 1 if the system has the type `struct statfs'. */
+/* #undef HAVE_STRUCT_STATFS */
+
+/* Define to 1 if `f_iosize' is a member of `struct statfs'. */
+/* #undef HAVE_STRUCT_STATFS_F_IOSIZE */
+
 /* Define to 1 if `f_namemax' is a member of `struct statfs'. */
 /* #undef HAVE_STRUCT_STATFS_F_NAMEMAX */
 
@@ -1077,6 +1094,9 @@ typedef uint64_t uintmax_t;
 /* Define to 1 if you have the `symlink' function. */
 #define HAVE_SYMLINK 1
 
+/* Define to 1 if you have the `sysconf' function. */
+#define HAVE_SYSCONF 1
+
 /* Define to 1 if you have the <sys/acl.h> header file. */
 /* #undef HAVE_SYS_ACL_H */
 
@@ -1276,10 +1296,10 @@ typedef uint64_t uintmax_t;
 #define ICONV_CONST
 
 /* Version number of libarchive as a single integer */
-#define LIBARCHIVE_VERSION_NUMBER "3007000"
+#define LIBARCHIVE_VERSION_NUMBER "3007004"
 
 /* Version number of libarchive */
-#define LIBARCHIVE_VERSION_STRING "3.7.0"
+#define LIBARCHIVE_VERSION_STRING "3.7.4"
 
 /* Define to 1 if `lstat' dereferences a symlink specified with a trailing
    slash. */
@@ -1333,7 +1353,7 @@ typedef uint64_t uintmax_t;
 #endif /* SAFE_TO_DEFINE_EXTENSIONS */
 
 /* Version number of package */
-#define VERSION "3.7.0"
+#define VERSION "3.7.4"
 
 /* Number of bits in a file offset, on hosts where this is settable. */
 /* #undef _FILE_OFFSET_BITS */
2 contrib/libuv vendored
@@ -1 +1 @@
-Subproject commit 4482964660c77eec1166cd7d14fb915e3dbd774a
+Subproject commit 714b58b9849568211ade86b44dd91d37f8a2175e
@@ -10,6 +10,7 @@ set(uv_sources
     src/random.c
    src/strscpy.c
    src/strtok.c
+    src/thread-common.c
    src/threadpool.c
    src/timer.c
    src/uv-common.c
@@ -70,10 +71,7 @@ if(CMAKE_SYSTEM_NAME STREQUAL "Linux")
     list(APPEND uv_defines _GNU_SOURCE _POSIX_C_SOURCE=200112)
    list(APPEND uv_libraries rt)
    list(APPEND uv_sources
-        src/unix/epoll.c
-        src/unix/linux-core.c
-        src/unix/linux-inotify.c
-        src/unix/linux-syscalls.c
+        src/unix/linux.c
         src/unix/procfs-exepath.c
         src/unix/random-getrandom.c
         src/unix/random-sysctl-linux.c)
@@ -140,6 +140,12 @@ if (CMAKE_CROSSCOMPILING)
     message (STATUS "CROSS COMPILING SET LLVM HOST TRIPLE ${LLVM_HOST_TRIPLE}")
 endif()
 
+# llvm-project/llvm/cmake/config-ix.cmake does a weird thing: it defines _LARGEFILE64_SOURCE,
+# then checks if lseek64() function exists, then undefines _LARGEFILE64_SOURCE.
+# Then the actual code that uses this function *doesn't* define _LARGEFILE64_SOURCE, so lseek64()
+# may not exist and compilation fails. This happens with musl.
+add_compile_definitions("_LARGEFILE64_SOURCE")
+
 add_subdirectory ("${LLVM_SOURCE_DIR}" "${LLVM_BINARY_DIR}")
 
 set_directory_properties (PROPERTIES
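The comment added in this hunk describes a reproducible pitfall. A hedged standalone repro, assuming `musl-gcc` is installed (the file name is illustrative): without `_LARGEFILE64_SOURCE`, musl does not declare `lseek64()`, so the first compile fails and the second succeeds.

    # Hypothetical repro of the musl failure described above.
    cat > /tmp/lfs_check.c <<'EOF'
    #include <unistd.h>
    int main(void) { return (int)lseek64(0, 0, SEEK_SET); }
    EOF
    musl-gcc /tmp/lfs_check.c -o /tmp/lfs_check || echo "fails: lseek64 undeclared"
    musl-gcc -D_LARGEFILE64_SOURCE /tmp/lfs_check.c -o /tmp/lfs_check && echo "ok with the define"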
2 contrib/openssl vendored
@@ -1 +1 @@
-Subproject commit 66deddc1e53cda8706604a019777259372d1bd62
+Subproject commit b3e62c440f390e12e77c80675f883af82ad3d5ed
2 contrib/sysroot vendored
@@ -1 +1 @@
-Subproject commit cc385041b226d1fc28ead14dbab5d40a5f821dd8
+Subproject commit 5be834147d5b5dd77ca2b821f356982029320513
@@ -34,7 +34,7 @@ RUN arch=${TARGETARCH:-amd64} \
 # lts / testing / prestable / etc
 ARG REPO_CHANNEL="stable"
 ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
-ARG VERSION="24.8.2.3"
+ARG VERSION="24.8.4.13"
 ARG PACKAGES="clickhouse-keeper"
 ARG DIRECT_DOWNLOAD_URLS=""
 
@@ -32,7 +32,7 @@ RUN arch=${TARGETARCH:-amd64} \
 # lts / testing / prestable / etc
 ARG REPO_CHANNEL="stable"
 ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
-ARG VERSION="24.8.2.3"
+ARG VERSION="24.8.4.13"
 ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
 ARG DIRECT_DOWNLOAD_URLS=""
 
@@ -28,7 +28,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list
 
 ARG REPO_CHANNEL="stable"
 ARG REPOSITORY="deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https://packages.clickhouse.com/deb ${REPO_CHANNEL} main"
-ARG VERSION="24.8.2.3"
+ARG VERSION="24.8.4.13"
 ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
 
 #docker-official-library:off
@@ -109,7 +109,7 @@ if [ -n "$CLICKHOUSE_USER" ] && [ "$CLICKHOUSE_USER" != "default" ] || [ -n "$CL
             <networks>
                 <ip>::/0</ip>
             </networks>
-            <password>${CLICKHOUSE_PASSWORD}</password>
+            <password><![CDATA[${CLICKHOUSE_PASSWORD//]]>/]]]]><![CDATA[>}]]></password>
             <quota>default</quota>
             <access_management>${CLICKHOUSE_ACCESS_MANAGEMENT}</access_management>
         </${CLICKHOUSE_USER}>
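The new `<password>` line is worth unpacking: it wraps the value in CDATA and uses bash pattern substitution, `${CLICKHOUSE_PASSWORD//]]>/]]]]><![CDATA[>}`, to split any literal `]]>` inside the password, closing the CDATA section after `]]` and immediately reopening one before `>`. A user-supplied password can therefore no longer terminate the CDATA block and inject XML. A minimal sketch with an illustrative variable:

    # Minimal sketch of the CDATA-escaping idiom; 'password' is illustrative.
    password='secret]]><injected/>'
    escaped="${password//]]>/]]]]><![CDATA[>}"
    printf '<password><![CDATA[%s]]></password>\n' "$escaped"
    # An XML parser reads the two CDATA sections back as: secret]]><injected/>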
@@ -124,6 +124,8 @@ function setup_logs_replication
     check_logs_credentials || return 0
     __set_connection_args
 
+    echo "My hostname is ${HOSTNAME}"
+
     echo 'Create all configured system logs'
     clickhouse-client --query "SYSTEM FLUSH LOGS"
 
@@ -184,7 +186,17 @@ function setup_logs_replication
             /^TTL /d
         ')
 
-        echo -e "Creating remote destination table ${table}_${hash} with statement:\n${statement}" >&2
+        echo -e "Creating remote destination table ${table}_${hash} with statement:" >&2
+
+        echo "::group::${table}"
+        # there's the only way big "$statement" can be printed without causing EAGAIN error
+        # cat: write error: Resource temporarily unavailable
+        statement_print="${statement}"
+        if [ "${#statement_print}" -gt 4000 ]; then
+            statement_print="${statement::1999}\n…\n${statement:${#statement}-1999}"
+        fi
+        echo -e "$statement_print"
+        echo "::endgroup::"
 
         echo "$statement" | clickhouse-client --database_replicated_initial_query_timeout_sec=10 \
             --distributed_ddl_task_timeout=30 --distributed_ddl_output_mode=throw_only_active \
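Two bash features carry the truncation above: `${#var}` yields the string length, and `${var::N}` / `${var:offset}` slice it, so an oversized DDL statement is logged as its first and last 1999 characters around an ellipsis instead of being written whole to the non-blocking log descriptor. A standalone sketch:

    # Standalone sketch of the head…tail truncation used above.
    s=$(printf 'A%.0s' $(seq 1 5000))        # a 5000-character string
    print="$s"
    if [ "${#print}" -gt 4000 ]; then
        print="${s::1999}\n…\n${s:${#s}-1999}"
    fi
    echo -e "$print" | head -c 60; echo      # only the truncated form is emitted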
@@ -3,6 +3,8 @@
 
 FROM alpine:3.18
 RUN apk add --no-cache -U iproute2 \
-    && for bin in iptables iptables-restore iptables-save; \
+    && for bin in \
+        iptables iptables-restore iptables-save \
+        ip6tables ip6tables-restore ip6tables-save; \
     do ln -sf xtables-nft-multi "/sbin/$bin"; \
     done
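The symlink loop works because `xtables-nft-multi` is a multi-call binary: it dispatches on the name it was invoked under, so the six IPv4/IPv6 tools can all point at one executable. An illustrative check inside the resulting image (paths assume Alpine's layout):

    # Every tool should resolve to the same binary.
    for bin in iptables ip6tables iptables-save; do
        readlink -f "/sbin/$bin"    # expected: /sbin/xtables-nft-multi
    done
    iptables --version              # should report the nf_tables backend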
32 docs/changelogs/v24.3.10.33-lts.md Normal file
@@ -0,0 +1,32 @@
+---
+sidebar_position: 1
+sidebar_label: 2024
+---
+
+# 2024 Changelog
+
+### ClickHouse release v24.3.10.33-lts (37b6502ebf0) FIXME as compared to v24.3.9.5-lts (a939270465e)
+
+#### Improvement
+* Backported in [#68870](https://github.com/ClickHouse/ClickHouse/issues/68870): Make allow_experimental_analyzer be controlled by the initiator for distributed queries. This ensures compatibility and correctness during operations in mixed version clusters. [#65777](https://github.com/ClickHouse/ClickHouse/pull/65777) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
+* Backported in [#69095](https://github.com/ClickHouse/ClickHouse/issues/69095): Support for the Spanish language in the embedded dictionaries. [#69035](https://github.com/ClickHouse/ClickHouse/pull/69035) ([Vasily Okunev](https://github.com/VOkunev)).
+
+#### Bug Fix (user-visible misbehavior in an official stable release)
+* Backported in [#68995](https://github.com/ClickHouse/ClickHouse/issues/68995): Fix the upper bound of the function `fromModifiedJulianDay`. It was supposed to be `9999-12-31` but was mistakenly set to `9999-01-01`. [#67583](https://github.com/ClickHouse/ClickHouse/pull/67583) ([PHO](https://github.com/depressed-pho)).
+* Backported in [#68844](https://github.com/ClickHouse/ClickHouse/issues/68844): Fixed crash in Parquet filtering when data types in the file substantially differ from requested types (e.g. `... FROM file('a.parquet', Parquet, 'x String')`, but the file has `x Int64`). Without this fix, use `input_format_parquet_filter_push_down = 0` as a workaround. [#68131](https://github.com/ClickHouse/ClickHouse/pull/68131) ([Michael Kolupaev](https://github.com/al13n321)).
+* Backported in [#68881](https://github.com/ClickHouse/ClickHouse/issues/68881): Fixes [#50868](https://github.com/ClickHouse/ClickHouse/issues/50868). Small DateTime64 constant values returned by a nested subquery inside a distributed query were wrongly transformed to Nulls, thus causing errors and possible incorrect query results. [#68323](https://github.com/ClickHouse/ClickHouse/pull/68323) ([Shankar](https://github.com/shiyer7474)).
+* Backported in [#69054](https://github.com/ClickHouse/ClickHouse/issues/69054): Added back virtual columns ` _table` and `_database` to distributed tables. They were available until version 24.3. [#68672](https://github.com/ClickHouse/ClickHouse/pull/68672) ([Anton Popov](https://github.com/CurtizJ)).
+* Backported in [#68856](https://github.com/ClickHouse/ClickHouse/issues/68856): Fix possible error `Size of permutation (0) is less than required (...)` during Variant column permutation. [#68681](https://github.com/ClickHouse/ClickHouse/pull/68681) ([Kruglov Pavel](https://github.com/Avogar)).
+* Backported in [#69152](https://github.com/ClickHouse/ClickHouse/issues/69152): Fix possible wrong result during anyHeavy state merge. [#68950](https://github.com/ClickHouse/ClickHouse/pull/68950) ([Raúl Marín](https://github.com/Algunenano)).
+* Backported in [#69112](https://github.com/ClickHouse/ClickHouse/issues/69112): Fix logical error when we have empty async insert. [#69080](https://github.com/ClickHouse/ClickHouse/pull/69080) ([Han Fei](https://github.com/hanfei1991)).
+
+#### NO CL CATEGORY
+* Backported in [#68938](https://github.com/ClickHouse/ClickHouse/issues/68938):. [#68897](https://github.com/ClickHouse/ClickHouse/pull/68897) ([Alexander Gololobov](https://github.com/davenger)).
+
+#### NOT FOR CHANGELOG / INSIGNIFICANT
+* Backported in [#68826](https://github.com/ClickHouse/ClickHouse/issues/68826): Turn off fault injection for insert in `01396_inactive_replica_cleanup_nodes_zookeeper`. [#68715](https://github.com/ClickHouse/ClickHouse/pull/68715) ([alesapin](https://github.com/alesapin)).
+* Backported in [#68754](https://github.com/ClickHouse/ClickHouse/issues/68754): To make patch release possible from every commit on release branch, package_debug build is required and must not be skipped. [#68750](https://github.com/ClickHouse/ClickHouse/pull/68750) ([Max K.](https://github.com/maxknv)).
+* Backported in [#69044](https://github.com/ClickHouse/ClickHouse/issues/69044): Fix 01114_database_atomic flakiness. [#68930](https://github.com/ClickHouse/ClickHouse/pull/68930) ([Raúl Marín](https://github.com/Algunenano)).
17 docs/changelogs/v24.3.11.7-lts.md Normal file
@@ -0,0 +1,17 @@
+---
+sidebar_position: 1
+sidebar_label: 2024
+---
+
+# 2024 Changelog
+
+### ClickHouse release v24.3.11.7-lts (28795d0a47e) FIXME as compared to v24.3.10.33-lts (37b6502ebf0)
+
+#### Bug Fix (user-visible misbehavior in an official stable release)
+* Backported in [#67479](https://github.com/ClickHouse/ClickHouse/issues/67479): In rare cases ClickHouse could consider parts as broken because of some unexpected projections on disk. Now it's fixed. [#66898](https://github.com/ClickHouse/ClickHouse/pull/66898) ([alesapin](https://github.com/alesapin)).
+* Backported in [#69243](https://github.com/ClickHouse/ClickHouse/issues/69243): `UNION` clause in subqueries wasn't handled correctly in queries with parallel replicas and lead to LOGICAL_ERROR `Duplicate announcement received for replica`. [#69146](https://github.com/ClickHouse/ClickHouse/pull/69146) ([Igor Nikonov](https://github.com/devcrafter)).
+
+#### NOT FOR CHANGELOG / INSIGNIFICANT
+* Backported in [#69221](https://github.com/ClickHouse/ClickHouse/issues/69221): Disable memory test with sanitizer. [#69193](https://github.com/ClickHouse/ClickHouse/pull/69193) ([alesapin](https://github.com/alesapin)).
29 docs/changelogs/v24.5.7.31-stable.md Normal file
@@ -0,0 +1,29 @@
+---
+sidebar_position: 1
+sidebar_label: 2024
+---
+
+# 2024 Changelog
+
+### ClickHouse release v24.5.7.31-stable (6c185e9aec1) FIXME as compared to v24.5.6.45-stable (bdca8604c29)
+
+#### Bug Fix (user-visible misbehavior in an official stable release)
+* Backported in [#68564](https://github.com/ClickHouse/ClickHouse/issues/68564): Fix indexHint function case found by fuzzer. [#66286](https://github.com/ClickHouse/ClickHouse/pull/66286) ([Anton Popov](https://github.com/CurtizJ)).
+* Backported in [#68996](https://github.com/ClickHouse/ClickHouse/issues/68996): Fix the upper bound of the function `fromModifiedJulianDay`. It was supposed to be `9999-12-31` but was mistakenly set to `9999-01-01`. [#67583](https://github.com/ClickHouse/ClickHouse/pull/67583) ([PHO](https://github.com/depressed-pho)).
+* Backported in [#68865](https://github.com/ClickHouse/ClickHouse/issues/68865): Fixed crash in Parquet filtering when data types in the file substantially differ from requested types (e.g. `... FROM file('a.parquet', Parquet, 'x String')`, but the file has `x Int64`). Without this fix, use `input_format_parquet_filter_push_down = 0` as a workaround. [#68131](https://github.com/ClickHouse/ClickHouse/pull/68131) ([Michael Kolupaev](https://github.com/al13n321)).
+* Backported in [#69004](https://github.com/ClickHouse/ClickHouse/issues/69004): After https://github.com/ClickHouse/ClickHouse/pull/61984 `schema_inference_make_columns_nullable=0` still can make columns `Nullable` in Parquet/Arrow formats. The change was backward incompatible and users noticed the changes in the behaviour. This PR makes `schema_inference_make_columns_nullable=0` to work as before (no Nullable columns will be inferred) and introduces new value `auto` for this setting that will make columns `Nullable` only if data has information about nullability. [#68298](https://github.com/ClickHouse/ClickHouse/pull/68298) ([Kruglov Pavel](https://github.com/Avogar)).
+* Backported in [#68882](https://github.com/ClickHouse/ClickHouse/issues/68882): Fixes [#50868](https://github.com/ClickHouse/ClickHouse/issues/50868). Small DateTime64 constant values returned by a nested subquery inside a distributed query were wrongly transformed to Nulls, thus causing errors and possible incorrect query results. [#68323](https://github.com/ClickHouse/ClickHouse/pull/68323) ([Shankar](https://github.com/shiyer7474)).
+* Backported in [#69023](https://github.com/ClickHouse/ClickHouse/issues/69023): Added back virtual columns ` _table` and `_database` to distributed tables. They were available until version 24.3. [#68672](https://github.com/ClickHouse/ClickHouse/pull/68672) ([Anton Popov](https://github.com/CurtizJ)).
+* Backported in [#68858](https://github.com/ClickHouse/ClickHouse/issues/68858): Fix possible error `Size of permutation (0) is less than required (...)` during Variant column permutation. [#68681](https://github.com/ClickHouse/ClickHouse/pull/68681) ([Kruglov Pavel](https://github.com/Avogar)).
+* Backported in [#68784](https://github.com/ClickHouse/ClickHouse/issues/68784): Fix issue with materialized constant keys when hashing maps with arrays as keys in functions `sipHash(64/128)Keyed`. [#68731](https://github.com/ClickHouse/ClickHouse/pull/68731) ([Salvatore Mesoraca](https://github.com/aiven-sal)).
+* Backported in [#69154](https://github.com/ClickHouse/ClickHouse/issues/69154): Fix possible wrong result during anyHeavy state merge. [#68950](https://github.com/ClickHouse/ClickHouse/pull/68950) ([Raúl Marín](https://github.com/Algunenano)).
+
+#### NO CL CATEGORY
+* Backported in [#68940](https://github.com/ClickHouse/ClickHouse/issues/68940):. [#68897](https://github.com/ClickHouse/ClickHouse/pull/68897) ([Alexander Gololobov](https://github.com/davenger)).
+
+#### NOT FOR CHANGELOG / INSIGNIFICANT
+* Backported in [#68828](https://github.com/ClickHouse/ClickHouse/issues/68828): Turn off fault injection for insert in `01396_inactive_replica_cleanup_nodes_zookeeper`. [#68715](https://github.com/ClickHouse/ClickHouse/pull/68715) ([alesapin](https://github.com/alesapin)).
+* Backported in [#69046](https://github.com/ClickHouse/ClickHouse/issues/69046): Fix 01114_database_atomic flakiness. [#68930](https://github.com/ClickHouse/ClickHouse/pull/68930) ([Raúl Marín](https://github.com/Algunenano)).
18 docs/changelogs/v24.5.8.10-stable.md Normal file
@@ -0,0 +1,18 @@
+---
+sidebar_position: 1
+sidebar_label: 2024
+---
+
+# 2024 Changelog
+
+### ClickHouse release v24.5.8.10-stable (f11729638ea) FIXME as compared to v24.5.7.31-stable (6c185e9aec1)
+
+#### Bug Fix (user-visible misbehavior in an official stable release)
+* Backported in [#69295](https://github.com/ClickHouse/ClickHouse/issues/69295): TODO. [#68744](https://github.com/ClickHouse/ClickHouse/pull/68744) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Backported in [#69245](https://github.com/ClickHouse/ClickHouse/issues/69245): `UNION` clause in subqueries wasn't handled correctly in queries with parallel replicas and lead to LOGICAL_ERROR `Duplicate announcement received for replica`. [#69146](https://github.com/ClickHouse/ClickHouse/pull/69146) ([Igor Nikonov](https://github.com/devcrafter)).
+* Fix crash when using `s3` table function with GLOB paths and filters. [#69176](https://github.com/ClickHouse/ClickHouse/pull/69176) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
+
+#### NOT FOR CHANGELOG / INSIGNIFICANT
+* Backported in [#69223](https://github.com/ClickHouse/ClickHouse/issues/69223): Disable memory test with sanitizer. [#69193](https://github.com/ClickHouse/ClickHouse/pull/69193) ([alesapin](https://github.com/alesapin)).
29 docs/changelogs/v24.6.5.30-stable.md Normal file
@@ -0,0 +1,29 @@
+---
+sidebar_position: 1
+sidebar_label: 2024
+---
+
+# 2024 Changelog
+
+### ClickHouse release v24.6.5.30-stable (e6e196c92d6) FIXME as compared to v24.6.4.42-stable (c534bb4b4dd)
+
+#### Bug Fix (user-visible misbehavior in an official stable release)
+* Backported in [#68969](https://github.com/ClickHouse/ClickHouse/issues/68969): Fix the upper bound of the function `fromModifiedJulianDay`. It was supposed to be `9999-12-31` but was mistakenly set to `9999-01-01`. [#67583](https://github.com/ClickHouse/ClickHouse/pull/67583) ([PHO](https://github.com/depressed-pho)).
+* Backported in [#68814](https://github.com/ClickHouse/ClickHouse/issues/68814): Fixed crash in Parquet filtering when data types in the file substantially differ from requested types (e.g. `... FROM file('a.parquet', Parquet, 'x String')`, but the file has `x Int64`). Without this fix, use `input_format_parquet_filter_push_down = 0` as a workaround. [#68131](https://github.com/ClickHouse/ClickHouse/pull/68131) ([Michael Kolupaev](https://github.com/al13n321)).
+* Backported in [#69005](https://github.com/ClickHouse/ClickHouse/issues/69005): After https://github.com/ClickHouse/ClickHouse/pull/61984 `schema_inference_make_columns_nullable=0` still can make columns `Nullable` in Parquet/Arrow formats. The change was backward incompatible and users noticed the changes in the behaviour. This PR makes `schema_inference_make_columns_nullable=0` to work as before (no Nullable columns will be inferred) and introduces new value `auto` for this setting that will make columns `Nullable` only if data has information about nullability. [#68298](https://github.com/ClickHouse/ClickHouse/pull/68298) ([Kruglov Pavel](https://github.com/Avogar)).
+* Backported in [#68883](https://github.com/ClickHouse/ClickHouse/issues/68883): Fixes [#50868](https://github.com/ClickHouse/ClickHouse/issues/50868). Small DateTime64 constant values returned by a nested subquery inside a distributed query were wrongly transformed to Nulls, thus causing errors and possible incorrect query results. [#68323](https://github.com/ClickHouse/ClickHouse/pull/68323) ([Shankar](https://github.com/shiyer7474)).
+* Backported in [#69025](https://github.com/ClickHouse/ClickHouse/issues/69025): Added back virtual columns ` _table` and `_database` to distributed tables. They were available until version 24.3. [#68672](https://github.com/ClickHouse/ClickHouse/pull/68672) ([Anton Popov](https://github.com/CurtizJ)).
+* Backported in [#68860](https://github.com/ClickHouse/ClickHouse/issues/68860): Fix possible error `Size of permutation (0) is less than required (...)` during Variant column permutation. [#68681](https://github.com/ClickHouse/ClickHouse/pull/68681) ([Kruglov Pavel](https://github.com/Avogar)).
+* Backported in [#68786](https://github.com/ClickHouse/ClickHouse/issues/68786): Fix issue with materialized constant keys when hashing maps with arrays as keys in functions `sipHash(64/128)Keyed`. [#68731](https://github.com/ClickHouse/ClickHouse/pull/68731) ([Salvatore Mesoraca](https://github.com/aiven-sal)).
+* Backported in [#69156](https://github.com/ClickHouse/ClickHouse/issues/69156): Fix possible wrong result during anyHeavy state merge. [#68950](https://github.com/ClickHouse/ClickHouse/pull/68950) ([Raúl Marín](https://github.com/Algunenano)).
+* Backported in [#69116](https://github.com/ClickHouse/ClickHouse/issues/69116): Fix logical error when we have empty async insert. [#69080](https://github.com/ClickHouse/ClickHouse/pull/69080) ([Han Fei](https://github.com/hanfei1991)).
+
+#### NO CL CATEGORY
+* Backported in [#68942](https://github.com/ClickHouse/ClickHouse/issues/68942):. [#68897](https://github.com/ClickHouse/ClickHouse/pull/68897) ([Alexander Gololobov](https://github.com/davenger)).
+
+#### NOT FOR CHANGELOG / INSIGNIFICANT
+* Backported in [#68830](https://github.com/ClickHouse/ClickHouse/issues/68830): Turn off fault injection for insert in `01396_inactive_replica_cleanup_nodes_zookeeper`. [#68715](https://github.com/ClickHouse/ClickHouse/pull/68715) ([alesapin](https://github.com/alesapin)).
+* Backported in [#69048](https://github.com/ClickHouse/ClickHouse/issues/69048): Fix 01114_database_atomic flakiness. [#68930](https://github.com/ClickHouse/ClickHouse/pull/68930) ([Raúl Marín](https://github.com/Algunenano)).
16
docs/changelogs/v24.6.6.6-stable.md
Normal file
@ -0,0 +1,16 @@
---
sidebar_position: 1
sidebar_label: 2024
---

# 2024 Changelog

### ClickHouse release v24.6.6.6-stable (a4c4580e639) FIXME as compared to v24.6.5.30-stable (e6e196c92d6)

#### Bug Fix (user-visible misbehavior in an official stable release)

* Backported in [#69197](https://github.com/ClickHouse/ClickHouse/issues/69197): TODO. [#68744](https://github.com/ClickHouse/ClickHouse/pull/68744) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Backported in [#69225](https://github.com/ClickHouse/ClickHouse/issues/69225): Disable memory test with sanitizer. [#69193](https://github.com/ClickHouse/ClickHouse/pull/69193) ([alesapin](https://github.com/alesapin)).
17
docs/changelogs/v24.7.6.8-stable.md
Normal file
@ -0,0 +1,17 @@
---
sidebar_position: 1
sidebar_label: 2024
---

# 2024 Changelog

### ClickHouse release v24.7.6.8-stable (7779883593a) FIXME as compared to v24.7.5.37-stable (f2533ca97be)

#### Bug Fix (user-visible misbehavior in an official stable release)

* Backported in [#69198](https://github.com/ClickHouse/ClickHouse/issues/69198): TODO. [#68744](https://github.com/ClickHouse/ClickHouse/pull/68744) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#69249](https://github.com/ClickHouse/ClickHouse/issues/69249): The `UNION` clause in subqueries wasn't handled correctly in queries with parallel replicas and led to a LOGICAL_ERROR `Duplicate announcement received for replica`. [#69146](https://github.com/ClickHouse/ClickHouse/pull/69146) ([Igor Nikonov](https://github.com/devcrafter)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Backported in [#69227](https://github.com/ClickHouse/ClickHouse/issues/69227): Disable memory test with sanitizer. [#69193](https://github.com/ClickHouse/ClickHouse/pull/69193) ([alesapin](https://github.com/alesapin)).
50
docs/changelogs/v24.8.3.59-lts.md
Normal file
@ -0,0 +1,50 @@
---
sidebar_position: 1
sidebar_label: 2024
---

# 2024 Changelog

### ClickHouse release v24.8.3.59-lts (e729b9fa40e) FIXME as compared to v24.8.2.3-lts (b54f79ed323)

#### New Feature

* Backported in [#68710](https://github.com/ClickHouse/ClickHouse/issues/68710): Query cache entries can now be dropped by tag. For example, the query cache entry created by `SELECT 1 SETTINGS use_query_cache = true, query_cache_tag = 'abc'` can now be dropped by `SYSTEM DROP QUERY CACHE TAG 'abc'` (or of course just: `SYSTEM DROP QUERY CACHE`, which will clear the entire query cache). [#68477](https://github.com/ClickHouse/ClickHouse/pull/68477) ([Michał Tabaszewski](https://github.com/pinsvin00)).

#### Improvement

* Backported in [#69097](https://github.com/ClickHouse/ClickHouse/issues/69097): Support for the Spanish language in the embedded dictionaries. [#69035](https://github.com/ClickHouse/ClickHouse/pull/69035) ([Vasily Okunev](https://github.com/VOkunev)).

#### Bug Fix (user-visible misbehavior in an official stable release)

* Backported in [#68973](https://github.com/ClickHouse/ClickHouse/issues/68973): Fix the upper bound of the function `fromModifiedJulianDay`. It was supposed to be `9999-12-31` but was mistakenly set to `9999-01-01`. [#67583](https://github.com/ClickHouse/ClickHouse/pull/67583) ([PHO](https://github.com/depressed-pho)).
* Backported in [#68818](https://github.com/ClickHouse/ClickHouse/issues/68818): Fixed a crash in Parquet filtering when data types in the file substantially differ from the requested types (e.g. `... FROM file('a.parquet', Parquet, 'x String')`, but the file has `x Int64`). Without this fix, use `input_format_parquet_filter_push_down = 0` as a workaround. [#68131](https://github.com/ClickHouse/ClickHouse/pull/68131) ([Michael Kolupaev](https://github.com/al13n321)).
* Backported in [#68893](https://github.com/ClickHouse/ClickHouse/issues/68893): After https://github.com/ClickHouse/ClickHouse/pull/61984, `schema_inference_make_columns_nullable=0` could still make columns `Nullable` in the Parquet/Arrow formats. The change was backward incompatible and users noticed the change in behaviour. This PR makes `schema_inference_make_columns_nullable=0` work as before (no `Nullable` columns will be inferred) and introduces a new value `auto` for this setting that makes columns `Nullable` only if the data contains information about nullability. [#68298](https://github.com/ClickHouse/ClickHouse/pull/68298) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#68721](https://github.com/ClickHouse/ClickHouse/issues/68721): Fixes [#50868](https://github.com/ClickHouse/ClickHouse/issues/50868). Small DateTime64 constant values returned by a nested subquery inside a distributed query were wrongly transformed to Nulls, thus causing errors and possibly incorrect query results. [#68323](https://github.com/ClickHouse/ClickHouse/pull/68323) ([Shankar](https://github.com/shiyer7474)).
* Backported in [#69029](https://github.com/ClickHouse/ClickHouse/issues/69029): Added back the virtual columns `_table` and `_database` to distributed tables. They were available until version 24.3. [#68672](https://github.com/ClickHouse/ClickHouse/pull/68672) ([Anton Popov](https://github.com/CurtizJ)).
* Backported in [#68864](https://github.com/ClickHouse/ClickHouse/issues/68864): Fix a possible error `Size of permutation (0) is less than required (...)` during Variant column permutation. [#68681](https://github.com/ClickHouse/ClickHouse/pull/68681) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#68854](https://github.com/ClickHouse/ClickHouse/issues/68854): Fix a possible error `DB::Exception: Block structure mismatch in joined block stream: different columns:` with the new JSON column. [#68686](https://github.com/ClickHouse/ClickHouse/pull/68686) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#68790](https://github.com/ClickHouse/ClickHouse/issues/68790): Fix an issue with materialized constant keys when hashing maps with arrays as keys in the functions `sipHash(64/128)Keyed`. [#68731](https://github.com/ClickHouse/ClickHouse/pull/68731) ([Salvatore Mesoraca](https://github.com/aiven-sal)).
* Backported in [#69108](https://github.com/ClickHouse/ClickHouse/issues/69108): TODO. [#68744](https://github.com/ClickHouse/ClickHouse/pull/68744) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#68850](https://github.com/ClickHouse/ClickHouse/issues/68850): Fix resolving dynamic subcolumns from subqueries in the analyzer. [#68824](https://github.com/ClickHouse/ClickHouse/pull/68824) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#68911](https://github.com/ClickHouse/ClickHouse/issues/68911): Fix complex types metadata parsing in DeltaLake. Closes [#68739](https://github.com/ClickHouse/ClickHouse/issues/68739). [#68836](https://github.com/ClickHouse/ClickHouse/pull/68836) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Backported in [#69160](https://github.com/ClickHouse/ClickHouse/issues/69160): Fix a possible wrong result during anyHeavy state merge. [#68950](https://github.com/ClickHouse/ClickHouse/pull/68950) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#69072](https://github.com/ClickHouse/ClickHouse/issues/69072): Fixed writing to Materialized Views with the setting `optimize_functions_to_subcolumns` enabled. [#68951](https://github.com/ClickHouse/ClickHouse/pull/68951) ([Anton Popov](https://github.com/CurtizJ)).
* Backported in [#69016](https://github.com/ClickHouse/ClickHouse/issues/69016): Don't use the serializations cache in const Dynamic column methods. It could lead to use of uninitialized values or even a race condition during aggregations. [#68953](https://github.com/ClickHouse/ClickHouse/pull/68953) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#69120](https://github.com/ClickHouse/ClickHouse/issues/69120): Fix a logical error on empty async inserts. [#69080](https://github.com/ClickHouse/ClickHouse/pull/69080) ([Han Fei](https://github.com/hanfei1991)).

#### NO CL CATEGORY

* Backported in [#68947](https://github.com/ClickHouse/ClickHouse/issues/68947):. [#68897](https://github.com/ClickHouse/ClickHouse/pull/68897) ([Alexander Gololobov](https://github.com/davenger)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Backported in [#68704](https://github.com/ClickHouse/ClickHouse/issues/68704): Fix enumerating dynamic subcolumns. [#68582](https://github.com/ClickHouse/ClickHouse/pull/68582) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#69000](https://github.com/ClickHouse/ClickHouse/issues/69000): Prioritize virtual columns in hive partitioning. [#68606](https://github.com/ClickHouse/ClickHouse/pull/68606) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
* Backported in [#68799](https://github.com/ClickHouse/ClickHouse/issues/68799): CI: Disable SQLLogic job. [#68654](https://github.com/ClickHouse/ClickHouse/pull/68654) ([Max K.](https://github.com/maxknv)).
* Backported in [#68834](https://github.com/ClickHouse/ClickHouse/issues/68834): Turn off fault injection for insert in `01396_inactive_replica_cleanup_nodes_zookeeper`. [#68715](https://github.com/ClickHouse/ClickHouse/pull/68715) ([alesapin](https://github.com/alesapin)).
* Backported in [#68781](https://github.com/ClickHouse/ClickHouse/issues/68781): Fix flaky test 00989_parallel_parts_loading. [#68737](https://github.com/ClickHouse/ClickHouse/pull/68737) ([alesapin](https://github.com/alesapin)).
* Backported in [#68762](https://github.com/ClickHouse/ClickHouse/issues/68762): To make a patch release possible from every commit on the release branch, the package_debug build is required and must not be skipped. [#68750](https://github.com/ClickHouse/ClickHouse/pull/68750) ([Max K.](https://github.com/maxknv)).
* Backported in [#68810](https://github.com/ClickHouse/ClickHouse/issues/68810): Try to disable the rerun check if the job was triggered manually. [#68751](https://github.com/ClickHouse/ClickHouse/pull/68751) ([Max K.](https://github.com/maxknv)).
* Backported in [#68962](https://github.com/ClickHouse/ClickHouse/issues/68962): Fix 2477 timeout. [#68752](https://github.com/ClickHouse/ClickHouse/pull/68752) ([jsc0218](https://github.com/jsc0218)).
* Backported in [#68977](https://github.com/ClickHouse/ClickHouse/issues/68977): Check the setting use_json_alias_for_old_object_type at runtime. [#68793](https://github.com/ClickHouse/ClickHouse/pull/68793) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#68852](https://github.com/ClickHouse/ClickHouse/issues/68852): Make dynamic structure selection more consistent. [#68802](https://github.com/ClickHouse/ClickHouse/pull/68802) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#69052](https://github.com/ClickHouse/ClickHouse/issues/69052): Fix 01114_database_atomic flakiness. [#68930](https://github.com/ClickHouse/ClickHouse/pull/68930) ([Raúl Marín](https://github.com/Algunenano)).
22
docs/changelogs/v24.8.4.13-lts.md
Normal file
@ -0,0 +1,22 @@
---
sidebar_position: 1
sidebar_label: 2024
---

# 2024 Changelog

### ClickHouse release v24.8.4.13-lts (53195bc189b) FIXME as compared to v24.8.3.59-lts (e729b9fa40e)

#### Improvement

* Backported in [#68699](https://github.com/ClickHouse/ClickHouse/issues/68699): Delete the old named-collections code from dictionaries and substitute it with the new code, which allows using DDL-created named collections in dictionaries. Closes [#60936](https://github.com/ClickHouse/ClickHouse/issues/60936), closes [#36890](https://github.com/ClickHouse/ClickHouse/issues/36890). [#68412](https://github.com/ClickHouse/ClickHouse/pull/68412) ([Kseniia Sumarokova](https://github.com/kssenii)).

#### Bug Fix (user-visible misbehavior in an official stable release)

* Backported in [#69231](https://github.com/ClickHouse/ClickHouse/issues/69231): Fix a parsing error when null should be inserted as default in some cases during JSON type parsing. [#68955](https://github.com/ClickHouse/ClickHouse/pull/68955) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#69251](https://github.com/ClickHouse/ClickHouse/issues/69251): The `UNION` clause in subqueries wasn't handled correctly in queries with parallel replicas and led to a LOGICAL_ERROR `Duplicate announcement received for replica`. [#69146](https://github.com/ClickHouse/ClickHouse/pull/69146) ([Igor Nikonov](https://github.com/devcrafter)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Backported in [#69189](https://github.com/ClickHouse/ClickHouse/issues/69189): Don't create the Object type if use_json_alias_for_old_object_type=1 but allow_experimental_object_type=0. [#69150](https://github.com/ClickHouse/ClickHouse/pull/69150) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#69229](https://github.com/ClickHouse/ClickHouse/issues/69229): Disable memory test with sanitizer. [#69193](https://github.com/ClickHouse/ClickHouse/pull/69193) ([alesapin](https://github.com/alesapin)).
* Backported in [#69219](https://github.com/ClickHouse/ClickHouse/issues/69219): Disable perf-like test with sanitizers. [#69194](https://github.com/ClickHouse/ClickHouse/pull/69194) ([alesapin](https://github.com/alesapin)).
72
docs/en/engines/table-engines/integrations/azure-queue.md
Normal file
@ -0,0 +1,72 @@
---
slug: /en/engines/table-engines/integrations/azure-queue
sidebar_position: 181
sidebar_label: AzureQueue
---

# AzureQueue Table Engine

This engine provides an integration with the [Azure Blob Storage](https://azure.microsoft.com/en-us/products/storage/blobs) ecosystem, allowing streaming data import.

## Create Table {#creating-a-table}

``` sql
CREATE TABLE test (name String, value UInt32)
ENGINE = AzureQueue(...)
[SETTINGS]
[mode = '',]
[after_processing = 'keep',]
[keeper_path = '',]
...
```

**Engine parameters**

`AzureQueue` parameters are the same as the `AzureBlobStorage` table engine supports. See the parameters section [here](../../../engines/table-engines/integrations/azureBlobStorage.md).

**Example**

```sql
CREATE TABLE azure_queue_engine_table (name String, value UInt32)
ENGINE=AzureQueue('DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;BlobEndpoint=http://azurite1:10000/devstoreaccount1/data/')
SETTINGS
    mode = 'unordered'
```

## Settings {#settings}

The set of supported settings is the same as for the `S3Queue` table engine, but without the `s3queue_` prefix. See the [full list of settings](../../../engines/table-engines/integrations/s3queue.md#settings).

## Description {#description}

`SELECT` is not particularly useful for streaming import (except for debugging), because each file can be imported only once. It is more practical to create real-time threads using [materialized views](../../../sql-reference/statements/create/view.md). To do this:

1. Use the engine to create a table for consuming from a specified path in Azure Blob Storage and consider it a data stream.
2. Create a table with the desired structure.
3. Create a materialized view that converts data from the engine and puts it into a previously created table.

When the `MATERIALIZED VIEW` joins the engine, it starts collecting data in the background.

Example:

``` sql
CREATE TABLE azure_queue_engine_table (name String, value UInt32)
ENGINE=AzureQueue('<endpoint>', 'CSV', 'gzip')
SETTINGS
    mode = 'unordered';

CREATE TABLE stats (name String, value UInt32)
ENGINE = MergeTree() ORDER BY name;

CREATE MATERIALIZED VIEW consumer TO stats
AS SELECT name, value FROM azure_queue_engine_table;

SELECT * FROM stats ORDER BY name;
```

## Virtual columns {#virtual-columns}

- `_path` — Path to the file.
- `_file` — Name of the file.

For more information about virtual columns see [here](../../../engines/table-engines/index.md#table_engines-virtual_columns).
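As a quick illustration, a minimal sketch of reading these virtual columns. This is for debugging only, since a `SELECT` from the queue consumes each file once; the table name reuses the example above:

```sql
-- Inspect which file each row came from; useful when debugging ingestion.
SELECT _path, _file, name, value
FROM azure_queue_engine_table
LIMIT 10;
```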
@ -35,7 +35,7 @@ CREATE TABLE s3_engine_table (name String, value UInt32)
[SETTINGS ...]
```

### Engine parameters {#parameters}

- `path` — Bucket URL with a path to the file. Supports the following wildcards in read-only mode: `*`, `**`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` — numbers, `'abc'`, `'def'` — strings. For more information see [below](#wildcards-in-path).
- `NOSIGN` - If this keyword is provided in place of credentials, all the requests will not be signed.
@ -5,6 +5,7 @@ sidebar_label: S3Queue
---

# S3Queue Table Engine

This engine provides integration with the [Amazon S3](https://aws.amazon.com/s3/) ecosystem and allows streaming import. This engine is similar to the [Kafka](../../../engines/table-engines/integrations/kafka.md) and [RabbitMQ](../../../engines/table-engines/integrations/rabbitmq.md) engines, but provides S3-specific features.

## Create Table {#creating-a-table}
@ -16,27 +17,25 @@ CREATE TABLE s3_queue_engine_table (name String, value UInt32)
[mode = '',]
[after_processing = 'keep',]
[keeper_path = '',]
[loading_retries = 0,]
[processing_threads_num = 1,]
[enable_logging_to_s3queue_log = 0,]
[polling_min_timeout_ms = 1000,]
[polling_max_timeout_ms = 10000,]
[polling_backoff_ms = 0,]
[tracked_file_ttl_sec = 0,]
[tracked_files_limit = 1000,]
[cleanup_interval_min_ms = 10000,]
[cleanup_interval_max_ms = 30000,]
```

:::warning
Before `24.7`, it is required to use the `s3queue_` prefix for all settings apart from `mode`, `after_processing` and `keeper_path`.
:::

**Engine parameters**

`S3Queue` parameters are the same as the `S3` table engine supports. See the parameters section [here](../../../engines/table-engines/integrations/s3.md#parameters).

**Example**
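The example itself is cut off by the diff at this point; a minimal sketch of what a creation statement might look like, with a hypothetical bucket URL:

```sql
-- The bucket URL and format are placeholders; adjust to your data.
CREATE TABLE s3_queue_engine_table (name String, value UInt32)
ENGINE = S3Queue('https://clickhouse-public-datasets.s3.amazonaws.com/my-test-bucket-768/*', 'CSV')
SETTINGS
    mode = 'unordered';
```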
@ -107,19 +107,24 @@ The vector similarity index currently does not work with per-table, non-default
[here](https://github.com/ClickHouse/ClickHouse/pull/51325#issuecomment-1605920475)). If necessary, the value must be changed in config.xml.
:::

Vector index creation is known to be slow. To speed the process up, index creation can be parallelized. The maximum number of threads can be configured using the server configuration setting [max_build_vector_similarity_index_thread_pool_size](../../../operations/server-configuration-parameters/settings.md#server_configuration_parameters_max_build_vector_similarity_index_thread_pool_size).

ANN indexes are built during column insertion and merge. As a result, `INSERT` and `OPTIMIZE` statements will be slower than for ordinary tables. ANN indexes are ideally used only with immutable or rarely changed data, i.e. when there are far more read requests than write requests.

ANN indexes support this type of query:

``` sql
WITH [...] AS reference_vector
SELECT *
FROM table
WHERE ...          -- WHERE clause is optional
ORDER BY Distance(vectors, reference_vector)
LIMIT N
```
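To make the template concrete, a hedged example, assuming a table `img` with a column `vectors Array(Float32)` carrying a vector similarity index; `L2Distance` stands in for the generic `Distance` function above:

``` sql
-- Find the 10 rows whose vectors are closest to the reference vector.
WITH [0.2, 0.1, 0.7] AS reference_vector
SELECT id
FROM img
ORDER BY L2Distance(vectors, reference_vector)
LIMIT 10
```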

:::tip
To avoid writing out large vectors, you can use [query
@ -989,19 +989,52 @@ ALTER TABLE tab DROP STATISTICS a;
These lightweight statistics aggregate information about the distribution of values in columns. Statistics are stored in every part and updated on every insert.
They can be used for prewhere optimization only if `set allow_statistics_optimize = 1` is enabled.

### Available Types of Column Statistics {#available-types-of-column-statistics}

- `MinMax`

    The minimum and maximum column value which allows to estimate the selectivity of range filters on numeric columns.

    Syntax: `minmax`

- `TDigest`

    [TDigest](https://github.com/tdunning/t-digest) sketches which allow to compute approximate percentiles (e.g. the 90th percentile) for numeric columns.

    Syntax: `tdigest`

- `Uniq`

    [HyperLogLog](https://en.wikipedia.org/wiki/HyperLogLog) sketches which provide an estimation of how many distinct values a column contains.

    Syntax: `uniq`

- `CountMin`

    [CountMin](https://en.wikipedia.org/wiki/Count%E2%80%93min_sketch) sketches which provide an approximate count of the frequency of each value in a column.

    Syntax: `countmin`

### Supported Data Types {#supported-data-types}

|           | (U)Int*, Float*, Decimal(*), Date*, Boolean, Enum* | String or FixedString |
|-----------|----------------------------------------------------|-----------------------|
| CountMin  | ✔                                                  | ✔                     |
| MinMax    | ✔                                                  | ✗                     |
| TDigest   | ✔                                                  | ✗                     |
| Uniq      | ✔                                                  | ✔                     |

### Supported Operations {#supported-operations}

|           | Equality filters (==) | Range filters (>, >=, <, <=) |
|-----------|-----------------------|------------------------------|
| CountMin  | ✔                     | ✗                            |
| MinMax    | ✗                     | ✔                            |
| TDigest   | ✗                     | ✔                            |
| Uniq      | ✔                     | ✗                            |
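For orientation, a minimal sketch of declaring these statistics, assuming the experimental statistics feature is enabled in your version; the table and column names are hypothetical:

```sql
SET allow_experimental_statistics = 1;   -- assumed gate for the feature
SET allow_statistics_optimize = 1;       -- let PREWHERE use the statistics

-- Declare statistics on columns at table creation time.
CREATE TABLE tab
(
    a Float64 STATISTICS(tdigest, uniq),
    b Int64 STATISTICS(minmax, countmin)
)
ENGINE = MergeTree
ORDER BY b;

-- Statistics can also be managed after creation:
ALTER TABLE tab ADD STATISTICS a TYPE countmin;
ALTER TABLE tab DROP STATISTICS a;
```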

## Column-level Settings {#column-level-settings}
@ -39,6 +39,7 @@ The supported formats are:
| [JSONCompact](#jsoncompact)                                                               | ✔ | ✔ |
| [JSONCompactStrings](#jsoncompactstrings)                                                 | ✗ | ✔ |
| [JSONCompactColumns](#jsoncompactcolumns)                                                 | ✔ | ✔ |
| [JSONCompactWithProgress](#jsoncompactwithprogress)                                       | ✗ | ✔ |
| [JSONEachRow](#jsoneachrow)                                                               | ✔ | ✔ |
| [PrettyJSONEachRow](#prettyjsoneachrow)                                                   | ✗ | ✔ |
| [JSONEachRowWithProgress](#jsoneachrowwithprogress)                                       | ✗ | ✔ |
@ -988,6 +989,59 @@ Example:
Columns that are not present in the block will be filled with default values (you can use the [input_format_defaults_for_omitted_fields](/docs/en/operations/settings/settings-formats.md/#input_format_defaults_for_omitted_fields) setting here)

## JSONCompactWithProgress {#jsoncompactwithprogress}

In this format, ClickHouse outputs each row as a separated, newline-delimited JSON object.

Each row is one of the following objects: metadata, data, progress information, statistics, exception, or totals:

1. **Metadata Object (`meta`)**
   - Describes the structure of the data rows.
   - Fields: `name` (column name), `type` (data type, e.g., `UInt32`, `String`, etc.).
   - Example: `{"meta": [{"name":"id", "type":"UInt32"}, {"name":"name", "type":"String"}]}`
   - Appears before any data objects.

2. **Data Object (`data`)**
   - Represents a row of query results.
   - Fields: An array with values corresponding to the columns defined in the metadata.
   - Example: `{"data":["1", "John Doe"]}`
   - Appears after the metadata object, one per row.

3. **Progress Information Object (`progress`)**
   - Provides real-time progress feedback during query execution.
   - Fields: `read_rows`, `read_bytes`, `written_rows`, `written_bytes`, `total_rows_to_read`, `result_rows`, `result_bytes`, `elapsed_ns`.
   - Example: `{"progress":{"read_rows":"8","read_bytes":"168"}}`
   - May appear intermittently.

4. **Statistics Object (`statistics`)**
   - Summarizes query execution statistics.
   - Fields: `rows`, `rows_before_limit_at_least`, `elapsed`, `rows_read`, `bytes_read`.
   - Example: `{"statistics": {"rows":2, "elapsed":0.001995, "rows_read":8}}`
   - Appears at the end.

5. **Exception Object (`exception`)**
   - Represents an error that occurred during query execution.
   - Fields: A single text field containing the error message.
   - Example: `{"exception": "Code: 395. DB::Exception: Value passed to 'throwIf' function is non-zero..."}`
   - Appears when an error is encountered.

6. **Totals Object (`totals`)**
   - Provides the totals for each numeric column in the result set.
   - Fields: An array with total values corresponding to the columns defined in the metadata.
   - Example: `{"totals": ["", "3"]}`
   - Appears at the end of the data rows, if applicable.

Example:

```json
{"meta": [{"name":"id", "type":"UInt32"}, {"name":"name", "type":"String"}]}
{"progress":{"read_rows":"8","read_bytes":"168","written_rows":"0","written_bytes":"0","total_rows_to_read":"2","result_rows":"0","result_bytes":"0","elapsed_ns":"0"}}
{"data":["1", "John Doe"]}
{"data":["2", "Joe Doe"]}
{"statistics": {"rows":2, "rows_before_limit_at_least":8, "elapsed":0.001995, "rows_read":8, "bytes_read":168}}
```
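A minimal usage sketch producing a stream like the one above; the table is hypothetical, only the `FORMAT` clause matters here:

```sql
-- Stream rows plus progress and statistics objects to the client.
SELECT id, name
FROM users
ORDER BY id
FORMAT JSONCompactWithProgress;
```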

## JSONEachRow {#jsoneachrow}

In this format, ClickHouse outputs each row as a separated, newline-delimited JSON Object.
@ -1342,6 +1396,7 @@ SELECT * FROM json_each_row_nested
- [input_format_json_ignore_unknown_keys_in_named_tuple](/docs/en/operations/settings/settings-formats.md/#input_format_json_ignore_unknown_keys_in_named_tuple) - ignore unknown keys in json object for named tuples. Default value - `false`.
- [input_format_json_compact_allow_variable_number_of_columns](/docs/en/operations/settings/settings-formats.md/#input_format_json_compact_allow_variable_number_of_columns) - allow variable number of columns in JSONCompact/JSONCompactEachRow format, ignore extra columns and use default values on missing columns. Default value - `false`.
- [input_format_json_throw_on_bad_escape_sequence](/docs/en/operations/settings/settings-formats.md/#input_format_json_throw_on_bad_escape_sequence) - throw an exception if JSON string contains bad escape sequence. If disabled, bad escape sequences will remain as is in the data. Default value - `true`.
- [input_format_json_empty_as_default](/docs/en/operations/settings/settings-formats.md/#input_format_json_empty_as_default) - treat empty fields in JSON input as default values. Default value - `false`. For complex default expressions [input_format_defaults_for_omitted_fields](/docs/en/operations/settings/settings-formats.md/#input_format_defaults_for_omitted_fields) must be enabled too.
- [output_format_json_quote_64bit_integers](/docs/en/operations/settings/settings-formats.md/#output_format_json_quote_64bit_integers) - controls quoting of 64-bit integers in JSON output format. Default value - `true`.
- [output_format_json_quote_64bit_floats](/docs/en/operations/settings/settings-formats.md/#output_format_json_quote_64bit_floats) - controls quoting of 64-bit floats in JSON output format. Default value - `false`.
- [output_format_json_quote_denormals](/docs/en/operations/settings/settings-formats.md/#output_format_json_quote_denormals) - enables '+nan', '-nan', '+inf', '-inf' outputs in JSON output format. Default value - `false`.
10
docs/en/interfaces/third-party/gui.md
vendored
@ -233,6 +233,16 @@ Features:
- Useful tools: Zookeeper data exploration, query EXPLAIN, kill queries, etc.
- Visualization metric charts: queries and resource usage, number of merges/mutations, merge performance, query performance, etc.

### CKibana {#ckibana}

[CKibana](https://github.com/TongchengOpenSource/ckibana) is a lightweight service that allows you to effortlessly search, explore, and visualize ClickHouse data using the native Kibana UI.

Features:

- Translates chart requests from the native Kibana UI into ClickHouse query syntax.
- Supports advanced features such as sampling and caching to enhance query performance.
- Minimizes the learning cost for users after migrating from Elasticsearch to ClickHouse.

## Commercial {#commercial}

### DataGrip {#datagrip}
@ -6,7 +6,7 @@ import SelfManaged from '@site/docs/en/_snippets/_self_managed_only_no_roadmap.m

<SelfManaged />

[SSL 'strict' option](../server-configuration-parameters/settings.md#server_configuration_parameters-openssl) enables mandatory certificate validation for incoming connections. In this case, only connections with trusted certificates can be established. Connections with untrusted certificates will be rejected. Thus, certificate validation allows to uniquely authenticate an incoming connection. The `Common Name` or `subjectAltName extension` field of the certificate is used to identify the connected user. The `subjectAltName extension` supports the usage of one wildcard '*' in the server configuration. This allows to associate multiple certificates with the same user. Additionally, reissuing and revoking of the certificates does not affect the ClickHouse configuration.

To enable SSL certificate authentication, a list of `Common Name`'s or `Subject Alt Name`'s for each ClickHouse user must be specified in the settings file `users.xml`:

@ -30,6 +30,12 @@ To enable SSL certificate authentication, a list of `Common Name`'s or `Subject
        </ssl_certificates>
        <!-- Other settings -->
    </user_name_2>
    <user_name_3>
        <ssl_certificates>
            <!-- Wildcard support -->
            <subject_alt_name>URI:spiffe://foo.com/*/bar</subject_alt_name>
        </ssl_certificates>
    </user_name_3>
</users>
</clickhouse>
```
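Alongside `users.xml`, certificate identities can also be assigned via SQL-driven access control; a hedged sketch, with a hypothetical user name:

```sql
-- Authenticate the user by the Common Name of the presented client certificate.
CREATE USER certificate_user IDENTIFIED WITH ssl_certificate CN 'certificate_user';
```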
@ -491,6 +491,14 @@ Type: Double

Default: 0.9

## max_build_vector_similarity_index_thread_pool_size {#server_configuration_parameters_max_build_vector_similarity_index_thread_pool_size}

The maximum number of threads to use for building vector indexes. 0 means all cores.

Type: UInt64

Default: 16
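To check the effective value at runtime, one possible query, assuming the `system.server_settings` table available in recent versions:

```sql
SELECT name, value
FROM system.server_settings
WHERE name = 'max_build_vector_similarity_index_thread_pool_size';
```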

## cgroups_memory_usage_observer_wait_time

Interval in seconds during which the server's maximum allowed memory consumption is adjusted by the corresponding threshold in cgroups. (see
@ -1463,26 +1471,29 @@ Examples:

## logger {#logger}

The location and format of log messages.

Keys:

- `level` – Log level. Acceptable values: `none` (turns logging off), `fatal`, `critical`, `error`, `warning`, `notice`, `information`, `debug`, `trace`, `test`.
- `log` – The path to the log file.
- `errorlog` – The path to the error log file.
- `size` – Rotation policy: Maximum size of the log files in bytes. Once the log file size exceeds this threshold, it is renamed and archived, and a new log file is created.
- `count` – Rotation policy: How many historical log files ClickHouse keeps at most.
- `stream_compress` – Compress log messages using LZ4. Set to `1` or `true` to enable.
- `console` – Do not write log messages to log files; instead, print them to the console. Set to `1` or `true` to enable. Default is `1` if ClickHouse does not run in daemon mode, `0` otherwise.
- `console_log_level` – Log level for console output. Defaults to `level`.
- `formatting` – Log format for console output. Currently, only `json` is supported.
- `use_syslog` - Also forward log output to syslog.
- `syslog_level` - Log level for logging to syslog.

**Log format specifiers**

File names in `log` and `errorLog` paths support the format specifiers below for the resulting file name (the directory part does not support them).

Column “Example” shows the output at `2023-07-06 18:32:07`.

| Specifier | Description | Example |
|-------------|---------------------------------------------------------------------------------------------------------------------|--------------------------|
@ -1537,18 +1548,37 @@ Using the following format specifiers, you can define a pattern for the resultin
</logger>
```

To print log messages only in the console:

``` xml
<logger>
    <level>information</level>
    <console>true</console>
</logger>
```

**Per-level Overrides**

The log level of individual log names can be overridden. For example, to mute all messages of the loggers "Backup" and "RBAC":

```xml
<logger>
    <levels>
        <logger>
            <name>Backup</name>
            <level>none</level>
        </logger>
        <logger>
            <name>RBAC</name>
            <level>none</level>
        </logger>
    </levels>
</logger>
```

### syslog

To write log messages additionally to syslog:

``` xml
<logger>
    ...
</logger>
```

Keys for `<syslog>`:

- `address` — The address of syslog in the format `host\[:port\]`. If omitted, the local daemon is used.
- `hostname` — The name of the host from which logs are sent. Optional.
- `facility` — The syslog [facility keyword](https://en.wikipedia.org/wiki/Syslog#Facility). Must be specified uppercase with a “LOG_” prefix, e.g. `LOG_USER`, `LOG_DAEMON`, `LOG_LOCAL3`, etc. Default value: `LOG_USER` if `address` is specified, `LOG_DAEMON` otherwise.
- `format` – Log message format. Possible values: `bsd` and `syslog`.
### Log formats

@ -1588,6 +1616,7 @@ You can specify the log format that will be outputted in the console log. Curren
    "source_line": "192"
}
```

To enable JSON logging support, use the following snippet:

```xml
@ -752,6 +752,17 @@ Possible values:

Default value: 0.

### input_format_json_empty_as_default {#input_format_json_empty_as_default}

When enabled, replace empty input fields in JSON with default values. For complex default expressions `input_format_defaults_for_omitted_fields` must be enabled too.

Possible values:

+ 0 — Disable.
+ 1 — Enable.

Default value: 0.
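A small hedged sketch of the setting in action; the table and input are illustrative, not from the original:

```sql
SET input_format_json_empty_as_default = 1;
SET input_format_defaults_for_omitted_fields = 1; -- needed for complex DEFAULT expressions

CREATE TABLE t (x UInt32 DEFAULT 42, s String) ENGINE = Memory;

-- The empty string for `x` falls back to the column default instead of failing.
INSERT INTO t FORMAT JSONEachRow {"x": "", "s": "hello"};

SELECT * FROM t; -- x = 42
```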

## TSV format settings {#tsv-format-settings}

### input_format_tsv_empty_as_default {#input_format_tsv_empty_as_default}
@ -3226,7 +3226,7 @@ Default value: `0`.

## lightweight_deletes_sync {#lightweight_deletes_sync}

The same as [`mutations_sync`](#mutations_sync), but controls only the execution of lightweight deletes.

Possible values:
@ -499,7 +499,7 @@ Required parameters:
- `type` — `encrypted`. Otherwise the encrypted disk is not created.
- `disk` — Type of disk for data storage.
- `key` — The key for encryption and decryption. Type: [Uint64](/docs/en/sql-reference/data-types/int-uint.md). You can use the `key_hex` parameter to encode the key in hexadecimal form.
  You can specify multiple keys using the `id` attribute (see example below).

Optional parameters:
@ -47,13 +47,15 @@ keeper foo bar

- `ls '[path]'` -- Lists the nodes for the given path (default: cwd)
- `cd '[path]'` -- Changes the working path (default `.`)
- `cp '<src>' '<dest>'` -- Copies 'src' node to 'dest' path
- `mv '<src>' '<dest>'` -- Moves 'src' node to the 'dest' path
- `exists '<path>'` -- Returns `1` if node exists, `0` otherwise
- `set '<path>' <value> [version]` -- Updates the node's value. Only updates if version matches (default: -1)
- `create '<path>' <value> [mode]` -- Creates new node with the set value
- `touch '<path>'` -- Creates new node with an empty string as value. Doesn't throw an exception if the node already exists
- `get '<path>'` -- Returns the node's value
- `rm '<path>' [version]` -- Removes the node only if version matches (default: -1)
- `rmr '<path>' [limit]` -- Recursively deletes path if the subtree size is smaller than the limit. Confirmation required (default limit = 100)
- `flwc <command>` -- Executes four-letter-word command
- `help` -- Prints this message
- `get_direct_children_number '[path]'` -- Get the number of direct children nodes under a specific path
@ -0,0 +1,44 @@
---
slug: /en/sql-reference/aggregate-functions/reference/distinctdynamictypes
sidebar_position: 215
---

# distinctDynamicTypes

Calculates the list of distinct data types stored in a [Dynamic](../../data-types/dynamic.md) column.

**Syntax**

```sql
distinctDynamicTypes(dynamic)
```

**Arguments**

- `dynamic` — [Dynamic](../../data-types/dynamic.md) column.

**Returned Value**

- The sorted list of data type names [Array(String)](../../data-types/array.md).

**Example**

Query:

```sql
DROP TABLE IF EXISTS test_dynamic;
CREATE TABLE test_dynamic(d Dynamic) ENGINE = Memory;
INSERT INTO test_dynamic VALUES (42), (NULL), ('Hello'), ([1, 2, 3]), ('2020-01-01'), (map(1, 2)), (43), ([4, 5]), (NULL), ('World'), (map(3, 4))
```

```sql
SELECT distinctDynamicTypes(d) FROM test_dynamic;
```

Result:

```reference
┌─distinctDynamicTypes(d)──────────────────────────────────────┐
│ ['Array(Int64)','Date','Int64','Map(UInt8, UInt8)','String'] │
└──────────────────────────────────────────────────────────────┘
```
@ -0,0 +1,125 @@
|
|||||||
|
---
|
||||||
|
slug: /en/sql-reference/aggregate-functions/reference/distinctjsonpaths
|
||||||
|
sidebar_position: 216
|
||||||
|
---
|
||||||
|
|
||||||
|
# distinctJSONPaths
|
||||||
|
|
||||||
|
Calculates the list of distinct paths stored in [JSON](../../data-types/newjson.md) column.
|
||||||
|
|
||||||
|
**Syntax**
|
||||||
|
|
||||||
|
```sql
|
||||||
|
distinctJSONPaths(json)
|
||||||
|
```
|
||||||
|
|
||||||
|
**Arguments**
|
||||||
|
|
||||||
|
- `json` — [JSON](../../data-types/newjson.md) column.
|
||||||
|
|
||||||
|
**Returned Value**
|
||||||
|
|
||||||
|
- The sorted list of paths [Array(String)](../../data-types/array.md).
|
||||||
|
|
||||||
|
**Example**
|
||||||
|
|
||||||
|
Query:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
DROP TABLE IF EXISTS test_json;
|
||||||
|
CREATE TABLE test_json(json JSON) ENGINE = Memory;
|
||||||
|
INSERT INTO test_json VALUES ('{"a" : 42, "b" : "Hello"}'), ('{"b" : [1, 2, 3], "c" : {"d" : {"e" : "2020-01-01"}}}'), ('{"a" : 43, "c" : {"d" : {"f" : [{"g" : 42}]}}}')
|
||||||
|
```
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SELECT distinctJSONPaths(json) FROM test_json;
|
||||||
|
```
|
||||||
|
|
||||||
|
Result:
|
||||||
|
|
||||||
|
```reference
|
||||||
|
┌─distinctJSONPaths(json)───┐
|
||||||
|
│ ['a','b','c.d.e','c.d.f'] │
|
||||||
|
└───────────────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
# distinctJSONPathsAndTypes
|
||||||
|
|
||||||
|
Calculates the list of distinct paths and their types stored in [JSON](../../data-types/newjson.md) column.
|
||||||
|
|
||||||
|
**Syntax**
|
||||||
|
|
||||||
|
```sql
|
||||||
|
distinctJSONPathsAndTypes(json)
|
||||||
|
```
|
||||||
|
|
||||||
|
**Arguments**
|
||||||
|
|
||||||
|
- `json` — [JSON](../../data-types/newjson.md) column.
|
||||||
|
|
||||||
|
**Returned Value**
|
||||||
|
|
||||||
|
- The sorted map of paths and types [Map(String, Array(String))](../../data-types/map.md).
|
||||||
|
|
||||||
|
**Example**
|
||||||
|
|
||||||
|
Query:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
DROP TABLE IF EXISTS test_json;
|
||||||
|
CREATE TABLE test_json(json JSON) ENGINE = Memory;
|
||||||
|
INSERT INTO test_json VALUES ('{"a" : 42, "b" : "Hello"}'), ('{"b" : [1, 2, 3], "c" : {"d" : {"e" : "2020-01-01"}}}'), ('{"a" : 43, "c" : {"d" : {"f" : [{"g" : 42}]}}}')
|
||||||
|
```
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SELECT distinctJSONPathsAndTypes(json) FROM test_json;
|
||||||
|
```
|
||||||
|
|
||||||
|
Result:
|
||||||
|
|
||||||
|
```reference
|
||||||
|
┌─distinctJSONPathsAndTypes(json)───────────────────────────────────────────────────────────────────────────────────────────────────────────────┐
|
||||||
|
│ {'a':['Int64'],'b':['Array(Nullable(Int64))','String'],'c.d.e':['Date'],'c.d.f':['Array(JSON(max_dynamic_types=16, max_dynamic_paths=256))']} │
|
||||||
|
└───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘
|
||||||
|
```

**Note**

If the JSON declaration contains paths with specified types, these paths will always be included in the result of the `distinctJSONPaths`/`distinctJSONPathsAndTypes` functions, even if the input data does not contain values for these paths.

```sql
DROP TABLE IF EXISTS test_json;
CREATE TABLE test_json(json JSON(a UInt32)) ENGINE = Memory;
INSERT INTO test_json VALUES ('{"b" : "Hello"}'), ('{"b" : "World", "c" : [1, 2, 3]}');
```

```sql
SELECT json FROM test_json;
```

```text
┌─json──────────────────────────────────┐
│ {"a":0,"b":"Hello"}                   │
│ {"a":0,"b":"World","c":["1","2","3"]} │
└───────────────────────────────────────┘
```

```sql
SELECT distinctJSONPaths(json) FROM test_json;
```

```text
┌─distinctJSONPaths(json)─┐
│ ['a','b','c']           │
└─────────────────────────┘
```

```sql
SELECT distinctJSONPathsAndTypes(json) FROM test_json;
```

```text
┌─distinctJSONPathsAndTypes(json)────────────────────────────────┐
│ {'a':['UInt32'],'b':['String'],'c':['Array(Nullable(Int64))']} │
└────────────────────────────────────────────────────────────────┘
```

@ -505,7 +505,130 @@ As we can see, ClickHouse kept the most frequent paths `a`, `b` and `c` and move

## Introspection functions

There are several functions that can help to inspect the content of a JSON column: [JSONAllPaths](../functions/json-functions.md#jsonallpaths), [JSONAllPathsWithTypes](../functions/json-functions.md#jsonallpathswithtypes), [JSONDynamicPaths](../functions/json-functions.md#jsondynamicpaths), [JSONDynamicPathsWithTypes](../functions/json-functions.md#jsondynamicpathswithtypes), [JSONSharedDataPaths](../functions/json-functions.md#jsonshareddatapaths), [JSONSharedDataPathsWithTypes](../functions/json-functions.md#jsonshareddatapathswithtypes), [distinctDynamicTypes](../aggregate-functions/reference/distinctdynamictypes.md), and [distinctJSONPaths and distinctJSONPathsAndTypes](../aggregate-functions/reference/distinctjsonpaths.md).

**Examples**

Let's investigate the content of the [GH Archive](https://www.gharchive.org/) dataset for the date `2020-01-01`:

```sql
SELECT arrayJoin(distinctJSONPaths(json)) FROM s3('s3://clickhouse-public-datasets/gharchive/original/2020-01-01-*.json.gz', JSONAsObject)
```

```text
┌─arrayJoin(distinctJSONPaths(json))─────────────────────────┐
│ actor.avatar_url │
│ actor.display_login │
│ actor.gravatar_id │
│ actor.id │
│ actor.login │
│ actor.url │
│ created_at │
│ id │
│ org.avatar_url │
│ org.gravatar_id │
│ org.id │
│ org.login │
│ org.url │
│ payload.action │
│ payload.before │
│ payload.comment._links.html.href │
│ payload.comment._links.pull_request.href │
│ payload.comment._links.self.href │
│ payload.comment.author_association │
│ payload.comment.body │
│ payload.comment.commit_id │
│ payload.comment.created_at │
│ payload.comment.diff_hunk │
│ payload.comment.html_url │
│ payload.comment.id │
│ payload.comment.in_reply_to_id │
│ payload.comment.issue_url │
│ payload.comment.line │
│ payload.comment.node_id │
│ payload.comment.original_commit_id │
│ payload.comment.original_position │
│ payload.comment.path │
│ payload.comment.position │
│ payload.comment.pull_request_review_id │
...
│ payload.release.node_id │
│ payload.release.prerelease │
│ payload.release.published_at │
│ payload.release.tag_name │
│ payload.release.tarball_url │
│ payload.release.target_commitish │
│ payload.release.upload_url │
│ payload.release.url │
│ payload.release.zipball_url │
│ payload.size │
│ public │
│ repo.id │
│ repo.name │
│ repo.url │
│ type │
└─arrayJoin(distinctJSONPaths(json))─────────────────────────┘
```

```sql
SELECT arrayJoin(distinctJSONPathsAndTypes(json)) FROM s3('s3://clickhouse-public-datasets/gharchive/original/2020-01-01-*.json.gz', JSONAsObject) SETTINGS date_time_input_format='best_effort'
```

```text
┌─arrayJoin(distinctJSONPathsAndTypes(json))──────────────────┐
│ ('actor.avatar_url',['String']) │
│ ('actor.display_login',['String']) │
│ ('actor.gravatar_id',['String']) │
│ ('actor.id',['Int64']) │
│ ('actor.login',['String']) │
│ ('actor.url',['String']) │
│ ('created_at',['DateTime']) │
│ ('id',['String']) │
│ ('org.avatar_url',['String']) │
│ ('org.gravatar_id',['String']) │
│ ('org.id',['Int64']) │
│ ('org.login',['String']) │
│ ('org.url',['String']) │
│ ('payload.action',['String']) │
│ ('payload.before',['String']) │
│ ('payload.comment._links.html.href',['String']) │
│ ('payload.comment._links.pull_request.href',['String']) │
│ ('payload.comment._links.self.href',['String']) │
│ ('payload.comment.author_association',['String']) │
│ ('payload.comment.body',['String']) │
│ ('payload.comment.commit_id',['String']) │
│ ('payload.comment.created_at',['DateTime']) │
│ ('payload.comment.diff_hunk',['String']) │
│ ('payload.comment.html_url',['String']) │
│ ('payload.comment.id',['Int64']) │
│ ('payload.comment.in_reply_to_id',['Int64']) │
│ ('payload.comment.issue_url',['String']) │
│ ('payload.comment.line',['Int64']) │
│ ('payload.comment.node_id',['String']) │
│ ('payload.comment.original_commit_id',['String']) │
│ ('payload.comment.original_position',['Int64']) │
│ ('payload.comment.path',['String']) │
│ ('payload.comment.position',['Int64']) │
│ ('payload.comment.pull_request_review_id',['Int64']) │
...
│ ('payload.release.node_id',['String']) │
│ ('payload.release.prerelease',['Bool']) │
│ ('payload.release.published_at',['DateTime']) │
│ ('payload.release.tag_name',['String']) │
│ ('payload.release.tarball_url',['String']) │
│ ('payload.release.target_commitish',['String']) │
│ ('payload.release.upload_url',['String']) │
│ ('payload.release.url',['String']) │
│ ('payload.release.zipball_url',['String']) │
│ ('payload.size',['Int64']) │
│ ('public',['Bool']) │
│ ('repo.id',['Int64']) │
│ ('repo.name',['String']) │
│ ('repo.url',['String']) │
│ ('type',['String']) │
└─arrayJoin(distinctJSONPathsAndTypes(json))──────────────────┘
```

## Tips for better usage of the JSON type

@ -2035,6 +2035,7 @@ Query:

SELECT arrayZip(['a', 'b', 'c'], [5, 2, 1]);
```

Result:

``` text
@ -2043,6 +2044,43 @@ Result:
└──────────────────────────────────────┘
```

## arrayZipUnaligned

Combines multiple arrays into a single array, allowing for unaligned arrays. The resulting array contains the corresponding elements of the source arrays grouped into tuples in the order the arguments are listed.

**Syntax**

``` sql
arrayZipUnaligned(arr1, arr2, ..., arrN)
```

**Arguments**

- `arrN` — [Array](../data-types/array.md).

The function can take any number of arrays of different types.

**Returned value**

- Array with elements from the source arrays grouped into [tuples](../data-types/tuple.md). Data types in the tuple are the same as the types of the input arrays, in the same order as the arrays are passed. [Array](../data-types/array.md). If the arrays have different sizes, the shorter arrays will be padded with `null` values.

**Example**

Query:

``` sql
SELECT arrayZipUnaligned(['a'], [1, 2, 3]);
```

Result:

``` text
┌─arrayZipUnaligned(['a'], [1, 2, 3])─┐
│ [('a',1),(NULL,2),(NULL,3)]         │
└─────────────────────────────────────┘
```

## arrayAUC

Calculate AUC (Area Under the Curve, which is a concept in machine learning; see more details: <https://en.wikipedia.org/wiki/Receiver_operating_characteristic#Area_under_the_curve>).

@ -1617,45 +1617,348 @@ The calculation is performed relative to specific points in time:

If unit `WEEK` was specified, `toStartOfInterval` assumes that weeks start on Monday. Note that this behavior is different from that of the function `toStartOfWeek`, in which weeks start by default on Sunday.
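
As a quick illustration of the difference (expected values; `2023-01-05` is a Thursday):

```sql
SELECT
    toStartOfInterval(toDate('2023-01-05'), INTERVAL 1 WEEK) AS monday_based, -- expected: 2023-01-02 (Monday)
    toStartOfWeek(toDate('2023-01-05')) AS sunday_based;                      -- expected: 2023-01-01 (Sunday)
```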

**Syntax**

```sql
toStartOfInterval(value, INTERVAL x unit[, time_zone])
toStartOfInterval(value, INTERVAL x unit[, origin[, time_zone]])
```

The second overload emulates TimescaleDB's `time_bucket()` function and PostgreSQL's `date_bin()` function, e.g.

``` sql
SELECT toStartOfInterval(toDateTime('2023-01-01 14:45:00'), INTERVAL 1 MINUTE, toDateTime('2023-01-01 14:35:30'));
```
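
For reference, the origin shifts the bucket grid: `2023-01-01 14:45:00` is 9.5 minutes after the origin `14:35:30`, so rounding down to whole 1-minute buckets gives the bucket start `14:44:30`. A small sketch verifying the arithmetic:

```sql
-- origin + floor(9.5) minutes = 2023-01-01 14:44:30
SELECT toDateTime('2023-01-01 14:35:30') + INTERVAL 9 MINUTE AS expected_bucket_start;
```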

**See Also**

- [date_trunc](#date_trunc)

## toTime

Converts a date with time to a certain fixed date, while preserving the time.

**Syntax**

```sql
toTime(date[, timezone])
```

**Arguments**

- `date` — Date to convert to a time. [Date](../data-types/date.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).
- `timezone` (optional) — Timezone for the returned value. [String](../data-types/string.md).

**Returned value**

- DateTime with the date equated to `1970-01-02` while preserving the time. [DateTime](../data-types/datetime.md).

:::note
If the `date` input argument contained sub-second components, they will be dropped in the returned `DateTime` value with second accuracy.
:::

**Example**

Query:

```sql
SELECT toTime(toDateTime64('1970-12-10 01:20:30.3000', 3)) AS result, toTypeName(result);
```

Result:

```response
┌──────────────result─┬─toTypeName(result)─┐
│ 1970-01-02 01:20:30 │ DateTime           │
└─────────────────────┴────────────────────┘
```

## toRelativeYearNum

Converts a date, or date with time, to the number of years elapsed since a certain fixed point in the past.

**Syntax**

```sql
toRelativeYearNum(date)
```

**Arguments**

- `date` — Date or date with time. [Date](../data-types/date.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).

**Returned value**

- The number of years from a fixed reference point in the past. [UInt16](../data-types/int-uint.md).

**Example**

Query:

```sql
SELECT
    toRelativeYearNum(toDate('2002-12-08')) AS y1,
    toRelativeYearNum(toDate('2010-10-26')) AS y2
```

Result:

```response
┌───y1─┬───y2─┐
│ 2002 │ 2010 │
└──────┴──────┘
```

## toRelativeQuarterNum

Converts a date, or date with time, to the number of quarters elapsed since a certain fixed point in the past.

**Syntax**

```sql
toRelativeQuarterNum(date)
```

**Arguments**

- `date` — Date or date with time. [Date](../data-types/date.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).

**Returned value**

- The number of quarters from a fixed reference point in the past. [UInt32](../data-types/int-uint.md).

**Example**

Query:

```sql
SELECT
    toRelativeQuarterNum(toDate('1993-11-25')) AS q1,
    toRelativeQuarterNum(toDate('2005-01-05')) AS q2
```

Result:

```response
┌───q1─┬───q2─┐
│ 7975 │ 8020 │
└──────┴──────┘
```

## toRelativeMonthNum

Converts a date, or date with time, to the number of months elapsed since a certain fixed point in the past.

**Syntax**

```sql
toRelativeMonthNum(date)
```

**Arguments**

- `date` — Date or date with time. [Date](../data-types/date.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).

**Returned value**

- The number of months from a fixed reference point in the past. [UInt32](../data-types/int-uint.md).

**Example**

Query:

```sql
SELECT
    toRelativeMonthNum(toDate('2001-04-25')) AS m1,
    toRelativeMonthNum(toDate('2009-07-08')) AS m2
```

Result:

```response
┌────m1─┬────m2─┐
│ 24016 │ 24115 │
└───────┴───────┘
```

## toRelativeWeekNum

Converts a date, or date with time, to the number of weeks elapsed since a certain fixed point in the past.

**Syntax**

```sql
toRelativeWeekNum(date)
```

**Arguments**

- `date` — Date or date with time. [Date](../data-types/date.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).

**Returned value**

- The number of weeks from a fixed reference point in the past. [UInt32](../data-types/int-uint.md).

**Example**

Query:

```sql
SELECT
    toRelativeWeekNum(toDate('2000-02-29')) AS w1,
    toRelativeWeekNum(toDate('2001-01-12')) AS w2
```

Result:

```response
┌───w1─┬───w2─┐
│ 1574 │ 1619 │
└──────┴──────┘
```

## toRelativeDayNum

Converts a date, or date with time, to the number of days elapsed since a certain fixed point in the past.

**Syntax**

```sql
toRelativeDayNum(date)
```

**Arguments**

- `date` — Date or date with time. [Date](../data-types/date.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).

**Returned value**

- The number of days from a fixed reference point in the past. [UInt32](../data-types/int-uint.md).

**Example**

Query:

```sql
SELECT
    toRelativeDayNum(toDate('1993-10-05')) AS d1,
    toRelativeDayNum(toDate('2000-09-20')) AS d2
```

Result:

```response
┌───d1─┬────d2─┐
│ 8678 │ 11220 │
└──────┴───────┘
```

## toRelativeHourNum

Converts a date, or date with time, to the number of hours elapsed since a certain fixed point in the past.

**Syntax**

```sql
toRelativeHourNum(date)
```

**Arguments**

- `date` — Date or date with time. [Date](../data-types/date.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).

**Returned value**

- The number of hours from a fixed reference point in the past. [UInt32](../data-types/int-uint.md).

**Example**

Query:

```sql
SELECT
    toRelativeHourNum(toDateTime('1993-10-05 05:20:36')) AS h1,
    toRelativeHourNum(toDateTime('2000-09-20 14:11:29')) AS h2
```

Result:

```response
┌─────h1─┬─────h2─┐
│ 208276 │ 269292 │
└────────┴────────┘
```

## toRelativeMinuteNum

Converts a date, or date with time, to the number of minutes elapsed since a certain fixed point in the past.

**Syntax**

```sql
toRelativeMinuteNum(date)
```

**Arguments**

- `date` — Date or date with time. [Date](../data-types/date.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).

**Returned value**

- The number of minutes from a fixed reference point in the past. [UInt32](../data-types/int-uint.md).

**Example**

Query:

```sql
SELECT
    toRelativeMinuteNum(toDateTime('1993-10-05 05:20:36')) AS m1,
    toRelativeMinuteNum(toDateTime('2000-09-20 14:11:29')) AS m2
```

Result:

```response
┌───────m1─┬───────m2─┐
│ 12496580 │ 16157531 │
└──────────┴──────────┘
```

## toRelativeSecondNum

Converts a date, or date with time, to the number of seconds elapsed since a certain fixed point in the past.

**Syntax**

```sql
toRelativeSecondNum(date)
```

**Arguments**

- `date` — Date or date with time. [Date](../data-types/date.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).

**Returned value**

- The number of seconds from a fixed reference point in the past. [UInt32](../data-types/int-uint.md).

**Example**

Query:

```sql
SELECT
    toRelativeSecondNum(toDateTime('1993-10-05 05:20:36')) AS s1,
    toRelativeSecondNum(toDateTime('2000-09-20 14:11:29')) AS s2
```

Result:

```response
┌────────s1─┬────────s2─┐
│ 749794836 │ 969451889 │
└───────────┴───────────┘
```

## toISOYear

@ -3884,19 +4187,29 @@ Result:
└───────────────────────────────────────────────────────────────────────┘
```

## timeSlots

For a time interval starting at `StartTime` and continuing for `Duration` seconds, it returns an array of moments in time, consisting of points from this interval rounded down to `Size` seconds. `Size` is an optional parameter set to 1800 (30 minutes) by default.
This is necessary, for example, when searching for pageviews in the corresponding session.
Accepts DateTime and DateTime64 as the `StartTime` argument. For DateTime, the `Duration` and `Size` arguments must be `UInt32`; for DateTime64 they must be `Decimal64`.
Returns an array of DateTime/DateTime64 (the return type matches the type of `StartTime`). For DateTime64, the return value's scale can differ from the scale of `StartTime`: the highest scale among all given arguments is taken.

**Syntax**

```sql
timeSlots(StartTime, Duration[, Size])
```

**Example**

```sql
SELECT timeSlots(toDateTime('2012-01-01 12:20:00'), toUInt32(600));
SELECT timeSlots(toDateTime('1980-12-12 21:01:02', 'UTC'), toUInt32(600), 299);
SELECT timeSlots(toDateTime64('1980-12-12 21:01:02.1234', 4, 'UTC'), toDecimal64(600.1, 1), toDecimal64(299, 0));
```

Result:

``` text
┌─timeSlots(toDateTime('2012-01-01 12:20:00'), toUInt32(600))─┐
│ ['2012-01-01 12:00:00','2012-01-01 12:30:00']               │
@ -20,10 +20,10 @@ overlay(s, replace, offset[, length])

**Parameters**

- `s`: A string type [String](../data-types/string.md).
- `replace`: A string type [String](../data-types/string.md).
- `offset`: An integer type [Int](../data-types/int-uint.md) (1-based). If `offset` is negative, it is counted from the end of the string `s`.
- `length`: Optional. An integer type [Int](../data-types/int-uint.md). `length` specifies the length of the snippet within the input string `s` to be replaced. If `length` is not specified, the number of bytes removed from `s` equals the length of `replace`; otherwise `length` bytes are removed.

**Returned value**

@ -32,22 +32,35 @@ overlay(s, replace, offset[, length])

**Example**

```sql
SELECT overlay('My father is from Mexico.', 'mother', 4) AS res;
```

Result:

```text
┌─res───────────────────────┐
│ My mother is from Mexico. │
└───────────────────────────┘
```

```sql
SELECT overlay('My father is from Mexico.', 'dad', 4, 6) AS res;
```

Result:

```text
┌─res────────────────────┐
│ My dad is from Mexico. │
└────────────────────────┘
```

## overlayUTF8

Replace part of the string `s` with another string `replace`, starting at the 1-based index `offset`.

Assumes that the string contains valid UTF-8 encoded text.
If this assumption is violated, no exception is thrown and the result is undefined.

**Syntax**

@ -59,8 +72,8 @@ overlayUTF8(s, replace, offset[, length])

- `s`: A string type [String](../data-types/string.md).
- `replace`: A string type [String](../data-types/string.md).
- `offset`: An integer type [Int](../data-types/int-uint.md) (1-based). If `offset` is negative, it is counted from the end of the input string `s`.
- `length`: Optional. An integer type [Int](../data-types/int-uint.md). `length` specifies the length of the snippet within the input string `s` to be replaced. If `length` is not specified, the number of characters removed from `s` equals the length of `replace`; otherwise `length` characters are removed.

**Returned value**
@ -69,15 +82,15 @@ overlayUTF8(s, replace, offset[, length])

**Example**

```sql
SELECT overlayUTF8('Mein Vater ist aus Österreich.', 'der Türkei', 20) AS res;
```

Result:

```text
┌─res────────────────────────────┐
│ Mein Vater ist aus der Türkei. │
└────────────────────────────────┘
```

## replaceOne

@ -3906,7 +3906,7 @@ Result:

## toDateTime64

Converts an input value to a value of type [DateTime64](../data-types/datetime64.md).

**Syntax**

@ -3918,7 +3918,7 @@ toDateTime64(expr, scale, [timezone])

- `expr` — The value. [String](../data-types/string.md), [UInt32](../data-types/int-uint.md), [Float](../data-types/float.md) or [DateTime](../data-types/datetime.md).
- `scale` - Tick size (precision): 10<sup>-precision</sup> seconds. Valid range: [ 0 : 9 ].
- `timezone` (optional) - Time zone of the specified datetime64 object.

**Returned value**
@ -3977,10 +3977,137 @@ SELECT toDateTime64('2019-01-01 00:00:00', 3, 'Asia/Istanbul') AS value, toTypeN

## toDateTime64OrZero

Like [toDateTime64](#todatetime64), this function converts an input value to a value of type [DateTime64](../data-types/datetime64.md) but returns the minimum value of [DateTime64](../data-types/datetime64.md) if an invalid argument is received.

**Syntax**

``` sql
toDateTime64OrZero(expr, scale, [timezone])
```

**Arguments**

- `expr` — The value. [String](../data-types/string.md), [UInt32](../data-types/int-uint.md), [Float](../data-types/float.md) or [DateTime](../data-types/datetime.md).
- `scale` - Tick size (precision): 10<sup>-precision</sup> seconds. Valid range: [ 0 : 9 ].
- `timezone` (optional) - Time zone of the specified DateTime64 object.

**Returned value**

- A calendar date and time of day, with sub-second precision; otherwise, the minimum value of `DateTime64`: `1970-01-01 01:00:00.000`. [DateTime64](../data-types/datetime64.md).

**Example**

Query:

```sql
SELECT toDateTime64OrZero('2008-10-12 00:00:00 00:30:30', 3) AS invalid_arg
```

Result:

```response
┌─────────────invalid_arg─┐
│ 1970-01-01 01:00:00.000 │
└─────────────────────────┘
```

**See also**

- [toDateTime64](#todatetime64).
- [toDateTime64OrNull](#todatetime64ornull).
- [toDateTime64OrDefault](#todatetime64ordefault).

## toDateTime64OrNull

Like [toDateTime64](#todatetime64), this function converts an input value to a value of type [DateTime64](../data-types/datetime64.md) but returns `NULL` if an invalid argument is received.

**Syntax**

``` sql
toDateTime64OrNull(expr, scale, [timezone])
```

**Arguments**

- `expr` — The value. [String](../data-types/string.md), [UInt32](../data-types/int-uint.md), [Float](../data-types/float.md) or [DateTime](../data-types/datetime.md).
- `scale` - Tick size (precision): 10<sup>-precision</sup> seconds. Valid range: [ 0 : 9 ].
- `timezone` (optional) - Time zone of the specified DateTime64 object.

**Returned value**

- A calendar date and time of day, with sub-second precision; otherwise, `NULL`. [DateTime64](../data-types/datetime64.md)/[NULL](../data-types/nullable.md).

**Example**

Query:

```sql
SELECT
    toDateTime64OrNull('1976-10-18 00:00:00.30', 3) AS valid_arg,
    toDateTime64OrNull('1976-10-18 00:00:00 30', 3) AS invalid_arg
```

Result:

```response
┌───────────────valid_arg─┬─invalid_arg─┐
│ 1976-10-18 00:00:00.300 │ ᴺᵁᴸᴸ        │
└─────────────────────────┴─────────────┘
```

**See also**

- [toDateTime64](#todatetime64).
- [toDateTime64OrZero](#todatetime64orzero).
- [toDateTime64OrDefault](#todatetime64ordefault).
## toDateTime64OrDefault
|
## toDateTime64OrDefault
|
||||||
|
|
||||||
|
Like [toDateTime64](#todatetime64), this function converts an input value to a value of type [DateTime64](../data-types/datetime64.md),
|
||||||
|
but returns either the default value of [DateTime64](../data-types/datetime64.md)
|
||||||
|
or the provided default if an invalid argument is received.
|
||||||
|
|
||||||
|
**Syntax**
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
toDateTime64OrNull(expr, scale, [timezone, default])
|
||||||
|
```
|
||||||
|
|
||||||
|
**Arguments**
|
||||||
|
|
||||||
|
- `expr` — The value. [String](../data-types/string.md), [UInt32](../data-types/int-uint.md), [Float](../data-types/float.md) or [DateTime](../data-types/datetime.md).
|
||||||
|
- `scale` - Tick size (precision): 10<sup>-precision</sup> seconds. Valid range: [ 0 : 9 ].
|
||||||
|
- `timezone` (optional) - Time zone of the specified DateTime64 object.
|
||||||
|
- `default` (optional) - Default value to return if an invalid argument is received. [DateTime64](../data-types/datetime64.md).
|
||||||
|
|
||||||
|
**Returned value**
|
||||||
|
|
||||||
|
- A calendar date and time of day, with sub-second precision, otherwise the minimum value of `DateTime64` or the `default` value if provided. [DateTime64](../data-types/datetime64.md).
|
||||||
|
|
||||||
|
**Example**
|
||||||
|
|
||||||
|
Query:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SELECT
|
||||||
|
toDateTime64OrDefault('1976-10-18 00:00:00 30', 3) AS invalid_arg,
|
||||||
|
toDateTime64OrDefault('1976-10-18 00:00:00 30', 3, 'UTC', toDateTime64('2001-01-01 00:00:00.00',3)) AS invalid_arg_with_default
|
||||||
|
```
|
||||||
|
|
||||||
|
Result:
|
||||||
|
|
||||||
|
```response
|
||||||
|
┌─────────────invalid_arg─┬─invalid_arg_with_default─┐
|
||||||
|
│ 1970-01-01 01:00:00.000 │ 2000-12-31 23:00:00.000 │
|
||||||
|
└─────────────────────────┴──────────────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
**See also**
|
||||||
|
|
||||||
|
- [toDateTime64](#todatetime64).
|
||||||
|
- [toDateTime64OrZero](#todatetime64orzero).
|
||||||
|
- [toDateTime64OrNull](#todatetime64ornull).
|
||||||
|
|
||||||

## toDecimal32

Converts an input value to a value of type [`Decimal(9, S)`](../data-types/decimal.md) with a scale of `S`. Throws an exception in case of an error.

@ -265,8 +265,6 @@ SELECT now() AS current_date_time, current_date_time + INTERVAL '4' day + INTERV
└─────────────────────┴────────────────────────────────────────────────────────────┘
```

:::note
The `INTERVAL` syntax or the `addDays` function is always preferred. Simple addition or subtraction (syntax like `now() + ...`) doesn't consider time settings such as daylight saving time.
:::

@ -351,7 +351,7 @@ ALTER TABLE mt DELETE IN PARTITION ID '2' WHERE p = 2;

You can specify the partition expression in `ALTER ... PARTITION` queries in different ways:

- As a value from the `partition` column of the `system.parts` table. For example, `ALTER TABLE visits DETACH PARTITION 201901`.
- Using the keyword `ALL`. It can be used only with DROP/DETACH/ATTACH/ATTACH FROM. For example, `ALTER TABLE visits ATTACH PARTITION ALL`.
- As a tuple of expressions or constants that matches (in types) the table partitioning keys tuple. In the case of a single element partitioning key, the expression should be wrapped in the `tuple (...)` function. For example, `ALTER TABLE visits DETACH PARTITION tuple(toYYYYMM(toDate('2019-01-25')))`.
- Using the partition ID. The partition ID is a string identifier of the partition (human-readable, if possible) that is used as the name of the partition in the file system and in ZooKeeper. The partition ID must be specified in the `PARTITION ID` clause, in single quotes. For example, `ALTER TABLE visits DETACH PARTITION ID '201901'`.
- In the [ALTER ATTACH PART](#attach-partitionpart) and [DROP DETACHED PART](#drop-detached-partitionpart) queries, to specify the name of a part, use a string literal with a value from the `name` column of the [system.detached_parts](/docs/en/operations/system-tables/detached_parts.md/#system_tables-detached_parts) table. For example, `ALTER TABLE visits ATTACH PART '201901_1_1_0'`.

@ -13,7 +13,7 @@ The lightweight `DELETE` statement removes rows from the table `[db.]table` that

DELETE FROM [db.]table [ON CLUSTER cluster] [IN PARTITION partition_expr] WHERE expr;
```

It is called "lightweight `DELETE`" to contrast it to the [ALTER TABLE ... DELETE](/en/sql-reference/statements/alter/delete) command, which is a heavyweight process.

## Examples
@ -22,23 +22,27 @@ It is called "lightweight `DELETE`" to contrast it to the [ALTER TABLE ... DELETE](/

DELETE FROM hits WHERE Title LIKE '%hello%';
```

## Lightweight `DELETE` does not delete data immediately

Lightweight `DELETE` is implemented as a [mutation](/en/sql-reference/statements/alter#mutations) that marks rows as deleted but does not immediately physically delete them.

By default, `DELETE` statements wait until marking the rows as deleted is completed before returning. This can take a long time if the amount of data is large. Alternatively, you can run it asynchronously in the background using the setting [`lightweight_deletes_sync`](/en/operations/settings/settings#lightweight_deletes_sync). If disabled, the `DELETE` statement is going to return immediately, but the data can still be visible to queries until the background mutation is finished.
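
As a sketch (reusing the `hits` table from the example above; exact behavior is as described in this section), an asynchronous lightweight delete could look like:

```sql
-- Returns immediately; rows are marked as deleted by a background mutation
-- and may remain visible to queries until that mutation finishes.
DELETE FROM hits WHERE Title LIKE '%hello%' SETTINGS lightweight_deletes_sync = 0;
```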

The mutation does not physically delete the rows that have been marked as deleted; this will only happen during the next merge. As a result, it is possible that for an unspecified period, data is not actually deleted from storage and is only marked as deleted.

If you need to guarantee that your data is deleted from storage in a predictable time, consider using the table setting [`min_age_to_force_merge_seconds`](https://clickhouse.com/docs/en/operations/settings/merge-tree-settings#min_age_to_force_merge_seconds). Or you can use the [ALTER TABLE ... DELETE](/en/sql-reference/statements/alter/delete) command. Note that deleting data using `ALTER TABLE ... DELETE` may consume significant resources as it recreates all affected parts.

## Deleting large amounts of data

Large deletes can negatively affect ClickHouse performance. If you are attempting to delete all rows from a table, consider using the [`TRUNCATE TABLE`](/en/sql-reference/statements/truncate) command.

If you anticipate frequent deletes, consider using a [custom partitioning key](/en/engines/table-engines/mergetree-family/custom-partitioning-key). You can then use the [`ALTER TABLE ... DROP PARTITION`](/en/sql-reference/statements/alter/partition#drop-partitionpart) command to quickly drop all rows associated with that partition.

## Limitations of lightweight `DELETE`

### Lightweight `DELETE`s with projections

By default, `DELETE` does not work for tables with projections. This is because rows in a projection may be affected by a `DELETE` operation. But there is a [MergeTree setting](https://clickhouse.com/docs/en/operations/settings/merge-tree-settings), `lightweight_mutation_projection_mode`, to change the behavior.

## Performance considerations when using lightweight `DELETE`

@ -48,7 +52,7 @@ The following can also negatively impact lightweight `DELETE` performance:

- A heavy `WHERE` condition in a `DELETE` query.
- If the mutations queue is filled with many other mutations, this can possibly lead to performance issues, as all mutations on a table are executed sequentially.
- The affected table has a very large number of data parts.
- Having a lot of data in compact parts. In a compact part, all columns are stored in one file.

## Delete permissions
@ -61,31 +65,31 @@ GRANT ALTER DELETE ON db.table to username;

## How lightweight DELETEs work internally in ClickHouse

1. **A "mask" is applied to affected rows**

When a `DELETE FROM table ...` query is executed, ClickHouse saves a mask where each row is marked as either “existing” or as “deleted”. Those “deleted” rows are omitted for subsequent queries. However, rows are actually only removed later by subsequent merges. Writing this mask is much more lightweight than what is done by an `ALTER TABLE ... DELETE` query.

The mask is implemented as a hidden `_row_exists` system column that stores `True` for all visible rows and `False` for deleted ones. This column is only present in a part if some rows in the part were deleted. This column does not exist when a part has all values equal to `True`.

2. **`SELECT` queries are transformed to include the mask**

When a masked column is used in a query, the `SELECT ... FROM table WHERE condition` query internally is extended by the predicate on `_row_exists` and is transformed to:
```sql
SELECT ... FROM table PREWHERE _row_exists WHERE condition
```
At execution time, the column `_row_exists` is read to determine which rows should not be returned. If there are many deleted rows, ClickHouse can determine which granules can be fully skipped when reading the rest of the columns.

3. **`DELETE` queries are transformed to `ALTER TABLE ... UPDATE` queries**

The `DELETE FROM table WHERE condition` is translated into an `ALTER TABLE table UPDATE _row_exists = 0 WHERE condition` mutation.
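
For instance, using the `hits` table from the earlier example, the rewrite described above corresponds to:

```sql
-- User-facing statement:
DELETE FROM hits WHERE Title LIKE '%hello%';

-- Internally executed as a mutation that flips the mask column:
ALTER TABLE hits UPDATE _row_exists = 0 WHERE Title LIKE '%hello%';
```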

Internally, this mutation is executed in two steps:

1. A `SELECT count() FROM table WHERE condition` command is executed for each individual part to determine if the part is affected.

2. Based on the commands above, affected parts are then mutated, and hardlinks are created for unaffected parts. In the case of wide parts, the `_row_exists` column for each row is updated, and all other columns' files are hardlinked. For compact parts, all columns are re-written because they are all stored together in one file.

From the steps above, we can see that lightweight `DELETE` using the masking technique improves performance over traditional `ALTER TABLE ... DELETE` because it does not re-write all the columns' files for affected parts.

## Related content

@ -15,7 +15,14 @@ The `FROM` clause specifies the source to read data from:

Subquery is another `SELECT` query that may be specified in parentheses inside the `FROM` clause.

The `FROM` clause can contain multiple data sources, separated by commas, which is equivalent to performing a [CROSS JOIN](../../../sql-reference/statements/select/join.md) on them, as in the sketch below.
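
A minimal sketch of this equivalence (the table names `t1` and `t2` are placeholders):

```sql
-- Both queries produce the Cartesian product of t1 and t2:
SELECT * FROM t1, t2;
SELECT * FROM t1 CROSS JOIN t2;
```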

`FROM` can optionally appear before a `SELECT` clause. This is a ClickHouse-specific extension of standard SQL which makes `SELECT` statements easier to read. Example:

```sql
FROM table
SELECT *
```

## FINAL Modifier

@ -45,19 +52,19 @@ As an alternative to using `FINAL`, it is sometimes possible to use different qu

### Example Usage

Using the `FINAL` keyword:

```sql
SELECT x, y FROM mytable FINAL WHERE x > 1;
```

Using `FINAL` as a query-level setting:

```sql
SELECT x, y FROM mytable WHERE x > 1 SETTINGS final = 1;
```

Using `FINAL` as a session-level setting:

```sql
SET final = 1;
```

@ -8,14 +8,14 @@ slug: /en/guides/developer/transactional

This is transactional (ACID) if the inserted rows are packed and inserted as a single block (see Notes, and the sketch after this list):
- Atomic: an INSERT succeeds or is rejected as a whole: if a confirmation is sent to the client, then all rows were inserted; if an error is sent to the client, then no rows were inserted.
- Consistent: if there are no table constraints violated, then all rows in an INSERT are inserted and the INSERT succeeds; if constraints are violated, then no rows are inserted.
- Isolated: concurrent clients observe a consistent snapshot of the table, i.e. the state of the table either as it was before the INSERT attempt or after the successful INSERT; no partial state is seen. Clients inside another transaction have [snapshot isolation](https://en.wikipedia.org/wiki/Snapshot_isolation), while clients outside of a transaction have the [read uncommitted](https://en.wikipedia.org/wiki/Isolation_(database_systems)#Read_uncommitted) isolation level.
- Durable: a successful INSERT is written to the filesystem before answering to the client, on a single replica or multiple replicas (controlled by the `insert_quorum` setting), and ClickHouse can ask the OS to sync the filesystem data on the storage media (controlled by the `fsync_after_insert` setting).
- INSERT into multiple tables with one statement is possible if materialized views are involved (the INSERT from the client is to a table which has associated materialized views).
||||||
|
|
||||||
## Case 2: INSERT into multiple partitions, of one table, of the MergeTree* family
|
## Case 2: INSERT into multiple partitions, of one table, of the MergeTree* family
|
||||||
|
|
||||||
Same as Case 1 above, with this detail:
|
Same as Case 1 above, with this detail:
|
||||||
- If table has many partitions and INSERT covers many partitions–then insertion into every partition is transactional on its own
|
- If table has many partitions and INSERT covers many partitions, then insertion into every partition is transactional on its own
|
||||||
|
|
||||||
|
|
||||||
## Case 3: INSERT into one distributed table of the MergeTree* family
|
## Case 3: INSERT into one distributed table of the MergeTree* family
|
||||||
@ -38,7 +38,7 @@ Same as Case 1 above, with this detail:
|
|||||||
- the insert format is column-based (like Native, Parquet, ORC, etc) and the data contains only one block of data
|
- the insert format is column-based (like Native, Parquet, ORC, etc) and the data contains only one block of data
|
||||||
- the size of the inserted block in general may depend on many settings (for example: `max_block_size`, `max_insert_block_size`, `min_insert_block_size_rows`, `min_insert_block_size_bytes`, `preferred_block_size_bytes`, etc)
|
- the size of the inserted block in general may depend on many settings (for example: `max_block_size`, `max_insert_block_size`, `min_insert_block_size_rows`, `min_insert_block_size_bytes`, `preferred_block_size_bytes`, etc)
|
||||||
- if the client did not receive an answer from the server, the client does not know if the transaction succeeded, and it can repeat the transaction, using exactly-once insertion properties
|
- if the client did not receive an answer from the server, the client does not know if the transaction succeeded, and it can repeat the transaction, using exactly-once insertion properties
|
||||||
- ClickHouse is using MVCC with snapshot isolation internally
|
- ClickHouse is using [MVCC](https://en.wikipedia.org/wiki/Multiversion_concurrency_control) with [snapshot isolation](https://en.wikipedia.org/wiki/Snapshot_isolation) internally for concurrent transactions
|
||||||
- all ACID properties are valid even in the case of server kill/crash
|
- all ACID properties are valid even in the case of server kill/crash
|
||||||
- either insert_quorum into different AZ or fsync should be enabled to ensure durable inserts in the typical setup
|
- either insert_quorum into different AZ or fsync should be enabled to ensure durable inserts in the typical setup
|
||||||
- "consistency" in ACID terms does not cover the semantics of distributed systems, see https://jepsen.io/consistency which is controlled by different settings (select_sequential_consistency)
|
- "consistency" in ACID terms does not cover the semantics of distributed systems, see https://jepsen.io/consistency which is controlled by different settings (select_sequential_consistency)
|
||||||
@ -260,7 +260,7 @@ FROM mergetree_table
|
|||||||
### Transactions introspection
|
### Transactions introspection
|
||||||
|
|
||||||
You can inspect transactions by querying the `system.transactions` table, but note that you cannot query that
|
You can inspect transactions by querying the `system.transactions` table, but note that you cannot query that
|
||||||
table from a session that is in a transaction–open a second `clickhouse client` session to query that table.
|
table from a session that is in a transaction. Open a second `clickhouse client` session to query that table.
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
SELECT *
|
SELECT *
|
||||||
|
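For context, a minimal sketch of how the introspection above is typically exercised (assuming experimental transaction support is enabled on the server; the table name is hypothetical):

```sql
BEGIN TRANSACTION;
INSERT INTO mytable VALUES (1), (2), (3); -- one small block, inserted atomically
-- from a *second* clickhouse client session:
-- SELECT * FROM system.transactions;
COMMIT;
```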
@@ -506,14 +506,23 @@ bool RMRCommand::parse(IParser::Pos & pos, std::shared_ptr<ASTKeeperQuery> & nod
         return false;
     node->args.push_back(std::move(path));

+    ASTPtr remove_nodes_limit;
+    if (ParserUnsignedInteger{}.parse(pos, remove_nodes_limit, expected))
+        node->args.push_back(remove_nodes_limit->as<ASTLiteral &>().value);
+    else
+        node->args.push_back(UInt64(100));
+
     return true;
 }

 void RMRCommand::execute(const ASTKeeperQuery * query, KeeperClient * client) const
 {
     String path = client->getAbsolutePath(query->args[0].safeGet<String>());
+    UInt64 remove_nodes_limit = query->args[1].safeGet<UInt64>();

     client->askConfirmation(
-        "You are going to recursively delete path " + path, [client, path] { client->zookeeper->removeRecursive(path); });
+        "You are going to recursively delete path " + path,
+        [client, path, remove_nodes_limit] { client->zookeeper->removeRecursive(path, static_cast<UInt32>(remove_nodes_limit)); });
 }

 bool ReconfigCommand::parse(IParser::Pos & pos, std::shared_ptr<ASTKeeperQuery> & node, DB::Expected & expected) const

@@ -184,7 +184,7 @@ class RMRCommand : public IKeeperClientCommand

     void execute(const ASTKeeperQuery * query, KeeperClient * client) const override;

-    String getHelpMessage() const override { return "{} <path> -- Recursively deletes path. Confirmation required"; }
+    String getHelpMessage() const override { return "{} <path> [limit] -- Recursively deletes path if the subtree size is smaller than the limit. Confirmation required (default limit = 100)"; }
 };

 class ReconfigCommand : public IKeeperClientCommand
@@ -11,6 +11,7 @@
 #include <Core/ServerUUID.h>
 #include <Common/logger_useful.h>
 #include <Common/CgroupsMemoryUsageObserver.h>
+#include <Common/MemoryWorker.h>
 #include <Common/ErrorHandlers.h>
 #include <Common/assertProcessUserMatchesDataOwner.h>
 #include <Common/makeSocketAddress.h>

@@ -384,6 +385,9 @@ try
         LOG_INFO(log, "Background threads finished in {} ms", watch.elapsedMilliseconds());
     });

+    MemoryWorker memory_worker(config().getUInt64("memory_worker_period_ms", 0));
+    memory_worker.start();
+
     static ServerErrorHandler error_handler;
     Poco::ErrorHandler::set(&error_handler);

@@ -425,8 +429,9 @@ try
             for (const auto & server : *servers)
                 metrics.emplace_back(ProtocolServerMetrics{server.getPortName(), server.currentThreads(), server.refusedConnections()});
             return metrics;
-        }
-    );
+        },
+        /*update_jemalloc_epoch_=*/memory_worker.getSource() != MemoryWorker::MemoryUsageSource::Jemalloc,
+        /*update_rss_=*/memory_worker.getSource() == MemoryWorker::MemoryUsageSource::None);

     std::vector<std::string> listen_hosts = DB::getMultipleValuesFromConfig(config(), "", "listen_host");

@@ -655,7 +660,6 @@ try
     GWPAsan::initFinished();
 #endif

-
     LOG_INFO(log, "Ready for connections.");

     waitForTerminationRequest();
@@ -11,7 +11,6 @@
 #include <Poco/Util/HelpFormatter.h>
 #include <Poco/Environment.h>
 #include <Poco/Config.h>
-#include <Common/Jemalloc.h>
 #include <Common/scope_guard_safe.h>
 #include <Common/logger_useful.h>
 #include <base/phdr_cache.h>

@@ -25,6 +24,7 @@
 #include <base/Numa.h>
 #include <Common/PoolId.h>
 #include <Common/MemoryTracker.h>
+#include <Common/MemoryWorker.h>
 #include <Common/ClickHouseRevision.h>
 #include <Common/DNSResolver.h>
 #include <Common/CgroupsMemoryUsageObserver.h>

@@ -111,6 +111,8 @@
 #include <filesystem>
 #include <unordered_set>

+#include <Common/Jemalloc.h>
+
 #include "config.h"
 #include <Common/config_version.h>

@@ -449,9 +451,12 @@ void checkForUsersNotInMainConfig(
     }
 }

+namespace
+{
+
 /// Unused in other builds
 #if defined(OS_LINUX)
-static String readLine(const String & path)
+String readLine(const String & path)
 {
     ReadBufferFromFile in(path);
     String contents;

@@ -459,7 +464,7 @@ static String readLine(const String & path)
     return contents;
 }

-static int readNumber(const String & path)
+int readNumber(const String & path)
 {
     ReadBufferFromFile in(path);
     int result;

@@ -469,7 +474,7 @@ static int readNumber(const String & path)

 #endif

-static void sanityChecks(Server & server)
+void sanityChecks(Server & server)
 {
     std::string data_path = getCanonicalPath(server.config().getString("path", DBMS_DEFAULT_PATH));
     std::string logs_path = server.config().getString("logger.log", "");

@@ -590,6 +595,8 @@ static void sanityChecks(Server & server)
     }
 }

+}
+
 void loadStartupScripts(const Poco::Util::AbstractConfiguration & config, ContextMutablePtr context, Poco::Logger * log)
 {
     try

@@ -906,6 +913,8 @@ try
         LOG_INFO(log, "Background threads finished in {} ms", watch.elapsedMilliseconds());
     });

+    MemoryWorker memory_worker(global_context->getServerSettings().memory_worker_period_ms);
+
     /// This object will periodically calculate some metrics.
     ServerAsynchronousMetrics async_metrics(
         global_context,

@@ -924,8 +933,9 @@ try
             for (const auto & server : servers)
                 metrics.emplace_back(ProtocolServerMetrics{server.getPortName(), server.currentThreads(), server.refusedConnections()});
             return metrics;
-        }
-    );
+        },
+        /*update_jemalloc_epoch_=*/memory_worker.getSource() != MemoryWorker::MemoryUsageSource::Jemalloc,
+        /*update_rss_=*/memory_worker.getSource() == MemoryWorker::MemoryUsageSource::None);

     /// NOTE: global context should be destroyed *before* GlobalThreadPool::shutdown()
     /// Otherwise GlobalThreadPool::shutdown() will hang, since Context holds some threads.

@@ -1204,6 +1214,8 @@ try

     FailPointInjection::enableFromGlobalConfig(config());

+    memory_worker.start();
+
     int default_oom_score = 0;

 #if !defined(NDEBUG)

@@ -1547,15 +1559,6 @@ try
             total_memory_tracker.setDescription("(total)");
             total_memory_tracker.setMetric(CurrentMetrics::MemoryTracking);

-            if (cgroups_memory_usage_observer)
-            {
-                double hard_limit_ratio = new_server_settings.cgroup_memory_watcher_hard_limit_ratio;
-                double soft_limit_ratio = new_server_settings.cgroup_memory_watcher_soft_limit_ratio;
-                cgroups_memory_usage_observer->setMemoryUsageLimits(
-                    static_cast<uint64_t>(max_server_memory_usage * hard_limit_ratio),
-                    static_cast<uint64_t>(max_server_memory_usage * soft_limit_ratio));
-            }
-
             size_t merges_mutations_memory_usage_soft_limit = new_server_settings.merges_mutations_memory_usage_soft_limit;

             size_t default_merges_mutations_server_memory_usage = static_cast<size_t>(current_physical_server_memory * new_server_settings.merges_mutations_memory_usage_to_ram_ratio);

@@ -1584,8 +1587,6 @@ try
             background_memory_tracker.setDescription("(background)");
             background_memory_tracker.setMetric(CurrentMetrics::MergesMutationsMemoryTracking);

-            total_memory_tracker.setAllowUseJemallocMemory(new_server_settings.allow_use_jemalloc_memory);
-
             auto * global_overcommit_tracker = global_context->getGlobalOvercommitTracker();
             total_memory_tracker.setOvercommitTracker(global_overcommit_tracker);
@@ -239,15 +239,36 @@ bool Authentication::areCredentialsValid(
             throw Authentication::Require<GSSAcceptorContext>(auth_data.getKerberosRealm());

         case AuthenticationType::SSL_CERTIFICATE:
+        {
             for (SSLCertificateSubjects::Type type : {SSLCertificateSubjects::Type::CN, SSLCertificateSubjects::Type::SAN})
             {
                 for (const auto & subject : auth_data.getSSLCertificateSubjects().at(type))
                 {
                     if (ssl_certificate_credentials->getSSLCertificateSubjects().at(type).contains(subject))
                         return true;
+
+                    // Wildcard support (1 only)
+                    if (subject.contains('*'))
+                    {
+                        auto prefix = std::string_view(subject).substr(0, subject.find('*'));
+                        auto suffix = std::string_view(subject).substr(subject.find('*') + 1);
+                        auto slashes = std::count(subject.begin(), subject.end(), '/');
+
+                        for (const auto & certificate_subject : ssl_certificate_credentials->getSSLCertificateSubjects().at(type))
+                        {
+                            bool matches_wildcard = certificate_subject.starts_with(prefix) && certificate_subject.ends_with(suffix);
+
+                            // '*' must not represent a '/' in URI, so check if the number of '/' are equal
+                            bool matches_slashes = slashes == count(certificate_subject.begin(), certificate_subject.end(), '/');
+
+                            if (matches_wildcard && matches_slashes)
+                                return true;
+                        }
+                    }
                 }
             }
             return false;
+        }

         case AuthenticationType::SSH_KEY:
 #if USE_SSH
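A hedged sketch of how the new wildcard matching would be used from SQL (the user name and CN pattern are hypothetical):

```sql
CREATE USER api_user IDENTIFIED WITH ssl_certificate CN 'client-*.example.com';
```

With this change, any client certificate whose subject matches the prefix and suffix around the single `*` (and has the same number of `/` characters) can authenticate as that user.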
src/AggregateFunctions/AggregateFunctionDistinctDynamicTypes.cpp (new file, 161 lines)
@@ -0,0 +1,161 @@
+#include <unordered_set>
+#include <IO/WriteHelpers.h>
+#include <IO/ReadHelpers.h>
+
+#include <DataTypes/DataTypeString.h>
+#include <DataTypes/DataTypeArray.h>
+#include <DataTypes/DataTypesBinaryEncoding.h>
+#include <Columns/ColumnDynamic.h>
+
+#include <AggregateFunctions/IAggregateFunction.h>
+#include <AggregateFunctions/AggregateFunctionFactory.h>
+#include <AggregateFunctions/FactoryHelpers.h>
+
+
+namespace DB
+{
+
+namespace ErrorCodes
+{
+    extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
+    extern const int ILLEGAL_TYPE_OF_ARGUMENT;
+    extern const int TOO_LARGE_ARRAY_SIZE;
+}
+
+struct AggregateFunctionDistinctDynamicTypesData
+{
+    constexpr static size_t MAX_ARRAY_SIZE = 0xFFFFFF;
+
+    std::unordered_set<String> data;
+
+    void add(const String & type)
+    {
+        data.insert(type);
+    }
+
+    void merge(const AggregateFunctionDistinctDynamicTypesData & other)
+    {
+        data.insert(other.data.begin(), other.data.end());
+    }
+
+    void serialize(WriteBuffer & buf) const
+    {
+        writeVarUInt(data.size(), buf);
+        for (const auto & type : data)
+            writeStringBinary(type, buf);
+    }
+
+    void deserialize(ReadBuffer & buf)
+    {
+        size_t size;
+        readVarUInt(size, buf);
+        if (size > MAX_ARRAY_SIZE)
+            throw Exception(ErrorCodes::TOO_LARGE_ARRAY_SIZE, "Too large array size (maximum: {}): {}", MAX_ARRAY_SIZE, size);
+
+        data.reserve(size);
+        String type;
+        for (size_t i = 0; i != size; ++i)
+        {
+            readStringBinary(type, buf);
+            data.insert(type);
+        }
+    }
+
+    void insertResultInto(IColumn & column)
+    {
+        /// Insert types in sorted order for better output.
+        auto & array_column = assert_cast<ColumnArray &>(column);
+        auto & string_column = assert_cast<ColumnString &>(array_column.getData());
+        std::vector<String> sorted_data(data.begin(), data.end());
+        std::sort(sorted_data.begin(), sorted_data.end());
+        for (const auto & type : sorted_data)
+            string_column.insertData(type.data(), type.size());
+        array_column.getOffsets().push_back(string_column.size());
+    }
+};
+
+/// Calculates the list of distinct data types in Dynamic column.
+class AggregateFunctionDistinctDynamicTypes final : public IAggregateFunctionDataHelper<AggregateFunctionDistinctDynamicTypesData, AggregateFunctionDistinctDynamicTypes>
+{
+public:
+    explicit AggregateFunctionDistinctDynamicTypes(const DataTypes & argument_types_)
+        : IAggregateFunctionDataHelper<AggregateFunctionDistinctDynamicTypesData, AggregateFunctionDistinctDynamicTypes>(argument_types_, {}, std::make_shared<DataTypeArray>(std::make_shared<DataTypeString>()))
+    {
+    }
+
+    String getName() const override { return "distinctDynamicTypes"; }
+
+    bool allocatesMemoryInArena() const override { return false; }
+
+    void ALWAYS_INLINE add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena *) const override
+    {
+        const auto & dynamic_column = assert_cast<const ColumnDynamic &>(*columns[0]);
+        if (dynamic_column.isNullAt(row_num))
+            return;
+
+        data(place).add(dynamic_column.getTypeNameAt(row_num));
+    }
+
+    void ALWAYS_INLINE addBatchSinglePlace(
+        size_t row_begin, size_t row_end, AggregateDataPtr __restrict place, const IColumn ** columns, Arena * arena, ssize_t if_argument_pos)
+        const override
+    {
+        if (if_argument_pos >= 0 || row_begin != 0 || row_end != columns[0]->size())
+            IAggregateFunctionDataHelper::addBatchSinglePlace(row_begin, row_end, place, columns, arena, if_argument_pos);
+        /// Optimization for case when we add all rows from the column into single place.
+        /// In this case we can avoid iterating over all rows because we can get all types
+        /// in Dynamic column in a more efficient way.
+        else
+            assert_cast<const ColumnDynamic &>(*columns[0]).getAllTypeNamesInto(data(place).data);
+    }
+
+    void addManyDefaults(
+        AggregateDataPtr __restrict /*place*/,
+        const IColumn ** /*columns*/,
+        size_t /*length*/,
+        Arena * /*arena*/) const override
+    {
+        /// Default value for Dynamic is NULL, so nothing to add.
+    }
+
+    void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena *) const override
+    {
+        data(place).merge(data(rhs));
+    }
+
+    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
+    {
+        data(place).serialize(buf);
+    }
+
+    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena *) const override
+    {
+        data(place).deserialize(buf);
+    }
+
+    void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override
+    {
+        data(place).insertResultInto(to);
+    }
+};
+
+AggregateFunctionPtr createAggregateFunctionDistinctDynamicTypes(
+    const std::string & name, const DataTypes & argument_types, const Array & parameters, const Settings *)
+{
+    assertNoParameters(name, parameters);
+    if (argument_types.size() != 1)
+        throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH,
+            "Incorrect number of arguments for aggregate function {}. Expected single argument with type Dynamic, got {} arguments", name, argument_types.size());
+
+    if (!isDynamic(argument_types[0]))
+        throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of argument for aggregate function {}. Expected type Dynamic", argument_types[0]->getName(), name);
+
+    return std::make_shared<AggregateFunctionDistinctDynamicTypes>(argument_types);
+}
+
+void registerAggregateFunctionDistinctDynamicTypes(AggregateFunctionFactory & factory)
+{
+    factory.registerFunction("distinctDynamicTypes", createAggregateFunctionDistinctDynamicTypes);
+}
+
+}
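A minimal usage sketch for the new function (table and values are hypothetical; the exact type names in the result depend on literal type inference):

```sql
SET allow_experimental_dynamic_type = 1; -- assumption: Dynamic is still behind this setting
CREATE TABLE test (d Dynamic) ENGINE = Memory;
INSERT INTO test VALUES (42), ('hello'), ([1, 2, 3]), (NULL);

SELECT distinctDynamicTypes(d) FROM test;
-- Something like ['Array(Int64)', 'Int64', 'String']: NULL rows are skipped,
-- and insertResultInto above returns the names in sorted order.
```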
src/AggregateFunctions/AggregateFunctionDistinctJSONPaths.cpp (new file, 350 lines)
@@ -0,0 +1,350 @@
+#include <unordered_set>
+#include <IO/WriteHelpers.h>
+#include <IO/ReadHelpers.h>
+
+#include <DataTypes/DataTypeString.h>
+#include <DataTypes/DataTypeArray.h>
+#include <DataTypes/DataTypeMap.h>
+#include <DataTypes/DataTypeObject.h>
+#include <DataTypes/DataTypesBinaryEncoding.h>
+#include <Columns/ColumnDynamic.h>
+#include <Columns/ColumnObject.h>
+#include <Columns/ColumnMap.h>
+
+#include <AggregateFunctions/IAggregateFunction.h>
+#include <AggregateFunctions/AggregateFunctionFactory.h>
+#include <AggregateFunctions/FactoryHelpers.h>
+
+
+namespace DB
+{
+
+namespace ErrorCodes
+{
+    extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
+    extern const int ILLEGAL_TYPE_OF_ARGUMENT;
+    extern const int TOO_LARGE_ARRAY_SIZE;
+}
+
+constexpr static size_t DISTINCT_JSON_PATHS_MAX_ARRAY_SIZE = 0xFFFFFF;
+
+
+struct AggregateFunctionDistinctJSONPathsData
+{
+    static constexpr auto name = "distinctJSONPaths";
+
+    std::unordered_set<String> data;
+
+    void add(const ColumnObject & column, size_t row_num, const std::unordered_map<String, String> &)
+    {
+        for (const auto & [path, _] : column.getTypedPaths())
+            data.insert(path);
+        for (const auto & [path, dynamic_column] : column.getDynamicPathsPtrs())
+        {
+            /// Add path from dynamic paths only if it's not NULL in this row.
+            if (!dynamic_column->isNullAt(row_num))
+                data.insert(path);
+        }
+
+        /// Iterate over paths in shared data in this row.
+        const auto [shared_data_paths, _] = column.getSharedDataPathsAndValues();
+        const auto & shared_data_offsets = column.getSharedDataOffsets();
+        const size_t start = shared_data_offsets[static_cast<ssize_t>(row_num) - 1];
+        const size_t end = shared_data_offsets[static_cast<ssize_t>(row_num)];
+        for (size_t i = start; i != end; ++i)
+            data.insert(shared_data_paths->getDataAt(i).toString());
+    }
+
+    void addWholeColumn(const ColumnObject & column, const std::unordered_map<String, String> &)
+    {
+        for (const auto & [path, _] : column.getTypedPaths())
+            data.insert(path);
+        for (const auto & [path, dynamic_column] : column.getDynamicPathsPtrs())
+        {
+            /// Add dynamic path only if it has at least one non-null value.
+            /// getNumberOfDefaultRows for Dynamic column is O(1).
+            if (dynamic_column->getNumberOfDefaultRows() != dynamic_column->size())
+                data.insert(path);
+        }
+
+        /// Iterate over all paths in shared data.
+        const auto [shared_data_paths, _] = column.getSharedDataPathsAndValues();
+        for (size_t i = 0; i != shared_data_paths->size(); ++i)
+            data.insert(shared_data_paths->getDataAt(i).toString());
+    }
+
+    void merge(const AggregateFunctionDistinctJSONPathsData & other)
+    {
+        data.insert(other.data.begin(), other.data.end());
+    }
+
+    void serialize(WriteBuffer & buf) const
+    {
+        writeVarUInt(data.size(), buf);
+        for (const auto & path : data)
+            writeStringBinary(path, buf);
+    }
+
+    void deserialize(ReadBuffer & buf)
+    {
+        size_t size;
+        readVarUInt(size, buf);
+        if (size > DISTINCT_JSON_PATHS_MAX_ARRAY_SIZE)
+            throw Exception(ErrorCodes::TOO_LARGE_ARRAY_SIZE, "Too large array size (maximum: {}): {}", DISTINCT_JSON_PATHS_MAX_ARRAY_SIZE, size);
+
+        String path;
+        for (size_t i = 0; i != size; ++i)
+        {
+            readStringBinary(path, buf);
+            data.insert(path);
+        }
+    }
+
+    void insertResultInto(IColumn & column)
+    {
+        /// Insert paths in sorted order for better output.
+        auto & array_column = assert_cast<ColumnArray &>(column);
+        auto & string_column = assert_cast<ColumnString &>(array_column.getData());
+        std::vector<String> sorted_data(data.begin(), data.end());
+        std::sort(sorted_data.begin(), sorted_data.end());
+        for (const auto & path : sorted_data)
+            string_column.insertData(path.data(), path.size());
+        array_column.getOffsets().push_back(string_column.size());
+    }
+
+    static DataTypePtr getResultType()
+    {
+        return std::make_shared<DataTypeArray>(std::make_shared<DataTypeString>());
+    }
+};
+
+struct AggregateFunctionDistinctJSONPathsAndTypesData
+{
+    static constexpr auto name = "distinctJSONPathsAndTypes";
+
+    std::unordered_map<String, std::unordered_set<String>> data;
+
+    void add(const ColumnObject & column, size_t row_num, const std::unordered_map<String, String> & typed_paths_type_names)
+    {
+        for (const auto & [path, _] : column.getTypedPaths())
+            data[path].insert(typed_paths_type_names.at(path));
+        for (const auto & [path, dynamic_column] : column.getDynamicPathsPtrs())
+        {
+            if (!dynamic_column->isNullAt(row_num))
+                data[path].insert(dynamic_column->getTypeNameAt(row_num));
+        }
+
+        /// Iterate over paths in shared data in this row and decode the data types.
+        const auto [shared_data_paths, shared_data_values] = column.getSharedDataPathsAndValues();
+        const auto & shared_data_offsets = column.getSharedDataOffsets();
+        const size_t start = shared_data_offsets[static_cast<ssize_t>(row_num) - 1];
+        const size_t end = shared_data_offsets[static_cast<ssize_t>(row_num)];
+        for (size_t i = start; i != end; ++i)
+        {
+            auto path = shared_data_paths->getDataAt(i).toString();
+            auto value = shared_data_values->getDataAt(i);
+            ReadBufferFromMemory buf(value.data, value.size);
+            auto type = decodeDataType(buf);
+            /// We should not have Nulls here but let's check just in case.
+            chassert(!isNothing(type));
+            data[path].insert(type->getName());
+        }
+    }
+
+    void addWholeColumn(const ColumnObject & column, const std::unordered_map<String, String> & typed_paths_type_names)
+    {
+        for (const auto & [path, _] : column.getTypedPaths())
+            data[path].insert(typed_paths_type_names.at(path));
+        for (const auto & [path, dynamic_column] : column.getDynamicPathsPtrs())
+        {
+            /// Add dynamic path only if it has at least one non-null value.
+            /// getNumberOfDefaultRows for Dynamic column is O(1).
+            if (dynamic_column->getNumberOfDefaultRows() != dynamic_column->size())
+                dynamic_column->getAllTypeNamesInto(data[path]);
+        }
+
+        /// Iterate over all paths in shared data and decode the data types.
+        const auto [shared_data_paths, shared_data_values] = column.getSharedDataPathsAndValues();
+        for (size_t i = 0; i != shared_data_paths->size(); ++i)
+        {
+            auto path = shared_data_paths->getDataAt(i).toString();
+            auto value = shared_data_values->getDataAt(i);
+            ReadBufferFromMemory buf(value.data, value.size);
+            auto type = decodeDataType(buf);
+            /// We should not have Nulls here but let's check just in case.
+            chassert(!isNothing(type));
+            data[path].insert(type->getName());
+        }
+    }
+
+    void merge(const AggregateFunctionDistinctJSONPathsAndTypesData & other)
+    {
+        for (const auto & [path, types] : other.data)
+            data[path].insert(types.begin(), types.end());
+    }
+
+    void serialize(WriteBuffer & buf) const
+    {
+        writeVarUInt(data.size(), buf);
+        for (const auto & [path, types] : data)
+        {
+            writeStringBinary(path, buf);
+            writeVarUInt(types.size(), buf);
+            for (const auto & type : types)
+                writeStringBinary(type, buf);
+        }
+    }
+
+    void deserialize(ReadBuffer & buf)
+    {
+        size_t paths_size, types_size;
+        readVarUInt(paths_size, buf);
+        if (paths_size > DISTINCT_JSON_PATHS_MAX_ARRAY_SIZE)
+            throw Exception(ErrorCodes::TOO_LARGE_ARRAY_SIZE, "Too large array size for paths (maximum: {}): {}", DISTINCT_JSON_PATHS_MAX_ARRAY_SIZE, paths_size);
+
+        data.reserve(paths_size);
+        String path, type;
+        for (size_t i = 0; i != paths_size; ++i)
+        {
+            readStringBinary(path, buf);
+            readVarUInt(types_size, buf);
+            if (types_size > DISTINCT_JSON_PATHS_MAX_ARRAY_SIZE)
+                throw Exception(ErrorCodes::TOO_LARGE_ARRAY_SIZE, "Too large array size for types (maximum: {}): {}", DISTINCT_JSON_PATHS_MAX_ARRAY_SIZE, types_size);
+
+            data[path].reserve(types_size);
+            for (size_t j = 0; j != types_size; ++j)
+            {
+                readStringBinary(type, buf);
+                data[path].insert(type);
+            }
+        }
+    }
+
+    void insertResultInto(IColumn & column)
+    {
+        /// Insert sorted paths and types for better output.
+        auto & array_column = assert_cast<ColumnMap &>(column).getNestedColumn();
+        auto & tuple_column = assert_cast<ColumnTuple &>(array_column.getData());
+        auto & key_column = assert_cast<ColumnString &>(tuple_column.getColumn(0));
+        auto & value_column = assert_cast<ColumnArray &>(tuple_column.getColumn(1));
+        auto & value_column_data = assert_cast<ColumnString &>(value_column.getData());
+        std::vector<std::pair<String, std::vector<String>>> sorted_data;
+        sorted_data.reserve(data.size());
+        for (const auto & [path, types] : data)
+        {
+            std::vector<String> sorted_types(types.begin(), types.end());
+            std::sort(sorted_types.begin(), sorted_types.end());
+            sorted_data.emplace_back(path, std::move(sorted_types));
+        }
+        std::sort(sorted_data.begin(), sorted_data.end());
+
+        for (const auto & [path, types] : sorted_data)
+        {
+            key_column.insertData(path.data(), path.size());
+            for (const auto & type : types)
+                value_column_data.insertData(type.data(), type.size());
+            value_column.getOffsets().push_back(value_column_data.size());
+        }
+
+        array_column.getOffsets().push_back(key_column.size());
+    }
+
+    static DataTypePtr getResultType()
+    {
+        return std::make_shared<DataTypeMap>(std::make_shared<DataTypeString>(), std::make_shared<DataTypeArray>(std::make_shared<DataTypeString>()));
+    }
+};
+
+/// Calculates the list of distinct paths or pairs (path, type) in JSON column.
+template <typename Data>
+class AggregateFunctionDistinctJSONPathsAndTypes final : public IAggregateFunctionDataHelper<Data, AggregateFunctionDistinctJSONPathsAndTypes<Data>>
+{
+public:
+    explicit AggregateFunctionDistinctJSONPathsAndTypes(const DataTypes & argument_types_)
+        : IAggregateFunctionDataHelper<Data, AggregateFunctionDistinctJSONPathsAndTypes<Data>>(
+            argument_types_, {}, Data::getResultType())
+    {
+        const auto & typed_paths_types = assert_cast<const DataTypeObject &>(*argument_types_[0]).getTypedPaths();
+        typed_paths_type_names.reserve(typed_paths_types.size());
+        for (const auto & [path, type] : typed_paths_types)
+            typed_paths_type_names[path] = type->getName();
+    }
+
+    String getName() const override { return Data::name; }
+
+    bool allocatesMemoryInArena() const override { return false; }
+
+    void ALWAYS_INLINE add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena *) const override
+    {
+        const auto & object_column = assert_cast<const ColumnObject &>(*columns[0]);
+        this->data(place).add(object_column, row_num, typed_paths_type_names);
+    }
+
+    void ALWAYS_INLINE addBatchSinglePlace(
+        size_t row_begin, size_t row_end, AggregateDataPtr __restrict place, const IColumn ** columns, Arena * arena, ssize_t if_argument_pos)
+        const override
+    {
+        if (if_argument_pos >= 0 || row_begin != 0 || row_end != columns[0]->size())
+            IAggregateFunctionDataHelper<Data, AggregateFunctionDistinctJSONPathsAndTypes<Data>>::addBatchSinglePlace(row_begin, row_end, place, columns, arena, if_argument_pos);
+        /// Optimization for case when we add all rows from the column into single place.
+        /// In this case we can avoid iterating over all rows because we can get all paths
+        /// and types in JSON column in a more efficient way.
+        else
+            this->data(place).addWholeColumn(assert_cast<const ColumnObject &>(*columns[0]), typed_paths_type_names);
+    }
+
+    void addManyDefaults(
+        AggregateDataPtr __restrict /*place*/,
+        const IColumn ** /*columns*/,
+        size_t /*length*/,
+        Arena * /*arena*/) const override
+    {
+        /// Default value for JSON is empty object, so nothing to add.
+    }
+
+    void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena *) const override
+    {
+        this->data(place).merge(this->data(rhs));
+    }
+
+    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
+    {
+        this->data(place).serialize(buf);
+    }
+
+    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena *) const override
+    {
+        this->data(place).deserialize(buf);
+    }
+
+    void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override
+    {
+        this->data(place).insertResultInto(to);
+    }
+
+private:
+    std::unordered_map<String, String> typed_paths_type_names;
+};
+
+template <typename Data>
+AggregateFunctionPtr createAggregateFunctionDistinctJSONPathsAndTypes(
+    const std::string & name, const DataTypes & argument_types, const Array & parameters, const Settings *)
+{
+    assertNoParameters(name, parameters);
+    if (argument_types.size() != 1)
+        throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH,
+            "Incorrect number of arguments for aggregate function {}. Expected single argument with type JSON, got {} arguments", name, argument_types.size());
+
+    if (!isObject(argument_types[0]))
+        throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of argument for aggregate function {}. Expected type JSON", argument_types[0]->getName(), name);
+
+    return std::make_shared<AggregateFunctionDistinctJSONPathsAndTypes<Data>>(argument_types);
+}
+
+void registerAggregateFunctionDistinctJSONPathsAndTypes(AggregateFunctionFactory & factory)
+{
+    factory.registerFunction("distinctJSONPaths", createAggregateFunctionDistinctJSONPathsAndTypes<AggregateFunctionDistinctJSONPathsData>);
+    factory.registerFunction("distinctJSONPathsAndTypes", createAggregateFunctionDistinctJSONPathsAndTypes<AggregateFunctionDistinctJSONPathsAndTypesData>);
+}
+
+}
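A usage sketch for both registered functions (hypothetical table; assumes the experimental JSON type is enabled):

```sql
CREATE TABLE test_json (j JSON) ENGINE = Memory;
INSERT INTO test_json VALUES ('{"a" : 1, "b" : {"c" : "x"}}'), ('{"a" : 2.5}');

SELECT distinctJSONPaths(j) FROM test_json;         -- e.g. ['a', 'b.c']
SELECT distinctJSONPathsAndTypes(j) FROM test_json; -- e.g. {'a':['Float64','Int64'], 'b.c':['String']}
```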
@@ -116,15 +116,17 @@ class GroupConcatImpl final
     SerializationPtr serialization;
     UInt64 limit;
     const String delimiter;
+    const DataTypePtr type;

 public:
     GroupConcatImpl(const DataTypePtr & data_type_, const Array & parameters_, UInt64 limit_, const String & delimiter_)
         : IAggregateFunctionDataHelper<GroupConcatData<has_limit>, GroupConcatImpl<has_limit>>(
             {data_type_}, parameters_, std::make_shared<DataTypeString>())
-        , serialization(this->argument_types[0]->getDefaultSerialization())
         , limit(limit_)
         , delimiter(delimiter_)
+        , type(data_type_)
     {
+        serialization = isFixedString(type) ? std::make_shared<DataTypeString>()->getDefaultSerialization() : this->argument_types[0]->getDefaultSerialization();
     }

     String getName() const override { return name; }

@@ -140,7 +142,14 @@ public:
         if (cur_data.data_size != 0)
             cur_data.insertChar(delimiter.c_str(), delimiter.size(), arena);

-        cur_data.insert(columns[0], serialization, row_num, arena);
+        if (isFixedString(type))
+        {
+            ColumnWithTypeAndName col = {columns[0]->getPtr(), type, "column"};
+            const auto & col_str = castColumn(col, std::make_shared<DataTypeString>());
+            cur_data.insert(col_str.get(), serialization, row_num, arena);
+        }
+        else
+            cur_data.insert(columns[0], serialization, row_num, arena);
     }

     void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena * arena) const override
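The behavior this fixes is easiest to see from SQL (a sketch; the parametric delimiter form of `groupConcat` is assumed): before the cast, `FixedString` values could carry their zero padding into the concatenated result.

```sql
SELECT groupConcat(',')(s)
FROM
(
    SELECT CAST(c, 'FixedString(4)') AS s
    FROM (SELECT 'ab' AS c UNION ALL SELECT 'xyz')
);
-- With the cast to String, the padding bytes of the FixedString no longer leak into the output.
```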
@@ -459,6 +459,8 @@ public:

     bool isParallelizeMergePrepareNeeded() const override { return is_parallelize_merge_prepare_needed; }

+    constexpr static bool parallelizeMergeWithKey() { return true; }
+
     void parallelizeMergePrepare(AggregateDataPtrs & places, ThreadPool & thread_pool, std::atomic<bool> & is_cancelled) const override
     {
         if constexpr (is_parallelize_merge_prepare_needed)

@@ -145,6 +145,8 @@ public:

     virtual bool isParallelizeMergePrepareNeeded() const { return false; }

+    constexpr static bool parallelizeMergeWithKey() { return false; }
+
     virtual void parallelizeMergePrepare(AggregateDataPtrs & /*places*/, ThreadPool & /*thread_pool*/, std::atomic<bool> & /*is_cancelled*/) const
     {
         throw Exception(ErrorCodes::NOT_IMPLEMENTED, "parallelizeMergePrepare() with thread pool parameter isn't implemented for {} ", getName());

@@ -169,7 +171,7 @@ public:

     /// Merges states (on which src places points to) with other states (on which dst places points to) of current aggregation function
     /// then destroy states (on which src places points to).
-    virtual void mergeAndDestroyBatch(AggregateDataPtr * dst_places, AggregateDataPtr * src_places, size_t size, size_t offset, Arena * arena) const = 0;
+    virtual void mergeAndDestroyBatch(AggregateDataPtr * dst_places, AggregateDataPtr * src_places, size_t size, size_t offset, ThreadPool & thread_pool, std::atomic<bool> & is_cancelled, Arena * arena) const = 0;

     /// Serializes state (to transmit it over the network, for example).
     virtual void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> version = std::nullopt) const = 0; /// NOLINT

@@ -499,11 +501,15 @@ public:
             static_cast<const Derived *>(this)->merge(places[i] + place_offset, rhs[i], arena);
     }

-    void mergeAndDestroyBatch(AggregateDataPtr * dst_places, AggregateDataPtr * rhs_places, size_t size, size_t offset, Arena * arena) const override
+    void mergeAndDestroyBatch(AggregateDataPtr * dst_places, AggregateDataPtr * rhs_places, size_t size, size_t offset, ThreadPool & thread_pool, std::atomic<bool> & is_cancelled, Arena * arena) const override
     {
         for (size_t i = 0; i < size; ++i)
         {
-            static_cast<const Derived *>(this)->merge(dst_places[i] + offset, rhs_places[i] + offset, arena);
+            if constexpr (Derived::parallelizeMergeWithKey())
+                static_cast<const Derived *>(this)->merge(dst_places[i] + offset, rhs_places[i] + offset, thread_pool, is_cancelled, arena);
+            else
+                static_cast<const Derived *>(this)->merge(dst_places[i] + offset, rhs_places[i] + offset, arena);

             static_cast<const Derived *>(this)->destroy(rhs_places[i] + offset);
         }
     }

@@ -101,6 +101,13 @@ public:

     auto merge(const UniqExactSet & other, ThreadPool * thread_pool = nullptr, std::atomic<bool> * is_cancelled = nullptr)
     {
+        /// If the size is large, we may convert the singleLevelHash to twoLevelHash and merge in parallel.
+        if (other.size() > 40000)
+        {
+            if (isSingleLevel())
+                convertToTwoLevel();
+        }
+
         if (isSingleLevel() && other.isTwoLevel())
             convertToTwoLevel();
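These pieces plug the keyed merge into the batch path; from the SQL side nothing changes except speed. A hedged example of the kind of query that benefits (table and column names are hypothetical; the 40000-element threshold above decides when a single-level set is promoted so merging can run in parallel):

```sql
SELECT uniqExact(user_id) FROM events SETTINGS max_threads = 8;
```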
@@ -89,6 +89,8 @@ void registerAggregateFunctionAnalysisOfVariance(AggregateFunctionFactory &);
 void registerAggregateFunctionFlameGraph(AggregateFunctionFactory &);
 void registerAggregateFunctionKolmogorovSmirnovTest(AggregateFunctionFactory & factory);
 void registerAggregateFunctionLargestTriangleThreeBuckets(AggregateFunctionFactory & factory);
+void registerAggregateFunctionDistinctDynamicTypes(AggregateFunctionFactory & factory);
+void registerAggregateFunctionDistinctJSONPathsAndTypes(AggregateFunctionFactory & factory);

 class AggregateFunctionCombinatorFactory;
 void registerAggregateFunctionCombinatorIf(AggregateFunctionCombinatorFactory &);

@@ -191,6 +193,8 @@ void registerAggregateFunctions()
     registerAggregateFunctionFlameGraph(factory);
     registerAggregateFunctionKolmogorovSmirnovTest(factory);
     registerAggregateFunctionLargestTriangleThreeBuckets(factory);
+    registerAggregateFunctionDistinctDynamicTypes(factory);
+    registerAggregateFunctionDistinctJSONPathsAndTypes(factory);

     registerWindowFunctions(factory);
 }
@@ -913,11 +913,15 @@ void RestorerFromBackup::createTable(const QualifiedTableName & table_name)
         table_info.database = DatabaseCatalog::instance().getDatabase(table_name.database);
     DatabasePtr database = table_info.database;

+    auto query_context = Context::createCopy(context);
+    query_context->setSetting("database_replicated_allow_explicit_uuid", 3);
+    query_context->setSetting("database_replicated_allow_replicated_engine_arguments", 3);
+
     /// Execute CREATE TABLE query (we call IDatabase::createTableRestoredFromBackup() to allow the database to do some
     /// database-specific things).
     database->createTableRestoredFromBackup(
         create_table_query,
-        context,
+        query_context,
         restore_coordination,
         std::chrono::duration_cast<std::chrono::milliseconds>(create_table_timeout).count());
 }
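For reference, a hedged sketch of the statements that reach this code path (the backup destination and names are hypothetical):

```sql
BACKUP TABLE db.mytable TO Disk('backups', 'mytable.zip');
RESTORE TABLE db.mytable FROM Disk('backups', 'mytable.zip');
```

The restore now runs the `CREATE TABLE` under a copied query context with the two relaxed settings, instead of the shared restore context.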
@@ -176,7 +176,7 @@ add_library (clickhouse_new_delete STATIC Common/new_delete.cpp)
 target_link_libraries (clickhouse_new_delete PRIVATE clickhouse_common_io)
 if (TARGET ch_contrib::jemalloc)
     target_link_libraries (clickhouse_new_delete PRIVATE ch_contrib::jemalloc)
-    target_link_libraries (clickhouse_common_io PRIVATE ch_contrib::jemalloc)
+    target_link_libraries (clickhouse_common_io PUBLIC ch_contrib::jemalloc)
     target_link_libraries (clickhouse_storages_system PRIVATE ch_contrib::jemalloc)
 endif()
@@ -1896,6 +1896,21 @@ void ClientBase::processParsedSingleQuery(const String & full_query, const Strin
     /// Temporarily apply query settings to context.
     std::optional<Settings> old_settings;
     SCOPE_EXIT_SAFE({
+        try
+        {
+            /// We need to park ParallelFormatting threads,
+            /// because they can use settings from global context
+            /// and it can lead to data race with `setSettings`
+            resetOutput();
+        }
+        catch (...)
+        {
+            if (!have_error)
+            {
+                client_exception = std::make_unique<Exception>(getCurrentExceptionMessageAndPattern(print_stack_trace), getCurrentExceptionCode());
+                have_error = true;
+            }
+        }
         if (old_settings)
             client_context->setSettings(*old_settings);
     });

@@ -2699,14 +2714,6 @@ bool ClientBase::processMultiQueryFromFile(const String & file_name)
     ReadBufferFromFile in(file_name);
     readStringUntilEOF(queries_from_file, in);

-    if (!getClientConfiguration().has("log_comment"))
-    {
-        Settings settings = client_context->getSettingsCopy();
-        /// NOTE: cannot use even weakly_canonical() since it fails for /dev/stdin due to resolving of "pipe:[X]"
-        settings.log_comment = fs::absolute(fs::path(file_name));
-        client_context->setSettings(settings);
-    }
-
     return executeMultiQuery(queries_from_file);
 }
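Since the automatic `log_comment` assignment for query files is removed here, callers who relied on it can set the comment explicitly (a sketch; the file name is hypothetical):

```sql
SET log_comment = 'queries_from_my_file.sql';
-- ... run the queries ...
SELECT query FROM system.query_log WHERE log_comment = 'queries_from_my_file.sql';
```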
@@ -168,7 +168,7 @@ std::vector<ConnectionPoolWithFailover::TryResult> ConnectionPoolWithFailover::g
         { return tryGetEntry(pool, timeouts, fail_message, settings, &table_to_check, /*async_callback=*/ {}); };

     return getManyImpl(settings, pool_mode, try_get_entry,
-                       /*skip_unavailable_endpoints=*/ std::nullopt,
+                       /*skip_unavailable_endpoints=*/ false, /// skip_unavailable_endpoints is used to get the min number of entries, and we need at least one
                        /*priority_func=*/ {},
                        settings.distributed_insert_skip_read_only_replicas);
 }
@@ -42,7 +42,7 @@ public:
                               size_t max_error_cap = DBMS_CONNECTION_POOL_WITH_FAILOVER_MAX_ERROR_COUNT);
 
     using Entry = IConnectionPool::Entry;
-    using PoolWithFailoverBase<IConnectionPool>::isTryResultInvalid;
+    using PoolWithFailoverBase<IConnectionPool>::getValidTryResult;
 
     /** Allocates connection to work. */
     Entry get(const ConnectionTimeouts & timeouts) override;
@@ -98,7 +98,7 @@ public:
 
     std::vector<Base::ShuffledPool> getShuffledPools(const Settings & settings, GetPriorityFunc priority_func = {}, bool use_slowdown_count = false);
 
-    size_t getMaxErrorCup() const { return Base::max_error_cap; }
+    size_t getMaxErrorCap() const { return Base::max_error_cap; }
 
     void updateSharedError(std::vector<ShuffledPool> & shuffled_pools)
     {
@@ -327,7 +327,7 @@ HedgedConnectionsFactory::State HedgedConnectionsFactory::processFinishedConnect
         ShuffledPool & shuffled_pool = shuffled_pools[index];
         LOG_INFO(log, "Connection failed at try №{}, reason: {}", (shuffled_pool.error_count + 1), fail_message);
 
-        shuffled_pool.error_count = std::min(pool->getMaxErrorCup(), shuffled_pool.error_count + 1);
+        shuffled_pool.error_count = std::min(pool->getMaxErrorCap(), shuffled_pool.error_count + 1);
         shuffled_pool.slowdown_count = 0;
 
         if (shuffled_pool.error_count >= max_tries)
@@ -979,6 +979,41 @@ ColumnPtr ColumnDynamic::compress() const
     });
 }
 
+String ColumnDynamic::getTypeNameAt(size_t row_num) const
+{
+    const auto & variant_col = getVariantColumn();
+    const size_t discr = variant_col.globalDiscriminatorAt(row_num);
+    if (discr == ColumnVariant::NULL_DISCRIMINATOR)
+        return "";
+
+    if (discr == getSharedVariantDiscriminator())
+    {
+        const auto value = getSharedVariant().getDataAt(variant_col.offsetAt(row_num));
+        ReadBufferFromMemory buf(value.data, value.size);
+        return decodeDataType(buf)->getName();
+    }
+
+    return variant_info.variant_names[discr];
+}
+
+void ColumnDynamic::getAllTypeNamesInto(std::unordered_set<String> & names) const
+{
+    auto shared_variant_discr = getSharedVariantDiscriminator();
+    for (size_t i = 0; i != variant_info.variant_names.size(); ++i)
+    {
+        if (i != shared_variant_discr && !variant_column_ptr->getVariantByGlobalDiscriminator(i).empty())
+            names.insert(variant_info.variant_names[i]);
+    }
+
+    const auto & shared_variant = getSharedVariant();
+    for (size_t i = 0; i != shared_variant.size(); ++i)
+    {
+        const auto value = shared_variant.getDataAt(i);
+        ReadBufferFromMemory buf(value.data, value.size);
+        names.insert(decodeDataType(buf)->getName());
+    }
+}
+
 void ColumnDynamic::prepareForSquashing(const Columns & source_columns)
 {
     if (source_columns.empty())
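The two new methods expose per-row and column-wide type introspection for Dynamic columns: well-known variants map a discriminator to a fixed type name, while rows in the shared variant carry an encoded type that must be decoded per row. A self-contained sketch of that lookup scheme, with simplified stand-ins for the variant column and the shared-variant encoding (none of the real ClickHouse classes):

    #include <cstddef>
    #include <iostream>
    #include <map>
    #include <string>
    #include <unordered_set>
    #include <vector>

    /// Hypothetical model of ColumnDynamic: each row is NULL, one of the named
    /// variants, or lives in the "shared variant" where the concrete type is
    /// stored per row (modelled here as a map, standing in for decodeDataType()).
    struct DynamicModel
    {
        static constexpr size_t NULL_DISCR = static_cast<size_t>(-1);
        std::vector<std::string> variant_names{"Int64", "String", "SharedVariant"};
        size_t shared_discr = 2;
        std::vector<size_t> discriminators;          /// one per row
        std::map<size_t, std::string> shared_types;  /// row -> decoded type name

        std::string typeNameAt(size_t row) const
        {
            size_t d = discriminators[row];
            if (d == NULL_DISCR)
                return "";
            if (d == shared_discr)
                return shared_types.at(row);
            return variant_names[d];
        }

        std::unordered_set<std::string> allTypeNames() const
        {
            std::unordered_set<std::string> names;
            for (size_t row = 0; row != discriminators.size(); ++row)
                if (discriminators[row] != NULL_DISCR)
                    names.insert(typeNameAt(row));
            return names;
        }
    };

    int main()
    {
        DynamicModel col;
        col.discriminators = {0, 1, DynamicModel::NULL_DISCR, 2};
        col.shared_types[3] = "Array(Int64)";
        std::cout << col.typeNameAt(3) << '\n';  /// Array(Int64)
        for (const auto & name : col.allTypeNames())
            std::cout << name << '\n';           /// Int64, String, Array(Int64)
    }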
@@ -430,6 +430,9 @@ public:
 
     const SerializationPtr & getVariantSerialization(const DataTypePtr & variant_type) { return getVariantSerialization(variant_type, variant_type->getName()); }
 
+    String getTypeNameAt(size_t row_num) const;
+    void getAllTypeNamesInto(std::unordered_set<String> & names) const;
+
 private:
     void createVariantInfo(const DataTypePtr & variant_type);
 
@@ -1,5 +1,3 @@
-#include <Common/AsynchronousMetrics.h>
-
 #include <IO/MMappedFileCache.h>
 #include <IO/ReadHelpers.h>
 #include <IO/UncompressedCache.h>
@@ -8,8 +6,10 @@
 #include <base/find_symbols.h>
 #include <base/getPageSize.h>
 #include <sys/resource.h>
+#include <Common/AsynchronousMetrics.h>
 #include <Common/CurrentMetrics.h>
 #include <Common/Exception.h>
+#include <Common/Jemalloc.h>
 #include <Common/filesystemHelpers.h>
 #include <Common/formatReadable.h>
 #include <Common/logger_useful.h>
@@ -69,10 +69,14 @@ static void openCgroupv2MetricFile(const std::string & filename, std::optional<R
 
 AsynchronousMetrics::AsynchronousMetrics(
     unsigned update_period_seconds,
-    const ProtocolServerMetricsFunc & protocol_server_metrics_func_)
+    const ProtocolServerMetricsFunc & protocol_server_metrics_func_,
+    bool update_jemalloc_epoch_,
+    bool update_rss_)
     : update_period(update_period_seconds)
     , log(getLogger("AsynchronousMetrics"))
     , protocol_server_metrics_func(protocol_server_metrics_func_)
+    , update_jemalloc_epoch(update_jemalloc_epoch_)
+    , update_rss(update_rss_)
 {
 #if defined(OS_LINUX)
     openFileIfExists("/proc/cpuinfo", cpuinfo);
@@ -411,9 +415,7 @@ Value saveJemallocMetricImpl(
     const std::string & jemalloc_full_name,
     const std::string & clickhouse_full_name)
 {
-    Value value{};
-    size_t size = sizeof(value);
-    mallctl(jemalloc_full_name.c_str(), &value, &size, nullptr, 0);
+    auto value = getJemallocValue<Value>(jemalloc_full_name.c_str());
     values[clickhouse_full_name] = AsynchronousMetricValue(value, "An internal metric of the low-level memory allocator (jemalloc). See https://jemalloc.net/jemalloc.3.html");
     return value;
 }
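The new body delegates the raw mallctl read to getJemallocValue<Value>(...), matching the Common/Jemalloc.h include added earlier in this diff. A plausible shape for that helper, assuming only jemalloc's documented mallctl API (the real definition lives in Common/Jemalloc.h and may differ):

    #include <jemalloc/jemalloc.h>

    /// Sketch: typed wrapper over the raw mallctl read that the old code inlined.
    template <typename T>
    T getJemallocValue(const char * name)
    {
        T value{};
        size_t size = sizeof(value);
        mallctl(name, &value, &size, nullptr, 0);
        return value;
    }

    /// Usage, e.g.: auto allocated = getJemallocValue<size_t>("stats.allocated");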
@@ -768,8 +770,11 @@ void AsynchronousMetrics::update(TimePoint update_time, bool force_update)
     // 'epoch' is a special mallctl -- it updates the statistics. Without it, all
     // the following calls will return stale values. It increments and returns
     // the current epoch number, which might be useful to log as a sanity check.
-    auto epoch = updateJemallocEpoch();
-    new_values["jemalloc.epoch"] = { epoch, "An internal incremental update number of the statistics of jemalloc (Jason Evans' memory allocator), used in all other `jemalloc` metrics." };
+    auto epoch = update_jemalloc_epoch ? updateJemallocEpoch() : getJemallocValue<uint64_t>("epoch");
+    new_values["jemalloc.epoch"]
+        = {epoch,
+           "An internal incremental update number of the statistics of jemalloc (Jason Evans' memory allocator), used in all other "
+           "`jemalloc` metrics."};
 
     // Collect the statistics themselves.
     saveJemallocMetric<size_t>(new_values, "allocated");
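For reference, the difference between the two paths in jemalloc terms: writing to the "epoch" mallctl refreshes the cached statistics, while a plain read only reports the current epoch number without refreshing, which is what the non-updating branch relies on when some other component is expected to bump the epoch. A minimal sketch using the documented API:

    #include <cstdint>
    #include <jemalloc/jemalloc.h>

    uint64_t bumpEpoch()
    {
        uint64_t epoch = 1;
        size_t size = sizeof(epoch);
        mallctl("epoch", &epoch, &size, &epoch, size); /// the write triggers the stats refresh
        return epoch;
    }

    uint64_t readEpoch()
    {
        uint64_t epoch = 0;
        size_t size = sizeof(epoch);
        mallctl("epoch", &epoch, &size, nullptr, 0); /// read-only, no refresh
        return epoch;
    }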
@@ -782,10 +787,10 @@ void AsynchronousMetrics::update(TimePoint update_time, bool force_update)
     saveJemallocMetric<size_t>(new_values, "background_thread.num_threads");
     saveJemallocMetric<uint64_t>(new_values, "background_thread.num_runs");
     saveJemallocMetric<uint64_t>(new_values, "background_thread.run_intervals");
-    saveJemallocProf<size_t>(new_values, "active");
+    saveJemallocProf<bool>(new_values, "active");
     saveAllArenasMetric<size_t>(new_values, "pactive");
-    [[maybe_unused]] size_t je_malloc_pdirty = saveAllArenasMetric<size_t>(new_values, "pdirty");
-    [[maybe_unused]] size_t je_malloc_pmuzzy = saveAllArenasMetric<size_t>(new_values, "pmuzzy");
+    saveAllArenasMetric<size_t>(new_values, "pdirty");
+    saveAllArenasMetric<size_t>(new_values, "pmuzzy");
     saveAllArenasMetric<size_t>(new_values, "dirty_purged");
     saveAllArenasMetric<size_t>(new_values, "muzzy_purged");
 #endif
@@ -814,41 +819,8 @@ void AsynchronousMetrics::update(TimePoint update_time, bool force_update)
         " It is unspecified whether it includes the per-thread stacks and most of the allocated memory, that is allocated with the 'mmap' system call."
         " This metric exists only for completeness reasons. I recommend to use the `MemoryResident` metric for monitoring."};
 
-    /// We must update the value of total_memory_tracker periodically.
-    /// Otherwise it might be calculated incorrectly - it can include a "drift" of memory amount.
-    /// See https://github.com/ClickHouse/ClickHouse/issues/10293
-    {
-        Int64 amount = total_memory_tracker.get();
-        Int64 peak = total_memory_tracker.getPeak();
-        Int64 rss = data.resident;
-        Int64 free_memory_in_allocator_arenas = 0;
-
-#if USE_JEMALLOC
-        /// According to jemalloc man, pdirty is:
-        ///
-        /// Number of pages within unused extents that are potentially
-        /// dirty, and for which madvise() or similar has not been called.
-        ///
-        /// So they will be subtracted from RSS to make accounting more
-        /// accurate, since those pages are not really RSS but a memory
-        /// that can be used at anytime via jemalloc.
-        free_memory_in_allocator_arenas = je_malloc_pdirty * getPageSize();
-#endif
-
-        Int64 difference = rss - amount;
-
-        /// Log only if difference is high. This is for convenience. The threshold is arbitrary.
-        if (difference >= 1048576 || difference <= -1048576)
-            LOG_TRACE(log,
-                "MemoryTracking: was {}, peak {}, free memory in arenas {}, will set to {} (RSS), difference: {}",
-                ReadableSize(amount),
-                ReadableSize(peak),
-                ReadableSize(free_memory_in_allocator_arenas),
-                ReadableSize(rss),
-                ReadableSize(difference));
-
-        MemoryTracker::setRSS(rss, free_memory_in_allocator_arenas);
-    }
+    if (update_rss)
+        MemoryTracker::updateRSS(data.resident);
 }
 
 {
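The inline drift-correction block is replaced by a single gated MemoryTracker::updateRSS(data.resident) call; the resident amount itself is filled in elsewhere (by MemoryStatisticsOS). As a standalone illustration of where such a number conventionally comes from on Linux (field 2 of /proc/self/statm, counted in pages; a sketch, not the actual implementation):

    #include <cstdio>
    #include <unistd.h>

    long readRSSBytes()
    {
        long pages_total = 0;
        long pages_resident = 0;
        FILE * f = fopen("/proc/self/statm", "r");
        if (!f)
            return -1;
        if (fscanf(f, "%ld %ld", &pages_total, &pages_resident) != 2)
            pages_resident = -1;
        fclose(f);
        return pages_resident < 0 ? -1 : pages_resident * sysconf(_SC_PAGESIZE);
    }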
@@ -1,15 +1,14 @@
 #pragma once
 
+#include <Common/CgroupsMemoryUsageObserver.h>
 #include <Common/MemoryStatisticsOS.h>
 #include <Common/ThreadPool.h>
 #include <Common/Stopwatch.h>
 #include <IO/ReadBufferFromFile.h>
 
 #include <condition_variable>
-#include <map>
 #include <mutex>
 #include <string>
-#include <thread>
 #include <vector>
 #include <optional>
 #include <unordered_map>
@@ -69,7 +68,9 @@ public:
 
     AsynchronousMetrics(
         unsigned update_period_seconds,
-        const ProtocolServerMetricsFunc & protocol_server_metrics_func_);
+        const ProtocolServerMetricsFunc & protocol_server_metrics_func_,
+        bool update_jemalloc_epoch_,
+        bool update_rss_);
 
     virtual ~AsynchronousMetrics();
 
@@ -112,6 +113,9 @@ private:
     MemoryStatisticsOS memory_stat TSA_GUARDED_BY(data_mutex);
 #endif
 
+    [[maybe_unused]] const bool update_jemalloc_epoch;
+    [[maybe_unused]] const bool update_rss;
+
 #if defined(OS_LINUX)
     std::optional<ReadBufferFromFilePRead> meminfo TSA_GUARDED_BY(data_mutex);
     std::optional<ReadBufferFromFilePRead> loadavg TSA_GUARDED_BY(data_mutex);
@@ -14,239 +14,21 @@
 #include <fmt/ranges.h>
 
 #include <cstdint>
-#include <filesystem>
-#include <memory>
-#include <optional>
-
-#include "config.h"
-#if USE_JEMALLOC
-#    include <jemalloc/jemalloc.h>
-#define STRINGIFY_HELPER(x) #x
-#define STRINGIFY(x) STRINGIFY_HELPER(x)
-#endif
 
 using namespace DB;
-namespace fs = std::filesystem;
-
-namespace DB
-{
-
-namespace ErrorCodes
-{
-    extern const int FILE_DOESNT_EXIST;
-    extern const int INCORRECT_DATA;
-}
-
-}
-
-namespace
-{
-
-/// Format is
-///   kernel 5
-///   rss 15
-///   [...]
-using Metrics = std::map<std::string, uint64_t>;
-
-Metrics readAllMetricsFromStatFile(ReadBufferFromFile & buf)
-{
-    Metrics metrics;
-    while (!buf.eof())
-    {
-        std::string current_key;
-        readStringUntilWhitespace(current_key, buf);
-
-        assertChar(' ', buf);
-
-        uint64_t value = 0;
-        readIntText(value, buf);
-        assertChar('\n', buf);
-
-        auto [_, inserted] = metrics.emplace(std::move(current_key), value);
-        chassert(inserted, "Duplicate keys in stat file");
-    }
-    return metrics;
-}
-
-uint64_t readMetricFromStatFile(ReadBufferFromFile & buf, const std::string & key)
-{
-    const auto all_metrics = readAllMetricsFromStatFile(buf);
-    if (const auto it = all_metrics.find(key); it != all_metrics.end())
-        return it->second;
-    throw Exception(ErrorCodes::INCORRECT_DATA, "Cannot find '{}' in '{}'", key, buf.getFileName());
-}
-
-struct CgroupsV1Reader : ICgroupsReader
-{
-    explicit CgroupsV1Reader(const fs::path & stat_file_dir) : buf(stat_file_dir / "memory.stat") { }
-
-    uint64_t readMemoryUsage() override
-    {
-        std::lock_guard lock(mutex);
-        buf.rewind();
-        return readMetricFromStatFile(buf, "rss");
-    }
-
-    std::string dumpAllStats() override
-    {
-        std::lock_guard lock(mutex);
-        buf.rewind();
-        return fmt::format("{}", readAllMetricsFromStatFile(buf));
-    }
-
-private:
-    std::mutex mutex;
-    ReadBufferFromFile buf TSA_GUARDED_BY(mutex);
-};
-
-struct CgroupsV2Reader : ICgroupsReader
-{
-    explicit CgroupsV2Reader(const fs::path & stat_file_dir)
-        : current_buf(stat_file_dir / "memory.current"), stat_buf(stat_file_dir / "memory.stat")
-    {
-    }
-
-    uint64_t readMemoryUsage() override
-    {
-        std::lock_guard lock(mutex);
-        current_buf.rewind();
-        stat_buf.rewind();
-
-        int64_t mem_usage = 0;
-        /// memory.current contains a single number
-        /// the reason why we subtract it described here: https://github.com/ClickHouse/ClickHouse/issues/64652#issuecomment-2149630667
-        readIntText(mem_usage, current_buf);
-        mem_usage -= readMetricFromStatFile(stat_buf, "inactive_file");
-        chassert(mem_usage >= 0, "Negative memory usage");
-        return mem_usage;
-    }
-
-    std::string dumpAllStats() override
-    {
-        std::lock_guard lock(mutex);
-        stat_buf.rewind();
-        return fmt::format("{}", readAllMetricsFromStatFile(stat_buf));
-    }
-
-private:
-    std::mutex mutex;
-    ReadBufferFromFile current_buf TSA_GUARDED_BY(mutex);
-    ReadBufferFromFile stat_buf TSA_GUARDED_BY(mutex);
-};
-
-/// Caveats:
-/// - All of the logic in this file assumes that the current process is the only process in the
-///   containing cgroup (or more precisely: the only process with significant memory consumption).
-///   If this is not the case, then other processe's memory consumption may affect the internal
-///   memory tracker ...
-/// - Cgroups v1 and v2 allow nested cgroup hierarchies. As v1 is deprecated for over half a
-///   decade and will go away at some point, hierarchical detection is only implemented for v2.
-/// - I did not test what happens if a host has v1 and v2 simultaneously enabled. I believe such
-///   systems existed only for a short transition period.
-
-std::optional<std::string> getCgroupsV1Path()
-{
-    auto path = default_cgroups_mount / "memory/memory.stat";
-    if (!fs::exists(path))
-        return {};
-    return {default_cgroups_mount / "memory"};
-}
-
-std::pair<std::string, CgroupsMemoryUsageObserver::CgroupsVersion> getCgroupsPath()
-{
-    auto v2_path = getCgroupsV2PathContainingFile("memory.current");
-    if (v2_path.has_value())
-        return {*v2_path, CgroupsMemoryUsageObserver::CgroupsVersion::V2};
-
-    auto v1_path = getCgroupsV1Path();
-    if (v1_path.has_value())
-        return {*v1_path, CgroupsMemoryUsageObserver::CgroupsVersion::V1};
-
-    throw Exception(ErrorCodes::FILE_DOESNT_EXIST, "Cannot find cgroups v1 or v2 current memory file");
-}
-
-}
-
 namespace DB
 {
 
 CgroupsMemoryUsageObserver::CgroupsMemoryUsageObserver(std::chrono::seconds wait_time_)
     : log(getLogger("CgroupsMemoryUsageObserver")), wait_time(wait_time_)
-{
-    const auto [cgroup_path, version] = getCgroupsPath();
-
-    cgroup_reader = createCgroupsReader(version, cgroup_path);
-
-    LOG_INFO(
-        log,
-        "Will read the current memory usage from '{}' (cgroups version: {}), wait time is {} sec",
-        cgroup_path,
-        (version == CgroupsVersion::V1) ? "v1" : "v2",
-        wait_time.count());
-}
+{}
 
 CgroupsMemoryUsageObserver::~CgroupsMemoryUsageObserver()
 {
     stopThread();
 }
 
-void CgroupsMemoryUsageObserver::setMemoryUsageLimits(uint64_t hard_limit_, uint64_t soft_limit_)
-{
-    std::lock_guard<std::mutex> limit_lock(limit_mutex);
-
-    if (hard_limit_ == hard_limit && soft_limit_ == soft_limit)
-        return;
-
-    hard_limit = hard_limit_;
-    soft_limit = soft_limit_;
-
-    on_hard_limit = [this, hard_limit_](bool up)
-    {
-        if (up)
-        {
-            LOG_WARNING(log, "Exceeded hard memory limit ({})", ReadableSize(hard_limit_));
-
-            /// Update current usage in memory tracker. Also reset free_memory_in_allocator_arenas to zero though we don't know if they are
-            /// really zero. Trying to avoid OOM ...
-            MemoryTracker::setRSS(hard_limit_, 0);
-        }
-        else
-        {
-            LOG_INFO(log, "Dropped below hard memory limit ({})", ReadableSize(hard_limit_));
-        }
-    };
-
-    on_soft_limit = [this, soft_limit_](bool up)
-    {
-        if (up)
-        {
-            LOG_WARNING(log, "Exceeded soft memory limit ({})", ReadableSize(soft_limit_));
-
-#    if USE_JEMALLOC
-            LOG_INFO(log, "Purging jemalloc arenas");
-            mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".purge", nullptr, nullptr, nullptr, 0);
-#    endif
-            /// Reset current usage in memory tracker. Expect zero for free_memory_in_allocator_arenas as we just purged them.
-            uint64_t memory_usage = cgroup_reader->readMemoryUsage();
-            LOG_TRACE(
-                log,
-                "Read current memory usage {} bytes ({}) from cgroups, full available stats: {}",
-                memory_usage,
-                ReadableSize(memory_usage),
-                cgroup_reader->dumpAllStats());
-            MemoryTracker::setRSS(memory_usage, 0);
-
-            LOG_INFO(log, "Purged jemalloc arenas. Current memory usage is {}", ReadableSize(memory_usage));
-        }
-        else
-        {
-            LOG_INFO(log, "Dropped below soft memory limit ({})", ReadableSize(soft_limit_));
-        }
-    };
-
-    LOG_INFO(log, "Set new limits, soft limit: {}, hard limit: {}", ReadableSize(soft_limit_), ReadableSize(hard_limit_));
-}
-
 void CgroupsMemoryUsageObserver::setOnMemoryAmountAvailableChangedFn(OnMemoryAmountAvailableChangedFn on_memory_amount_available_changed_)
 {
     std::lock_guard<std::mutex> memory_amount_available_changed_lock(memory_amount_available_changed_mutex);
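The reader implementations removed above boil down to two recipes: cgroups v1 reports usage as the "rss" row of memory.stat, while v2 takes memory.current minus the "inactive_file" row of memory.stat (see the issue comment linked in the removed code). A self-contained sketch of the v2 computation, assuming a cgroup v2 directory path is already known:

    #include <cstdint>
    #include <fstream>
    #include <string>

    uint64_t readCgroupV2MemoryUsage(const std::string & cgroup_dir)
    {
        /// memory.current contains a single number
        std::ifstream current(cgroup_dir + "/memory.current");
        int64_t mem_usage = 0;
        current >> mem_usage;

        /// memory.stat is "key value" per line; subtract the inactive_file portion
        std::ifstream stat(cgroup_dir + "/memory.stat");
        std::string key;
        uint64_t value = 0;
        while (stat >> key >> value)
        {
            if (key == "inactive_file")
            {
                mem_usage -= static_cast<int64_t>(value);
                break;
            }
        }
        return mem_usage > 0 ? static_cast<uint64_t>(mem_usage) : 0;
    }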
@@ -300,35 +82,6 @@ void CgroupsMemoryUsageObserver::runThread()
             std::lock_guard<std::mutex> memory_amount_available_changed_lock(memory_amount_available_changed_mutex);
             on_memory_amount_available_changed();
         }
-
-        std::lock_guard<std::mutex> limit_lock(limit_mutex);
-        if (soft_limit > 0 && hard_limit > 0)
-        {
-            uint64_t memory_usage = cgroup_reader->readMemoryUsage();
-            LOG_TRACE(log, "Read current memory usage {} bytes ({}) from cgroups", memory_usage, ReadableSize(memory_usage));
-            if (memory_usage > hard_limit)
-            {
-                if (last_memory_usage <= hard_limit)
-                    on_hard_limit(true);
-            }
-            else
-            {
-                if (last_memory_usage > hard_limit)
-                    on_hard_limit(false);
-            }
-
-            if (memory_usage > soft_limit)
-            {
-                if (last_memory_usage <= soft_limit)
-                    on_soft_limit(true);
-            }
-            else
-            {
-                if (last_memory_usage > soft_limit)
-                    on_soft_limit(false);
-            }
-            last_memory_usage = memory_usage;
-        }
     }
     catch (...)
     {
@@ -337,13 +90,6 @@ void CgroupsMemoryUsageObserver::runThread()
     }
 }
 
-std::unique_ptr<ICgroupsReader> createCgroupsReader(CgroupsMemoryUsageObserver::CgroupsVersion version, const fs::path & cgroup_path)
-{
-    if (version == CgroupsMemoryUsageObserver::CgroupsVersion::V2)
-        return std::make_unique<CgroupsV2Reader>(cgroup_path);
-    else
-        return std::make_unique<CgroupsV1Reader>(cgroup_path);
-}
 }
 
 #endif
@@ -3,53 +3,27 @@
 #include <Common/ThreadPool.h>
 
 #include <chrono>
-#include <memory>
 #include <mutex>
 
 namespace DB
 {
 
-struct ICgroupsReader
-{
-    virtual ~ICgroupsReader() = default;
-
-    virtual uint64_t readMemoryUsage() = 0;
-
-    virtual std::string dumpAllStats() = 0;
-};
-
-/// Does two things:
-/// 1. Periodically reads the memory usage of the process from Linux cgroups.
-///    You can specify soft or hard memory limits:
-///    - When the soft memory limit is hit, drop jemalloc cache.
-///    - When the hard memory limit is hit, update MemoryTracking metric to throw memory exceptions faster.
-///    The goal of this is to avoid that the process hits the maximum allowed memory limit at which there is a good
-///    chance that the Limux OOM killer terminates it. All of this is done is because internal memory tracking in
-///    ClickHouse can unfortunately under-estimate the actually used memory.
-/// 2. Periodically reads the the maximum memory available to the process (which can change due to cgroups settings).
-///    You can specify a callback to react on changes. The callback typically reloads the configuration, i.e. Server
-///    or Keeper configuration file. This reloads settings 'max_server_memory_usage' (Server) and 'max_memory_usage_soft_limit'
-///    (Keeper) from which various other internal limits are calculated, including the soft and hard limits for (1.).
-///    The goal of this is to provide elasticity when the container is scaled-up/scaled-down. The mechanism (polling
-///    cgroups) is quite implicit, unfortunately there is currently no better way to communicate memory threshold changes
-///    to the database.
+/// Periodically reads the the maximum memory available to the process (which can change due to cgroups settings).
+/// You can specify a callback to react on changes. The callback typically reloads the configuration, i.e. Server
+/// or Keeper configuration file. This reloads settings 'max_server_memory_usage' (Server) and 'max_memory_usage_soft_limit'
+/// (Keeper) from which various other internal limits are calculated, including the soft and hard limits for (1.).
+/// The goal of this is to provide elasticity when the container is scaled-up/scaled-down. The mechanism (polling
+/// cgroups) is quite implicit, unfortunately there is currently no better way to communicate memory threshold changes
+/// to the database.
 #if defined(OS_LINUX)
 class CgroupsMemoryUsageObserver
 {
 public:
-    using OnMemoryLimitFn = std::function<void(bool)>;
     using OnMemoryAmountAvailableChangedFn = std::function<void()>;
 
-    enum class CgroupsVersion : uint8_t
-    {
-        V1,
-        V2
-    };
-
     explicit CgroupsMemoryUsageObserver(std::chrono::seconds wait_time_);
     ~CgroupsMemoryUsageObserver();
 
-    void setMemoryUsageLimits(uint64_t hard_limit_, uint64_t soft_limit_);
     void setOnMemoryAmountAvailableChangedFn(OnMemoryAmountAvailableChangedFn on_memory_amount_available_changed_);
 
     void startThread();
@@ -60,32 +34,22 @@ private:
     const std::chrono::seconds wait_time;
 
     std::mutex limit_mutex;
-    size_t hard_limit TSA_GUARDED_BY(limit_mutex) = 0;
-    size_t soft_limit TSA_GUARDED_BY(limit_mutex) = 0;
-    OnMemoryLimitFn on_hard_limit TSA_GUARDED_BY(limit_mutex);
-    OnMemoryLimitFn on_soft_limit TSA_GUARDED_BY(limit_mutex);
 
     std::mutex memory_amount_available_changed_mutex;
     OnMemoryAmountAvailableChangedFn on_memory_amount_available_changed TSA_GUARDED_BY(memory_amount_available_changed_mutex);
 
-    uint64_t last_memory_usage = 0; /// how much memory does the process use
     uint64_t last_available_memory_amount; /// how much memory can the process use
 
     void stopThread();
 
     void runThread();
 
-    std::unique_ptr<ICgroupsReader> cgroup_reader;
-
     std::mutex thread_mutex;
     std::condition_variable cond;
    ThreadFromGlobalPool thread;
     bool quit = false;
 };
 
-std::unique_ptr<ICgroupsReader>
-createCgroupsReader(CgroupsMemoryUsageObserver::CgroupsVersion version, const std::filesystem::path & cgroup_path);
-
 #else
 class CgroupsMemoryUsageObserver
 {
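What remains of the observer after this diff is the polling skeleton: a thread guarded by thread_mutex that waits on cond for wait_time, or wakes immediately when stopThread() sets quit. A self-contained sketch of that loop (simplified stand-in, not the real class or ThreadFromGlobalPool):

    #include <chrono>
    #include <condition_variable>
    #include <mutex>
    #include <thread>

    struct Poller
    {
        std::mutex mutex;
        std::condition_variable cond;
        bool quit = false;
        std::chrono::seconds wait_time{15};
        std::thread thread;

        void start() { thread = std::thread([this] { run(); }); }

        void run()
        {
            std::unique_lock lock(mutex);
            while (!quit)
            {
                cond.wait_for(lock, wait_time, [this] { return quit; });
                if (quit)
                    break;
                /// ... poll cgroups for the available memory amount here ...
            }
        }

        void stop()
        {
            {
                std::lock_guard lock(mutex);
                quit = true;
            }
            cond.notify_one();
            thread.join();
        }
    };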
@@ -178,6 +178,9 @@
     M(ObjectStorageAzureThreads, "Number of threads in the AzureObjectStorage thread pool.") \
     M(ObjectStorageAzureThreadsActive, "Number of threads in the AzureObjectStorage thread pool running a task.") \
     M(ObjectStorageAzureThreadsScheduled, "Number of queued or active jobs in the AzureObjectStorage thread pool.") \
+    M(BuildVectorSimilarityIndexThreads, "Number of threads in the build vector similarity index thread pool.") \
+    M(BuildVectorSimilarityIndexThreadsActive, "Number of threads in the build vector similarity index thread pool running a task.") \
+    M(BuildVectorSimilarityIndexThreadsScheduled, "Number of queued or active jobs in the build vector similarity index thread pool.") \
     \
     M(DiskPlainRewritableAzureDirectoryMapSize, "Number of local-to-remote path entries in the 'plain_rewritable' in-memory map for AzureObjectStorage.") \
     M(DiskPlainRewritableLocalDirectoryMapSize, "Number of local-to-remote path entries in the 'plain_rewritable' in-memory map for LocalObjectStorage.") \
@@ -292,6 +295,9 @@
     M(DistrCacheWriteRequests, "Number of executed Write requests to Distributed Cache") \
     M(DistrCacheServerConnections, "Number of open connections to ClickHouse server from Distributed Cache") \
     \
+    M(SchedulerIOReadScheduled, "Number of IO reads are being scheduled currently") \
+    M(SchedulerIOWriteScheduled, "Number of IO writes are being scheduled currently") \
+    \
     M(StorageConnectionsStored, "Total count of sessions stored in the session pool for storages") \
     M(StorageConnectionsTotal, "Total count of all sessions: stored in the pool and actively used right now for storages") \
     \
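For context, a Threads/ThreadsActive/ThreadsScheduled triple like the one added above is typically handed to a thread pool so the three gauges track it automatically. A sketch of that consumption pattern against the surrounding tree (not self-contained: ThreadPool and Metric come from Common/ThreadPool.h and Common/CurrentMetrics.h, and the exact constructor arguments are an assumption):

    namespace CurrentMetrics
    {
        extern const Metric BuildVectorSimilarityIndexThreads;
        extern const Metric BuildVectorSimilarityIndexThreadsActive;
        extern const Metric BuildVectorSimilarityIndexThreadsScheduled;
    }

    ThreadPool pool(
        CurrentMetrics::BuildVectorSimilarityIndexThreads,
        CurrentMetrics::BuildVectorSimilarityIndexThreadsActive,
        CurrentMetrics::BuildVectorSimilarityIndexThreadsScheduled,
        /*max_threads=*/ 16);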
@@ -113,6 +113,56 @@ std::string_view CurrentThread::getQueryId()
     return current_thread->getQueryId();
 }
 
+void CurrentThread::attachReadResource(ResourceLink link)
+{
+    if (unlikely(!current_thread))
+        return;
+    if (current_thread->read_resource_link)
+        throw Exception(ErrorCodes::LOGICAL_ERROR, "Thread #{} has been already attached to read resource", std::to_string(getThreadId()));
+    current_thread->read_resource_link = link;
+}
+
+void CurrentThread::detachReadResource()
+{
+    if (unlikely(!current_thread))
+        return;
+    if (!current_thread->read_resource_link)
+        throw Exception(ErrorCodes::LOGICAL_ERROR, "Thread #{} has not been attached to read resource", std::to_string(getThreadId()));
+    current_thread->read_resource_link.reset();
+}
+
+ResourceLink CurrentThread::getReadResourceLink()
+{
+    if (unlikely(!current_thread))
+        return {};
+    return current_thread->read_resource_link;
+}
+
+void CurrentThread::attachWriteResource(ResourceLink link)
+{
+    if (unlikely(!current_thread))
+        return;
+    if (current_thread->write_resource_link)
+        throw Exception(ErrorCodes::LOGICAL_ERROR, "Thread #{} has been already attached to write resource", std::to_string(getThreadId()));
+    current_thread->write_resource_link = link;
+}
+
+void CurrentThread::detachWriteResource()
+{
+    if (unlikely(!current_thread))
+        return;
+    if (!current_thread->write_resource_link)
+        throw Exception(ErrorCodes::LOGICAL_ERROR, "Thread #{} has not been attached to write resource", std::to_string(getThreadId()));
+    current_thread->write_resource_link.reset();
+}
+
+ResourceLink CurrentThread::getWriteResourceLink()
+{
+    if (unlikely(!current_thread))
+        return {};
+    return current_thread->write_resource_link;
+}
+
 MemoryTracker * CurrentThread::getUserMemoryTracker()
 {
     if (unlikely(!current_thread))
@@ -2,6 +2,7 @@
 
 #include <Interpreters/Context_fwd.h>
 #include <Common/ThreadStatus.h>
+#include <Common/Scheduler/ResourceLink.h>
 
 #include <memory>
 #include <string>
@@ -23,7 +24,6 @@ class QueryStatus;
 struct Progress;
 class InternalTextLogsQueue;
 
-
 /** Collection of static methods to work with thread-local objects.
  * Allows to attach and detach query/process (thread group) to a thread
  * (to calculate query-related metrics and to allow to obtain query-related data from a thread).
@@ -92,6 +92,14 @@ public:
 
     static std::string_view getQueryId();
 
+    // For IO Scheduling
+    static void attachReadResource(ResourceLink link);
+    static void detachReadResource();
+    static ResourceLink getReadResourceLink();
+    static void attachWriteResource(ResourceLink link);
+    static void detachWriteResource();
+    static ResourceLink getWriteResourceLink();
+
     /// Initializes query with current thread as master thread in constructor, and detaches it in destructor
     struct QueryScope : private boost::noncopyable
     {
@@ -102,6 +110,39 @@ public:
         void logPeakMemoryUsage();
         bool log_peak_memory_usage_in_destructor = true;
     };
+
+    /// Scoped attach/detach of IO resource links
+    struct IOScope : private boost::noncopyable
+    {
+        explicit IOScope(ResourceLink read_resource_link, ResourceLink write_resource_link)
+        {
+            if (read_resource_link)
+            {
+                attachReadResource(read_resource_link);
+                read_attached = true;
+            }
+            if (write_resource_link)
+            {
+                attachWriteResource(write_resource_link);
+                write_attached = true;
+            }
+        }
+
+        explicit IOScope(const IOSchedulingSettings & settings)
+            : IOScope(settings.read_resource_link, settings.write_resource_link)
+        {}
+
+        ~IOScope()
+        {
+            if (read_attached)
+                detachReadResource();
+            if (write_attached)
+                detachWriteResource();
+        }
+
+        bool read_attached = false;
+        bool write_attached = false;
+    };
 };
 
 }
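A sketch of the intended usage of the new scope (not self-contained: ReadBuffer, WriteBuffer, copyData and IOSchedulingSettings are the surrounding tree's types, and copyWithScheduling is a hypothetical caller):

    void copyWithScheduling(ReadBuffer & in, WriteBuffer & out, const IOSchedulingSettings & settings)
    {
        /// While io_scope is alive, reads and writes issued by this thread are
        /// attributed to settings.read_resource_link / settings.write_resource_link;
        /// both links are detached automatically on scope exit, even on exception.
        CurrentThread::IOScope io_scope(settings);
        copyData(in, out);
    }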
@@ -609,6 +609,7 @@
     M(728, UNEXPECTED_DATA_TYPE) \
     M(729, ILLEGAL_TIME_SERIES_TAGS) \
     M(730, REFRESH_FAILED) \
+    M(731, QUERY_CACHE_USED_WITH_NON_THROW_OVERFLOW_MODE) \
     \
     M(900, DISTRIBUTED_CACHE_ERROR) \
     M(901, CANNOT_USE_DISTRIBUTED_CACHE) \
Some files were not shown because too many files have changed in this diff.